Python | def create_traces(self, df_raw):
"""Return traces for plotly chart.
Args:
df_raw: pandas dataframe with columns: `(category, label, start, end, progress)`
Returns:
list: Dash chart traces
"""
# If start is None, assign end to start so that the sort is correct
start_index = df_raw.columns.get_loc('start')
end_index = df_raw.columns.get_loc('end')
for index in [idx for idx, is_na in enumerate(df_raw['start'].isna()) if is_na]:
df_raw.iloc[index, start_index] = df_raw.iloc[index, end_index]
df_raw['progress'] = df_raw['progress'].fillna(0) # Fill possibly missing progress values for milestones
df_raw = (
df_raw
.sort_values(by=['category', 'start'], ascending=False)
.sort_values(by=['end'], ascending=False)
.reset_index(drop=True)
)
# Create color lookup using categories in sorted order
categories = sorted(set(df_raw['category']))
self.color_lookup = {cat: self.pallette[idx] for idx, cat in enumerate(categories)}
# Track which categories have been plotted
plotted_categories = []
# Create the Gantt traces
traces = []
for task in df_raw.itertuples():
y_pos = task.Index
is_first = task.category not in plotted_categories
plotted_categories.append(task.category)
traces.append(self._create_task_shape(task, y_pos, is_first))
if task.progress > 0:
traces.append(self._create_progress_shape(task, y_pos))
traces.append(self._create_annotation(task, y_pos))
return traces |
Python | def _create_hover_text(self, task):
"""Return hover text for given trace.
Args:
task: row tuple from df_raw with: `(category, label, start, end, progress)`
Returns:
string: HTML-formatted hover text
"""
dates = [format_unix(get_unix(str_ts, self.date_format), '%a, %d%b%Y') for str_ts in [task.start, task.end]]
if task.start != task.end:
date_range = f'<br><b>Start</b>: {dates[0]}<br><b>End</b>: {dates[1]}'
else:
date_range = f'<br><b>Milestone</b>: {dates[1]}'
return f'<b>{task.category}</b><br>{task.label} ({int(task.progress * 100)}%)<br>{date_range}' |
Python | def _create_task_shape(self, task, y_pos, is_first):
"""Create colored task scatter rectangle.
Args:
task: row tuple from df_raw with: `(category, label, start, end, progress)`
y_pos: top y-coordinate of task
is_first: if True, this is the first time a task of this category will be plotted
Returns:
trace: single Dash chart Scatter trace
"""
color = self.color_lookup[task.category]
scatter_kwargs = {
'fill': 'toself',
'fillcolor': color,
'hoverlabel': self.hover_label_settings,
'legendgroup': color,
'line': {'width': 1},
'marker': {'color': color},
'mode': 'lines',
'showlegend': is_first,
'text': self._create_hover_text(task),
'x': [task.start, task.end, task.end, task.start, task.start],
'y': [y_pos, y_pos, y_pos - self.rh, y_pos - self.rh, y_pos],
}
if is_first:
scatter_kwargs['name'] = task.category
return go.Scatter(**scatter_kwargs) |
Python | def _create_annotation(self, task, y_pos):
"""Add task label to chart as text overlay.
Args:
task: row tuple from df_raw with: `(category, label, start, end, progress)`
y_pos: top y-coordinate of task
Returns:
trace: single Dash chart Scatter trace
"""
# For milestones with a narrow fill, hover can be tricky. The intent was to make the whole length of the text
# hoverable, but only the x/y point appears to be hoverable, although it does enlarge the hover zone
return go.Scatter(
hoverlabel=self.hover_label_settings,
hovertemplate=self._create_hover_text(task) + '<extra></extra>',
hovertext=self._create_hover_text(task),
legendgroup=self.color_lookup[task.category],
mode='text',
showlegend=False,
text=task.label,
textposition='middle left',
x=[task.end],
y=[y_pos - self.rh / 2],
) |
Python | def create_traces(self, df_raw): # noqa: CCR001
"""Return traces for plotly chart.
Args:
df_raw: pandas dataframe with columns: `(category, label, start, end)`
Returns:
list: Dash chart traces
"""
# Get all unique category names and create lookup for y positions
self.categories = sorted(cat for cat in set(df_raw['category'].tolist()) if cat)
y_pos_lookup = {cat: self.y_space * idx for idx, cat in enumerate(self.categories)}
# Create the Time Vis traces
traces = []
self._shapes = []
self._annotations = []
for vis in df_raw.itertuples():
if vis.category in y_pos_lookup:
y_pos = y_pos_lookup[vis.category]
if vis.end:
traces.append(self._create_time_vis_shape(vis, y_pos))
if vis.label:
traces.append(self._create_annotation(vis, y_pos))
else:
traces.append(self._create_event(vis, y_pos))
else:
y_pos = 0
traces.append(self._create_non_cat_shape(vis, y_pos))
return traces |
Python | def _create_hover_text(self, vis):
"""Return hover text for given trace.
Args:
vis: row tuple from df_raw with: `(category, label, start, end)`
Returns:
string: HTML-formatted hover text
"""
new_format = f'%a, {GDP_TIME_FORMAT}'
start_date = format_unix(get_unix(vis.start, self.date_format), new_format)
if vis.end:
end_date = format_unix(get_unix(vis.end, self.date_format), new_format)
date_range = f'<b>Start</b>: {start_date}<br><b>End</b>: {end_date}'
else:
date_range = f'<b>Event</b>: {start_date}'
return f'<b>{vis.category}</b><br>{vis.label}<br>{date_range}' |
Python | def _create_non_cat_shape(self, vis, y_pos):
"""Create non-category time visualization (vertical across all categories).
Note: background shape is set below a transparent trace so that hover works
Args:
vis: row tuple from df_raw with: `(category, label, start, end)`
y_pos: top y-coordinate of vis
Returns:
trace: single Dash chart Scatter trace
"""
bot_y = self.y_space * len(self.categories)
self._shapes.append(
go.layout.Shape(
fillcolor=self.fillcolor,
layer='below',
line={'width': 0},
opacity=0.4,
type='rect',
x0=vis.start,
x1=vis.end,
xref='x',
y0=bot_y,
y1=y_pos,
yref='y',
),
)
return go.Scatter(
fill='toself',
opacity=0,
hoverlabel=self.hover_label_settings,
line={'width': 0},
mode='lines',
text=self._create_hover_text(vis),
x=[vis.start, vis.end, vis.end, vis.start, vis.start],
y=[y_pos, y_pos, bot_y, bot_y, y_pos],
) |
Python | def _create_time_vis_shape(self, vis, y_pos):
"""Create filled rectangle for time visualization.
Args:
vis: row tuple from df_raw with: `(category, label, start, end)`
y_pos: top y-coordinate of vis
Returns:
trace: single Dash chart Scatter trace
"""
return go.Scatter(
fill='toself',
fillcolor=self.fillcolor,
hoverlabel=self.hover_label_settings,
line={'width': 0},
mode='lines',
text=self._create_hover_text(vis),
x=[vis.start, vis.end, vis.end, vis.start, vis.start],
y=[y_pos, y_pos, y_pos - self.rh, y_pos - self.rh, y_pos],
) |
Python | def _create_annotation(self, vis, y_pos):
"""Add vis label to chart as text overlay.
Args:
vis: row tuple from df_raw with: `(category, label, start, end)`
y_pos: top y-coordinate of vis
Returns:
trace: single Dash chart Scatter trace
"""
return go.Scatter(
hoverlabel=self.hover_label_settings,
hovertemplate=self._create_hover_text(vis) + '<extra></extra>',
hovertext=self._create_hover_text(vis),
mode='text',
text=vis.label,
textposition='middle right',
x=[vis.start],
y=[y_pos - self.rh / 2],
) |
Python | def _create_event(self, vis, y_pos):
"""Create singular event with vertical line, marker, and text.
If label is longer than 10 characters, then the annotation is shown offset with an arrow.
Args:
vis: row tuple from df_raw with: `(category, label, start, end)`
y_pos: top y-coordinate of vis
Returns:
trace: single Dash chart Scatter trace
"""
if len(vis.label) > 10:
self._annotations.append({
'align': 'right',
'arrowcolor': self.fillcolor,
'showarrow': True,
'arrowhead': 2,
'text': vis.label,
'x': vis.start,
'xanchor': 'right',
'y': y_pos - self.rh / 2,
'yanchor': 'middle',
})
self._shapes.append(
go.layout.Shape(
layer='below',
line={
'color': self.fillcolor,
'dash': 'longdashdot',
'width': 2,
},
type='line',
x0=vis.start,
x1=vis.start,
xref='x',
y0=self.y_space * len(self.categories),
y1=y_pos - self.rh / 2,
yref='y',
),
)
return go.Scatter(
hoverlabel=self.hover_label_settings,
hovertemplate=self._create_hover_text(vis) + '<extra></extra>',
hovertext=self._create_hover_text(vis),
marker={'color': self.fillcolor},
mode='markers+text',
text='' if len(vis.label) > 10 else vis.label,
textposition='top center',
x=[vis.start],
y=[y_pos - self.rh / 2],
) |
Python | def calculate_grid(grid_dims, corners, width, height):
"""Calculate the grid x and y coordinates.
Args:
grid_dims: tuple of the number of tiles in grid. In format `(row, column)`
corners: dictionary with keys `(x, y)` containing lists of the four exterior corner coordinates
width: float width in pixels
height: float height in pixels
Returns:
dict: with keys `(x, y)` containing lists of float coordinate values
"""
grid = {'x': [], 'y': []}
for r_idx in range(grid_dims[0]):
y_offset = height * (grid_dims[0] - r_idx)
y_grid = [y_offset - _y for _y in corners['y']]
for c_idx in range(grid_dims[1]):
x_offset = width * c_idx
grid['x'].extend([x_offset + _x for _x in corners['x']])
grid['y'].extend(y_grid)
return grid |
Python | def calculate_border(grid_dims, width, height):
"""Calculate each line in all borders.
Args:
grid_dims: tuple of the number of tiles in grid. In format `(row, column)`
width: float width in pixels
height: float height in pixels
Returns:
list: of dictionaries with keys `(x, y)` whose values are the two endpoints of each line in the grid
"""
return [
{
'x': [c_idx * width] * 2,
'y': [0, height * grid_dims[0]],
} for c_idx in range(grid_dims[1] + 1)
] + [
{
'x': [0, width * grid_dims[1]],
'y': [r_idx * height] * 2,
} for r_idx in range(grid_dims[0] + 1)
] |
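A minimal usage sketch (not part of the source): the two grid-layout helpers above are plain functions, so they can be exercised with a unit-square corner template. The grid dimensions and tile sizes below are assumptions for illustration only.

# Hypothetical example: 2 rows x 3 columns of tiles, unit-square corner template
corners = {'x': [0.0, 1.0, 1.0, 0.0], 'y': [0.0, 0.0, 1.0, 1.0]}
grid = calculate_grid(grid_dims=(2, 3), corners=corners, width=1.5, height=1.5)
borders = calculate_border(grid_dims=(2, 3), width=1.5, height=1.5)
# grid['x'] / grid['y'] are flat lists of tile-corner coordinates;
# borders is a list of {'x': [...], 'y': [...]} line segments for the grid lines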
Python | def calculate_layout(self, grid_dims, corners, titles):
"""Calculate coordinate chart layout. Called by __init__, but can be called later to update the chart.
Args:
grid_dims: tuple of the number of tiles in grid. In format `(row, column)`
corners: dictionary with keys `(x, y)` containing lists of the four corner coordinates
titles: list of strings that will appear in each tile. Default is None for no titles
"""
# Calculate exterior height and width of grid
width = float(np.max(corners['x']) + np.min(corners['x']))
height = float(np.max(corners['y']) + np.min(corners['y']))
# Set grid and border coordinates for traces
self._grid = calculate_grid(grid_dims, corners, width, height)
self._borders = calculate_border(grid_dims, width, height)
# Add titles to annotations if provided
if titles is None:
self.annotations = []
else:
v_offset = np.min(corners['y']) * 0.4
self.annotations = [
go.layout.Annotation(
ax=0, ay=0,
x=(idx % grid_dims[1] + 0.5) * width, # noqa: S001
y=(grid_dims[0] - int(idx / grid_dims[1]) % grid_dims[0]) * height - v_offset,
text=title,
)
for idx, title in enumerate(titles) if title is not None
] |
Python | def create_traces(self, df_raw):
"""Return traces for plotly chart.
Args:
df_raw: pandas dataframe with at minimum the column `values: str`
Returns:
list: Dash chart traces
"""
# Check that the raw data frame is properly formatted
check_raw_data(df_raw, min_keys=['values'])
# Merge x/y grid data with values. Temporarily extend values with None, then drop those rows
values = df_raw['values'].to_list()
values.extend([None] * (len(self._grid['x']) - len(values)))
df_grid = pd.DataFrame(
data={
'values': values,
'x': self._grid['x'],
'y': self._grid['y'],
},
).dropna()
return [
go.Scatter(
hoverinfo='none',
line=self.border_line or {'color': 'black'},
mode='lines',
opacity=self.border_opacity,
showlegend=False,
x=border['x'],
y=border['y'],
) for border in self._borders
] + [
go.Scatter(
hoverinfo='text',
mode='markers',
showlegend=False,
text=df_grid['values'],
x=df_grid['x'],
y=df_grid['y'],
marker=self.create_marker(df_grid, **(self.marker_kwargs or {})),
),
] |
Python | def create_marker(self, df_grid, colorscale='Viridis', size=16, symbol='circle'):
"""Return a dictionary for the scatter plot.
See: https://plot.ly/python/colorscales/ (Named colorscales: Reds, Bluered, Jet, Viridis, Cividis, etc.)
Args:
df_grid: pandas dataframe with at minimum the columns `values: str`, `x: float`, `y: float`
colorscale: plotly colorscale, see doc link above. Default is 'Viridis'
size: integer marker size
symbol: marker symbol (square, circle, circle-open, x, etc.)
Returns:
dict: the chart marker shape, symbol, color, etc.
"""
return {
'color': df_grid['values'],
'colorscale': colorscale,
'showscale': True,
'size': size,
'symbol': symbol,
} |
Python | def calculate_calendar_grid_corners(margin, days_in_week=7, max_weeks_in_month=6):
"""Calculate the four exterior corner coordinates of a calendar coordinate grid.
Args:
margin: float spacing between tiles
days_in_week: number of days in week. Default is 7
max_weeks_in_month: max number of weeks in a month. Default is 6
Returns:
dict: with keys `(x, y)` containing lists of the four exterior corner coordinates
"""
y_indices = [[idx] * days_in_week for idx in range(max_weeks_in_month)]
return {
'x': np.add([*range(days_in_week)] * max_weeks_in_month, margin),
'y': np.add([*chain.from_iterable(y_indices)], margin),
} |
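A short hedged sketch of how the calendar corner helper above might be used; the margin value is an assumption.

# Hypothetical example: corner offsets for a 7-day x 6-week calendar grid
corners = calculate_calendar_grid_corners(margin=0.1)
# corners['x'] and corners['y'] each hold 42 offsets (7 days x 6 weeks);
# based on the docstrings, the result is the `corners` argument expected by `calculate_layout` above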
Python | def format_data(self, month_lists, year):
"""Return the formatted list that can be passed to a coordinate chart.
Args:
month_lists: list of daily values where each sublist is one month starting with January
year: year expressed as a 4-digit number (e.g. 2019)
Returns:
list: of values with additional None values to align with grid
"""
values = []
for idx_month, daily_list in enumerate(month_lists):
idx_first_day, count_days = calendar.monthrange(year, idx_month + 1)
idx_first_day += 1 # Increment to start on Sunday -- PLANNED: make this configurable
values.extend([None] * idx_first_day)
values.extend(daily_list)
values.extend([None] * (len(self.corners['x']) - idx_first_day - count_days))
return values |
Python | def format_data(self, daily_values, year, month):
"""Return the formatted list that can be passed to a coordinate chart.
Args:
daily_values: list of values for each day of month
year: year expressed in 4 digits (2019, 2020, etc.)
month: month index in [1, 12]
Returns:
list: of values with additional None values to align with grid
"""
idx_first_day = calendar.monthrange(year, month)[0]
values = [None] * idx_first_day
values.extend(daily_values)
return values |
Python | def write_div(figure, path_or_file_object, is_div=True, **html_kwargs):
"""Write Plotly figure as HTML to specified file.
Args:
figure: Plotly figure (can be from `create_figure` for custom charts)
path_or_file_object: *string* path or file object
is_div: if True (default) will override html_kwargs to only write the minimum HTML needed
html_kwargs: additional keyword arguments passed to `plotly.io.write_html()`
"""
for key in ['include_plotlyjs', 'full_html']:
if key not in html_kwargs and is_div:
html_kwargs[key] = False
plotly.io.write_html(fig=figure, file=path_or_file_object, **html_kwargs) |
Python | def make_div(figure, **html_kwargs):
"""Write Plotly figure as HTML to specified file.
Args:
figure: Plotly figure (can be from `create_figure` for custom charts)
html_kwargs: additional keyword arguments passed to `plotly.io.write_html()`
Returns:
str: HTML div
"""
with io.StringIO() as output:
write_div(figure, output, is_div=True, **html_kwargs)
return output.getvalue() |
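A minimal, self-contained sketch (the figure contents are assumptions) showing how `make_div` can be used to embed a chart as a div string.

# Hypothetical example: convert a simple Plotly figure into an embeddable HTML div
import plotly.graph_objects as go
figure = go.Figure(data=[go.Scatter(x=[1, 2, 3], y=[4, 1, 2])])
div_text = make_div(figure)  # returns '<div ...>...</div>' suitable for templating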
Python | def add_image(image_path, alt_text=None):
"""Write base64 image to HTML.
Args:
image_path: Path to the image file; the format is read from the file suffix
alt_text: alternate text. If None, will show the image filename
Returns:
str: HTML
"""
with open(image_path, 'rb') as image_file:
encoded_image = b64encode(image_file.read()).decode()
image_uri = f'data:image/{image_path.suffix[1:]};base64,{encoded_image}'
return f'<img src="{image_uri}" alt="{alt_text or image_path.name}"/>' |
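A usage sketch for `add_image`; note that `image_path` must be a `pathlib.Path` because the function reads `.suffix` and `.name`. The filename is an assumption.

# Hypothetical example
from pathlib import Path
img_tag = add_image(Path('logo.png'), alt_text='Project logo')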
Python | def add_video(video_path, alt_text=None):
"""Write base64 video to HTML.
Video formats can easily be converted with ffmpeg: `ffmpeg -i video_filename.mov video_filename.webm`
Args:
video_path: Path to the video file; the format is read from the file suffix. Video should be in webm format
alt_text: alternate text. If None, will show the video filename
Returns:
str: HTML video tag
"""
with open(video_path, 'rb') as video_file:
encoded_video = b64encode(video_file.read()).decode()
video_uri = f'data:video/{video_path.suffix[1:]};base64,{encoded_video}'
return f'<video src="{video_uri}" controls>{alt_text or video_path.name}</video>' |
Python | def write_image_file(figure, path_or_file_object, image_format, **img_kwargs):
"""Write Plotly figure as an image to specified file.
Args:
figure: Plotly figure (can be from `create_figure` for custom charts)
path_or_file_object: *string* path or file object
image_format: one of `(png, jpg, jpeg, webp, svg, pdf)`
img_kwargs: additional keyword arguments passed to `plotly.io.write_image()`
"""
plotly.io.write_image(fig=figure, file=str(path_or_file_object), format=image_format, **img_kwargs) |
Python | def capture_plotly_body():
"""Return HTML body that includes necessary scripts for Plotly and MathJax.
Returns:
str: HTML body content with the necessary Plotly and MathJax scripts
"""
# Capture necessary Plotly boilerplate HTML
with io.StringIO() as output:
write_div({}, output, is_div=False, include_mathjax='.js', validate=False)
blank_plotly = BeautifulSoup(output.getvalue(), features='lxml')
# Remove the empty figure div and corresponding script
plot_div = blank_plotly.find('div', attrs={'class': 'plotly-graph-div'})
for script in blank_plotly.find_all('script')[::-1]:
# Use the ID from the plot to identify which script needs to be removed
if plot_div.attrs['id'] in script.prettify():
script.decompose()
break
plot_div.decompose()
return blank_plotly.body.prettify() |
Python | def format_plotly_boilerplate(**doc_kwargs):
"""Initialize a boilerplate dominate document for creating static Plotly HTML files.
See dominate documentation: https://pypi.org/project/dominate/
Args:
doc_kwargs: keyword arguments for `dominate.document()`
Returns:
dominate.document: dominate document instance
"""
doc = dominate.document(**doc_kwargs)
with doc:
util.raw(capture_plotly_body())
return doc |
Python | def create_dbc_doc(theme=dbc.themes.BOOTSTRAP, custom_styles='', **doc_kwargs):
"""Create boilerplate dominate document with Bootstrap and Plotly for static HTML.
Based on: https://github.com/facultyai/dash-bootstrap-components/tree/master/docs/templates/partials
See dominate documentation: https://pypi.org/project/dominate/
Args:
theme: string URL to CSS for theming Bootstrap. Default is `dbc.themes.BOOTSTRAP`
custom_styles: optional custom CSS to add to file. Default is blank (`''`)
doc_kwargs: keyword arguments for `dominate.document()`
Returns:
dominate.document: dominate document instance
"""
stylesheets = [
{'href': 'https://cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/styles/a11y-light.min.css'},
{'href': theme},
]
scripts = [
{'src': 'https://code.jquery.com/jquery-3.4.1.slim.min.js'},
{'src': 'https://cdn.jsdelivr.net/npm/[email protected]/dist/umd/popper.min.js'},
{'src': 'https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js'},
{'src': 'https://cdn.jsdelivr.net/gh/highlightjs/[email protected]/build/highlight.min.js'},
]
doc = format_plotly_boilerplate(**doc_kwargs)
with doc.head:
tags.meta(charset='utf-8')
tags.meta(name='viewport', content='width=device-width, initial-scale=1')
for sheet_kwargs in stylesheets:
tags.link(rel='stylesheet', **sheet_kwargs)
util.raw(f'<style>{custom_styles}</style>')
for script_kwargs in scripts:
tags.script(**script_kwargs)
util.raw('<script>hljs.initHighlightingOnLoad();</script>')
return doc |
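A hedged sketch of building a standalone document with the boilerplate helper above; the title and heading text are assumptions, and `tags` refers to `dominate.tags` as used throughout this module.

# Hypothetical example: create a Bootstrap+Plotly document and add a heading
doc = create_dbc_doc(title='Example Report')
with doc:
    tags.h1('Example Report')
html_text = str(doc)  # full HTML ready to write to a file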
Python | def tag_markdown(text, **markdown_kwargs):
"""Format markdown text as HTML.
Args:
text: markdown string
markdown_kwargs: additional keyword arguments for `markdown.markdown`, such as `extensions`
"""
util.raw(markdown.markdown(text, **markdown_kwargs)) |
Python | def tag_table(df_table, table_class=None):
"""Format HTML for a responsive Bootstrap table.
See Bootstrap documentation at: https://getbootstrap.com/docs/4.4/content/tables/#tables
Args:
df_table: pandas dataframe to show in table
table_class: string classes to add to table. If None, will use default string
Raises:
RuntimeError: if `df_table` is not a DataFrame object
"""
if table_class is None:
table_class = 'table table-bordered table-striped table-hover'
if not isinstance(df_table, pd.core.frame.DataFrame):
raise RuntimeError(f'df_table is not a DataFrame ({type(df_table)}):\n{df_table}')
df_table = df_table.reset_index()
with tags.div(_class='table-responsive').add(tags.table(_class=table_class)):
# Create header row
with tags.thead().add(tags.tr()):
for col in df_table.columns:
tags.th(col)
# Create body rows
with tags.tbody():
for row in df_table.itertuples(index=False):
with tags.tr():
for value in row:
tags.td(str(value)) |
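Because `tag_table` writes into the active dominate context, it appears intended to be called inside a `with doc:` block; a minimal sketch with assumed data follows.

# Hypothetical example
import pandas as pd
doc = create_dbc_doc()
with doc:
    tag_table(pd.DataFrame({'name': ['alpha', 'beta'], 'value': [1, 2]}))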
Python | def write_lookup(key, function_lookup):
"""Determine the lookup result and add to the file.
Args:
key: string key for function lookup
function_lookup: dictionary with either the string result or equation and arguments
Raises:
RuntimeError: if error in lookup dictionary
"""
try:
match = function_lookup[key]
except KeyError:
raise RuntimeError(f'Could not find "{key}" in {function_lookup}')
if isinstance(match, str):
util.raw(match)
elif len(match) == 2:
fun, args = match
result = fun(*args)
if isinstance(result, str):
util.raw(result)
else:
raise RuntimeError(f'Match failed for "{key}". Returned: {match} from {function_lookup}') |
Python | def markdown_machine(lines, function_lookup): # noqa: CCR001
"""Convert markdown text file into Plotly-HTML and write to doc context.
Note: you will need a document with necessary boilerplate and call this within a `with doc:` dominate context
Multiple Markdown files can then be put into a single HTML output file by calling this function with new lines
and function lookup arguments
Args:
lines: list of text file lines
function_lookup: dictionary with either the string result or equation and arguments
Will be inserted into file where `>>lookup:function_name` assuming key of `function_name`
"""
markdown_lines = []
for line in lines:
if line.startswith('>>lookup:'):
# Write stored markdown and clear list. Then write the matched lookup
tag_markdown('\n'.join(markdown_lines))
markdown_lines = []
write_lookup(line.split('>>lookup:')[1], function_lookup)
else:
markdown_lines.append(line)
if markdown_lines:
tag_markdown('\n'.join(markdown_lines)) |
Python | def write_from_markdown(filename, function_lookup, **dbc_kwargs):
"""Wrap markdown_machine to convert markdown to Bootstrap HTML.
Args:
filename: path to markdown file
function_lookup: dictionary with either the string result or equation and arguments
Will be inserted into file where `>>lookup:function_name` assuming key of `function_name`
dbc_kwargs: keyword arguments to pass to `create_dbc_doc`
Returns:
Path: created HTML filename
"""
lines = filename.read_text().split('\n')
html_filename = filename.parent / f'{filename.stem}.html'
doc = create_dbc_doc(**dbc_kwargs)
with doc:
with tags.div(_class='container').add(tags.div(_class='col')):
markdown_machine(lines, function_lookup)
html_filename.write_text(str(doc))
return html_filename |
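A hedged end-to-end sketch for the markdown pipeline above; the markdown filename and lookup key are assumptions, and the lookup value uses the plain-string branch of `write_lookup`.

# Hypothetical example: render README.md to Bootstrap HTML with one lookup substitution
from pathlib import Path
function_lookup = {'chart': '<p>(chart placeholder)</p>'}
html_path = write_from_markdown(Path('README.md'), function_lookup)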
Python | def add_nested_list_item(l_index, l_string, level=1):
"""Add nested list items recursively.
Args:
l_index: numeric index of the list depth (note: 1-indexed)
l_string: string to show in the list element
level: current list depth. Optional and default is 1
"""
with tags.ul():
if l_index != level:
add_nested_list_item(l_index, l_string, level + 1)
else:
tags.li(f'H{l_index}: {l_string}') |
Python | def create_toc(html_text, header_depth=3):
"""Return the HTML for a nested table of contents based on the HTML file path.
Args:
html_text: HTML text
header_depth: depth of headers to show. Default is 3 (H1-H3)
Returns:
string: table of contents
"""
soup = BeautifulSoup(html_text, features='lxml')
h_lookup = {f'h{idx}': idx for idx in range(1, header_depth + 1)}
toc = tags.div()
for header in soup.findAll([*h_lookup.keys()]):
with toc:
add_nested_list_item(h_lookup[header.name], header.string)
# FIXME: Figure out how to make the header links work (i.e. when clicked in TOC go to the respective header)
# > `tags.a(header.string, f'#{header.string}')`?
return str(toc) |
Python | def write_toc(html_path, header_depth=3, toc_key=TOC_KEYWORD):
"""Write the nested table of contents to the specified file.
Args:
html_path: path to the HTML file
header_depth: depth of headers to show. Default is 3 (H1-H3)
toc_key: string to replace with the nested table of contents. Default is `TOC_KEYWORD`
Raises:
RuntimeError: if the key was not found in the file
"""
text = html_path.read_text()
if toc_key not in text:
raise RuntimeError(f'HTML file does not have the table of contents key ({toc_key}): {html_path}')
toc = create_toc(text, header_depth=header_depth)
html_path.write_text(text.replace(toc_key, toc)) |
Python | def initialize_cache(db_instance):
"""Ensure that the directory and database exist. Remove files from database if manually removed.
Args:
db_instance: Connected Database file with `DBConnect()`.
"""
table = db_instance.db.create_table(CACHE_TABLE_NAME)
removed_files = []
for row in table:
if not Path(row[FILENAME_KEY]).is_file():
removed_files.append(row[FILENAME_KEY])
for filename in removed_files:
table.delete(filename=filename) |
Python | def match_identifier_in_cache(identifier, db_instance):
"""Return list of matches for the given identifier in the file database.
Args:
identifier: identifier to use as a reference if the corresponding data is already cached
db_instance: Connected Database file with `DBConnect()`.
Returns:
list: list of match object with keys of the SQL table
"""
kwargs = {ID_KEY: identifier}
return [*get_files_table(db_instance).find(**kwargs)] |
Python | def store_cache_as_file(prefix, identifier, db_instance, cache_dir=CACHE_DIR, suffix='.json'):
"""Store the reference in the cache database and return the file so the user can handle saving the file.
Args:
prefix: string used to create more recognizable filenames
identifier: identifier to use as a reference if the corresponding data is already cached
db_instance: Connected Database file with `DBConnect()`.
cache_dir: path to the directory to store the file. Default is `CACHE_DIR`
suffix: string filename suffix. The default is `.json`
Returns:
Path: to the cached file. Caller needs to write to the file
Raises:
RuntimeError: if duplicate match found when storing
"""
# Check that the identifier isn't already in the database
matches = match_identifier_in_cache(identifier, db_instance)
if matches:
raise RuntimeError(f'Already have an entry for this identifier (`{identifier}`): {matches}')
# Update the database and store the file
filename = cache_dir / f'{prefix}_{uniq_table_id()}{suffix}'
new_row = {FILENAME_KEY: str(filename), ID_KEY: identifier, TS_KEY: time.time()}
get_files_table(db_instance).insert(new_row)
return filename |
Python | def store_cache_object(prefix, identifier, obj, db_instance, cache_dir=CACHE_DIR):
"""Store the object as a JSON file and track in a SQLite database to prevent duplicates.
Args:
prefix: string used to create more recognizable filenames
identifier: identifier to use as a reference if the corresponding data is already cached
obj: JSON object to write
db_instance: Connected Database file with `DBConnect()`.
cache_dir: path to the directory to store the file. Default is `CACHE_DIR`
Raises:
Exception: if duplicate match found when storing
"""
filename = store_cache_as_file(prefix, identifier, db_instance, cache_dir)
try:
write_pretty_json(filename, obj)
except Exception:
# If writing the file fails, ensure that the record is removed from the database
get_files_table(db_instance).delete(filename=filename)
raise |
Python | def retrieve_cache_fn(identifier, db_instance):
"""Retrieve stored object from cache database.
Args:
identifier: identifier to use as a reference if the corresponding data is already cached
db_instance: Connected Database file with `DBConnect()`.
Returns:
Path: to the cached file. Caller needs to read the file
Raises:
RuntimeError: if not exactly one match found
"""
matches = match_identifier_in_cache(identifier, db_instance)
if len(matches) != 1:
raise RuntimeError(f'Did not find exactly one entry for this identifier (`{identifier}`): {matches}')
return Path(matches[0][FILENAME_KEY]) |
Python | def retrieve_cache_object(identifier, db_instance):
"""Retrieve stored object from cache database.
Args:
identifier: identifier to use as a reference if the corresponding data is already cached
db_instance: Connected Database file with `DBConnect()`.
Returns:
dict: object stored in the cache
"""
filename = retrieve_cache_fn(identifier, db_instance)
return json.loads(filename.read_text()) | def retrieve_cache_object(identifier, db_instance):
"""Retrieve stored object from cache database.
Args:
identifier: identifier to use as a reference if the corresponding data is already cached
db_instance: Connected Database file with `DBConnect()`.
Returns:
dict: object stored in the cache
"""
filename = retrieve_cache_fn(identifier, db_instance)
return json.loads(filename.read_text()) |
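A minimal round-trip sketch of the caching helpers above, assuming a database handle created with `DBConnect()` (its arguments are not shown in this section, so the call below is an assumption):

# Hypothetical usage sketch; the DBConnect() call and identifier are assumptions
db_instance = DBConnect()                                      # connect the tracking SQLite database
record = {'identifier': 'run-42', 'values': [1, 2, 3]}
store_cache_object('summary', 'run-42', record, db_instance)   # write the JSON file + tracking row
cached = retrieve_cache_object('run-42', db_instance)          # read the same dict back
assert cached == record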
Python | def no_log_errors(dash_duo, suppressed_errors=None):
"""Return True if any unsuppressed errors found in console logs.
Args:
dash_duo: dash_duo instance
suppressed_errors: list of suppressed error strings. Default is None to check for any log errors
Returns:
boolean: True if no unsuppressed errors found in dash logs
"""
if suppressed_errors is None:
suppressed_errors = []
logs = dash_duo.get_logs()
# logger.debug(logs)
    # HACK: get_logs always returns None with webdrivers other than Chrome
# FIXME: Handle path to the executable. Example with Firefox when the Gecko Drive is installed and on path
# poetry run pytest tests -x -l --ff -vv --webdriver Firefox
# Will one of these work?
# - https://pypi.org/project/webdrivermanager/
# - https://pypi.org/project/chromedriver-binary/
# - https://pypi.org/project/undetected-chromedriver/
# - https://pypi.org/project/webdriver-manager/
#
# Actually set DASH_TEST_CHROMEPATH? Maybe still use one of the above packages to get the path?
# - https://github.com/plotly/dash/blob/5ef534943852f2d02a9da636cf18357c5df5b3e5/dash/testing/browser.py#L436
return logs is None or not [log for log in logs if log['level'] not in suppressed_errors] | def no_log_errors(dash_duo, suppressed_errors=None):
"""Return True if any unsuppressed errors found in console logs.
Args:
dash_duo: dash_duo instance
suppressed_errors: list of suppressed error strings. Default is None to check for any log errors
Returns:
boolean: True if no unsuppressed errors found in dash logs
"""
if suppressed_errors is None:
suppressed_errors = []
logs = dash_duo.get_logs()
# logger.debug(logs)
    # HACK: get_logs always returns None with webdrivers other than Chrome
# FIXME: Handle path to the executable. Example with Firefox when the Gecko Drive is installed and on path
# poetry run pytest tests -x -l --ff -vv --webdriver Firefox
# Will one of these work?
# - https://pypi.org/project/webdrivermanager/
# - https://pypi.org/project/chromedriver-binary/
# - https://pypi.org/project/undetected-chromedriver/
# - https://pypi.org/project/webdriver-manager/
#
# Actually set DASH_TEST_CHROMEPATH? Maybe still use one of the above packages to get the path?
# - https://github.com/plotly/dash/blob/5ef534943852f2d02a9da636cf18357c5df5b3e5/dash/testing/browser.py#L436
return logs is None or not [log for log in logs if log['level'] not in suppressed_errors] |
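A hedged example of how `no_log_errors` might be used in a Dash integration test; the toy app and layout are assumptions, while the `dash_duo` fixture usage follows the standard `dash[testing]` pattern:

import dash
import dash_html_components as html

def test_app_renders_without_console_errors(dash_duo):
    app = dash.Dash(__name__)
    app.layout = html.Div('Hello')          # trivial layout, just for the smoke test
    dash_duo.start_server(app)
    # Fails if the browser console contains any error that is not explicitly suppressed
    assert no_log_errors(dash_duo)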
Python | def conv1d_transpose_wrap(value,
filter,
output_shape,
stride,
padding="SAME",
data_format="NWC",
name=None):
"""Wrap the built-in (contrib) conv1d_transpose function so that output
has a batch size determined at runtime, rather than being fixed by whatever
batch size was used during training"""
dyn_input_shape = tf.shape(value)
batch_size = dyn_input_shape[0]
output_shape = tf.stack([batch_size, output_shape[1], output_shape[2]])
return tf.contrib.nn.conv1d_transpose(
value,
filter,
output_shape,
stride,
padding=padding,
data_format=data_format,
name=name
) | def conv1d_transpose_wrap(value,
filter,
output_shape,
stride,
padding="SAME",
data_format="NWC",
name=None):
"""Wrap the built-in (contrib) conv1d_transpose function so that output
has a batch size determined at runtime, rather than being fixed by whatever
batch size was used during training"""
dyn_input_shape = tf.shape(value)
batch_size = dyn_input_shape[0]
output_shape = tf.stack([batch_size, output_shape[1], output_shape[2]])
return tf.contrib.nn.conv1d_transpose(
value,
filter,
output_shape,
stride,
padding=padding,
data_format=data_format,
name=name
) |
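A small TensorFlow 1.x usage sketch for the wrapper above; the shapes are illustrative, and the first entry of `output_shape` is a dummy because the wrapper replaces it with the runtime batch size:

import tensorflow as tf

value = tf.placeholder(tf.float32, [None, 75, 4])      # (batch, width, in_channels)
filt = tf.Variable(tf.random_normal([5, 8, 4]))        # (filter_width, out_channels, in_channels)
# The leading 0 is ignored; conv1d_transpose_wrap substitutes tf.shape(value)[0]
up = conv1d_transpose_wrap(value, filt, [0, 150, 8], stride=2)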
Python | def MakeBoundaryLoss(Geometry_tensor, boundary):
"""
Make the boundary loss using boundary given
:param Geometry_tensor: 8 element geometry h0 h1 h2 h3 r0 r1 r2 r3
:param boundary: 4 element numpy array representing [h_low, h_high, r_low, r_high]
    return Boundary_loss: penalty applied to geometry values that fall outside the given boundary
"""
tolerance = 0
print("Geometry_tensor_shape",Geometry_tensor.shape)
#Make constants
print(boundary[0] * np.ones([1,4]))
h_low = tf.constant((boundary[0] - tolerance) * np.ones([1,4]), name= 'h_low',dtype=tf.float32)
h_high = tf.constant((boundary[1] + tolerance) * np.ones([1,4]), name= 'h_high',dtype=tf.float32)
r_low = tf.constant((boundary[2] - tolerance) * np.ones([1,4]), name= 'r_low',dtype=tf.float32)
r_high = tf.constant((boundary[3] + tolerance) * np.ones([1,4]), name= 'r_high',dtype=tf.float32)
#Get the 2 separate parts
h = Geometry_tensor[:,0:4]
r = Geometry_tensor[:,4:]
zero = tf.constant(0,dtype=tf.float32,name='zero')
print("shape of h:",h.shape)
print("shape of r:",r.shape)
print("shape of h_low:",h_low.shape)
Boundary_loss = tf.reduce_sum(tf.math.maximum(zero, tf.math.subtract(h, h_high)) + tf.math.maximum(zero, tf.math.subtract(h_low, h) ) +\
tf.math.maximum(zero, tf.math.subtract(r, r_high)) + tf.math.maximum(zero, tf.math.subtract(r_low, r) ))
return Boundary_loss | def MakeBoundaryLoss(Geometry_tensor, boundary):
"""
Make the boundary loss using boundary given
:param Geometry_tensor: 8 element geometry h0 h1 h2 h3 r0 r1 r2 r3
:param boundary: 4 element numpy array representing [h_low, h_high, r_low, r_high]
    return Boundary_loss: penalty applied to geometry values that fall outside the given boundary
"""
tolerance = 0
print("Geometry_tensor_shape",Geometry_tensor.shape)
#Make constants
print(boundary[0] * np.ones([1,4]))
h_low = tf.constant((boundary[0] - tolerance) * np.ones([1,4]), name= 'h_low',dtype=tf.float32)
h_high = tf.constant((boundary[1] + tolerance) * np.ones([1,4]), name= 'h_high',dtype=tf.float32)
r_low = tf.constant((boundary[2] - tolerance) * np.ones([1,4]), name= 'r_low',dtype=tf.float32)
r_high = tf.constant((boundary[3] + tolerance) * np.ones([1,4]), name= 'r_high',dtype=tf.float32)
#Get the 2 separate parts
h = Geometry_tensor[:,0:4]
r = Geometry_tensor[:,4:]
zero = tf.constant(0,dtype=tf.float32,name='zero')
print("shape of h:",h.shape)
print("shape of r:",r.shape)
print("shape of h_low:",h_low.shape)
Boundary_loss = tf.reduce_sum(tf.math.maximum(zero, tf.math.subtract(h, h_high)) + tf.math.maximum(zero, tf.math.subtract(h_low, h) ) +\
tf.math.maximum(zero, tf.math.subtract(r, r_high)) + tf.math.maximum(zero, tf.math.subtract(r_low, r) ))
return Boundary_loss |
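A NumPy-only sketch of the same hinge-style penalty, handy for sanity-checking the TensorFlow version above; the boundary values used here are illustrative assumptions:

import numpy as np

def boundary_penalty_np(geometry, boundary):
    # geometry: (batch, 8) array of h0..h3, r0..r3; boundary: [h_low, h_high, r_low, r_high]
    h, r = geometry[:, 0:4], geometry[:, 4:]
    h_low, h_high, r_low, r_high = boundary
    # Only the amount by which a value leaves its allowed interval is penalized
    return np.sum(np.maximum(0, h - h_high) + np.maximum(0, h_low - h)
                  + np.maximum(0, r - r_high) + np.maximum(0, r_low - r))

print(boundary_penalty_np(np.array([[50., 20, 30, 60, 42, 45, 48, 55]]), [30, 55, 42, 52]))  # 18.0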
Python | def back_prop_model(features, batch_size, clip,
fc_filters, tconv_fNums, tconv_dims, tconv_filters,
n_filter, n_branch, reg_scale, boundary):
"""
    Customized model for back-propagation inference.
    Uses an extra trainable variable in place of the input features so the geometry itself can be optimized.
"""
#Make the variable geometry
geometry_variable = tf.get_variable("Geometry_var", shape= features.shape, dtype = tf.float32,
initializer = tf.zeros_initializer(), trainable = True)
#Make a condition that if variable is True, train from feature
train_Forward = tf.get_variable("train_forward",[],dtype = tf.bool,
initializer = tf.zeros_initializer(),trainable =False)
forward_in = tf.cond(train_Forward, true_fn= lambda: features, false_fn= lambda: geometry_variable)
#Make the Boundary Loss
Boundary_loss = MakeBoundaryLoss(forward_in, boundary)
fc = forward_in
#print("Backward_Out:", backward_out)
#print("features:", features)
#print("FC layer:",fc)
for cnt, filters in enumerate(fc_filters):
fc = tf.layers.dense(inputs=fc, units=filters, activation=tf.nn.leaky_relu, name='fc{}'.format(cnt),
kernel_initializer=tf.random_normal_initializer(stddev=0.02),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=reg_scale))
preTconv = fc
tf.summary.histogram("preTconv", preTconv[0]) # select 0th element or else histogram reduces the batch
up = tf.expand_dims(preTconv, axis=2)
feature_dim = fc_filters[-1]
last_filter = 1
for cnt, (up_fNum, up_size, up_filter) in enumerate(zip(tconv_fNums, tconv_dims, tconv_filters)):
assert up_size % feature_dim == 0, "up_size={} while feature_dim={} (cnt={})! " \
"Thus mod is {}".format(up_size, feature_dim, cnt, up_size%feature_dim)
stride = up_size // feature_dim
feature_dim = up_size
f = tf.Variable(tf.random_normal([up_fNum, up_filter, last_filter]))
up = conv1d_transpose_wrap(up, f, [batch_size, up_size, up_filter], stride, name='up{}'.format(cnt))
last_filter = up_filter
preconv = up
up = tf.layers.conv1d(preconv, 1, 1, activation=None, name='conv_final')
up = up[:, clip:-clip]
up = tf.squeeze(up, axis=2)
# up = tf.layers.dense(inputs=up, units=tconv_dims[-1], activation=tf.nn.leaky_relu, name='fc_final',
# kernel_initializer=tf.random_normal_initializer(stddev=0.02))
merged_summary_op = tf.summary.merge_all()
return forward_in, up, merged_summary_op, geometry_variable, train_Forward, Boundary_loss | def back_prop_model(features, batch_size, clip,
fc_filters, tconv_fNums, tconv_dims, tconv_filters,
n_filter, n_branch, reg_scale, boundary):
"""
    Customized model for back-propagation inference.
    Uses an extra trainable variable in place of the input features so the geometry itself can be optimized.
"""
#Make the variable geometry
geometry_variable = tf.get_variable("Geometry_var", shape= features.shape, dtype = tf.float32,
initializer = tf.zeros_initializer(), trainable = True)
#Make a condition that if variable is True, train from feature
train_Forward = tf.get_variable("train_forward",[],dtype = tf.bool,
initializer = tf.zeros_initializer(),trainable =False)
forward_in = tf.cond(train_Forward, true_fn= lambda: features, false_fn= lambda: geometry_variable)
#Make the Boundary Loss
Boundary_loss = MakeBoundaryLoss(forward_in, boundary)
fc = forward_in
#print("Backward_Out:", backward_out)
#print("features:", features)
#print("FC layer:",fc)
for cnt, filters in enumerate(fc_filters):
fc = tf.layers.dense(inputs=fc, units=filters, activation=tf.nn.leaky_relu, name='fc{}'.format(cnt),
kernel_initializer=tf.random_normal_initializer(stddev=0.02),
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=reg_scale))
preTconv = fc
tf.summary.histogram("preTconv", preTconv[0]) # select 0th element or else histogram reduces the batch
up = tf.expand_dims(preTconv, axis=2)
feature_dim = fc_filters[-1]
last_filter = 1
for cnt, (up_fNum, up_size, up_filter) in enumerate(zip(tconv_fNums, tconv_dims, tconv_filters)):
assert up_size % feature_dim == 0, "up_size={} while feature_dim={} (cnt={})! " \
"Thus mod is {}".format(up_size, feature_dim, cnt, up_size%feature_dim)
stride = up_size // feature_dim
feature_dim = up_size
f = tf.Variable(tf.random_normal([up_fNum, up_filter, last_filter]))
up = conv1d_transpose_wrap(up, f, [batch_size, up_size, up_filter], stride, name='up{}'.format(cnt))
last_filter = up_filter
preconv = up
up = tf.layers.conv1d(preconv, 1, 1, activation=None, name='conv_final')
up = up[:, clip:-clip]
up = tf.squeeze(up, axis=2)
# up = tf.layers.dense(inputs=up, units=tconv_dims[-1], activation=tf.nn.leaky_relu, name='fc_final',
# kernel_initializer=tf.random_normal_initializer(stddev=0.02))
merged_summary_op = tf.summary.merge_all()
return forward_in, up, merged_summary_op, geometry_variable, train_Forward, Boundary_loss |
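The core trick in `back_prop_model` is routing either the real features or a trainable stand-in variable into the same forward graph; a stripped-down TF 1.x sketch of that switch (batch size and geometry dimension are illustrative assumptions):

import tensorflow as tf

batch_size, geom_dim = 100, 8
features = tf.placeholder(tf.float32, [batch_size, geom_dim])
geometry_var = tf.get_variable('geometry_sketch', shape=[batch_size, geom_dim],
                               initializer=tf.zeros_initializer(), trainable=True)
use_features = tf.get_variable('use_features_sketch', [], dtype=tf.bool,
                               initializer=tf.zeros_initializer(), trainable=False)
# True -> train the forward network on real data; False -> back-propagate into geometry_var
forward_in = tf.cond(use_features, true_fn=lambda: features, false_fn=lambda: geometry_var)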
Python | def initializeInBoundary(shape, boundary):
"""
Initialize a np array within the boundary
"""
RN = np.random.random(size = shape)
RN[:,0:4] = RN[:,0:4] * (boundary[1] - boundary[0]) +boundary[0]
RN[:,4:] = RN[:,4:] * (boundary[3] - boundary[2]) +boundary[2]
#print(RN)
return RN | def initializeInBoundary(shape, boundary):
"""
Initialize a np array within the boundary
"""
RN = np.random.random(size = shape)
RN[:,0:4] = RN[:,0:4] * (boundary[1] - boundary[0]) +boundary[0]
RN[:,4:] = RN[:,4:] * (boundary[3] - boundary[2]) +boundary[2]
#print(RN)
return RN |
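For example, re-initializing the trainable geometry before an inference run so gradient descent starts from a random in-boundary point (the boundary values below are illustrative assumptions):

init_geometry = initializeInBoundary([100, 8], boundary=[30, 55, 42, 52])
# e.g. sess.run(tf.assign(geometry_variable, init_geometry)) to reset the trainable geometry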
Python | def read_flag():
"""
    This function reads the flags from the parameter file and puts them into the expected format
:return: flags: a struct where all the input params are stored
"""
parser = argparse.ArgumentParser()
parser.add_argument('--linear', type=list, default=LINEAR, help='The fc layers units')
parser.add_argument('--conv-out-channel', type=list, default=CONV_OUT_CHANNEL, help='The output channel of your 1d conv')
parser.add_argument('--conv-kernel-size', type=list, default=CONV_KERNEL_SIZE, help='The kernel size of your 1d conv')
parser.add_argument('--conv-stride', type=list, default=CONV_STRIDE, help='The strides of your 1d conv')
parser.add_argument('--reg-scale', type=float, default=REG_SCALE, help='#scale for regularization of dense layers')
parser.add_argument('--x-range', type=list, default=X_RANGE, help='columns of input parameters')
parser.add_argument('--y-range', type=list, default=Y_RANGE, help='columns of output parameters')
parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size (100)')
parser.add_argument('--eval-step', default=EVAL_STEP, type=int, help='# steps between evaluations')
parser.add_argument('--train-step', default=TRAIN_STEP, type=int, help='# steps to train on the dataSet')
parser.add_argument('--lr', default=LEARN_RATE, type=float, help='learning rate')
parser.add_argument('--decay-step', default=DECAY_STEP, type=int,
help='decay learning rate at this number of steps')
parser.add_argument('--decay-rate', default=DECAY_RATE, type=float,
help='decay learn rate by multiplying this factor')
parser.add_argument('--model-name', default=MODEL_NAME, type=str, help='name of the model')
parser.add_argument('--data-dir', default=DATA_DIR, type=str, help='data directory')
parser.add_argument('--normalize-input', default=NORMALIZE_INPUT, type=bool,
help='whether we should normalize the input or not')
parser.add_argument('--stop_threshold', default=STOP_THRESHOLD, type=float,
help='The threshold below which training should stop')
parser.add_argument('--optim', default=OPTIM, type=str, help='the type of optimizer that you want to use')
parser.add_argument('--geoboundary', default=GEOBOUNDARY, type=tuple, help='the boundary of the geometric data')
flags = parser.parse_args() # This is for command line version of the code
# flags = parser.parse_args(args = [])#This is for jupyter notebook version of the code
# flagsVar = vars(flags)
return flags | def read_flag():
"""
    This function reads the flags from the parameter file and puts them into the expected format
:return: flags: a struct where all the input params are stored
"""
parser = argparse.ArgumentParser()
parser.add_argument('--linear', type=list, default=LINEAR, help='The fc layers units')
parser.add_argument('--conv-out-channel', type=list, default=CONV_OUT_CHANNEL, help='The output channel of your 1d conv')
parser.add_argument('--conv-kernel-size', type=list, default=CONV_KERNEL_SIZE, help='The kernel size of your 1d conv')
parser.add_argument('--conv-stride', type=list, default=CONV_STRIDE, help='The strides of your 1d conv')
parser.add_argument('--reg-scale', type=float, default=REG_SCALE, help='#scale for regularization of dense layers')
parser.add_argument('--x-range', type=list, default=X_RANGE, help='columns of input parameters')
parser.add_argument('--y-range', type=list, default=Y_RANGE, help='columns of output parameters')
parser.add_argument('--batch-size', default=BATCH_SIZE, type=int, help='batch size (100)')
parser.add_argument('--eval-step', default=EVAL_STEP, type=int, help='# steps between evaluations')
parser.add_argument('--train-step', default=TRAIN_STEP, type=int, help='# steps to train on the dataSet')
parser.add_argument('--lr', default=LEARN_RATE, type=float, help='learning rate')
parser.add_argument('--decay-step', default=DECAY_STEP, type=int,
help='decay learning rate at this number of steps')
parser.add_argument('--decay-rate', default=DECAY_RATE, type=float,
help='decay learn rate by multiplying this factor')
parser.add_argument('--model-name', default=MODEL_NAME, type=str, help='name of the model')
parser.add_argument('--data-dir', default=DATA_DIR, type=str, help='data directory')
parser.add_argument('--normalize-input', default=NORMALIZE_INPUT, type=bool,
help='whether we should normalize the input or not')
parser.add_argument('--stop_threshold', default=STOP_THRESHOLD, type=float,
help='The threshold below which training should stop')
parser.add_argument('--optim', default=OPTIM, type=str, help='the type of optimizer that you want to use')
parser.add_argument('--geoboundary', default=GEOBOUNDARY, type=tuple, help='the boundary of the geometric data')
flags = parser.parse_args() # This is for command line version of the code
# flags = parser.parse_args(args = [])#This is for jupyter notebook version of the code
# flagsVar = vars(flags)
return flags |
Python | def write_flags_and_BVE(flags, best_validation_loss):
"""
The function that is usually executed at the end of the training where the flags and the best validation loss are recorded
    They are put in the folder from which this function was called and saved as "parameters.txt"
This parameter.txt is also attached to the generated email
:param flags: The flags struct containing all the parameters
:param best_validation_loss: The best_validation_loss recorded in a training
:return: None
"""
#To avoid terrible looking shape of y_range
yrange = flags.y_range
yrange_str = str(yrange[0]) + ' to ' + str(yrange[-1])
yrange_str = [yrange[0], yrange[-1]]
flags_dict = vars(flags)
    flags_dict_copy = flags_dict.copy() # in order to not corrupt the original data structure
flags_dict_copy['y_range'] = yrange_str # Change the y range to be acceptable long string
flags_dict_copy['best_validation_loss'] = best_validation_loss #Append the bvl
    #Write the dictionary to a plain text file for record keeping
print(flags_dict_copy)
with open('parameters.txt','w') as f:
print(flags_dict_copy, file=f) | def write_flags_and_BVE(flags, best_validation_loss):
"""
The function that is usually executed at the end of the training where the flags and the best validation loss are recorded
    They are put in the folder from which this function was called and saved as "parameters.txt"
This parameter.txt is also attached to the generated email
:param flags: The flags struct containing all the parameters
:param best_validation_loss: The best_validation_loss recorded in a training
:return: None
"""
#To avoid terrible looking shape of y_range
yrange = flags.y_range
yrange_str = str(yrange[0]) + ' to ' + str(yrange[-1])
yrange_str = [yrange[0], yrange[-1]]
flags_dict = vars(flags)
    flags_dict_copy = flags_dict.copy() # in order to not corrupt the original data structure
flags_dict_copy['y_range'] = yrange_str # Change the y range to be acceptable long string
flags_dict_copy['best_validation_loss'] = best_validation_loss #Append the bvl
    #Write the dictionary to a plain text file for record keeping
print(flags_dict_copy)
with open('parameters.txt','w') as f:
print(flags_dict_copy, file=f) |
Python | def generate_point_set(n_list, random_seed = 42):
"""
Generate len(n) random 2D points and return the list of points (2D arrays)
    The number of points in each set is given in n_list
    The point coordinates are drawn uniformly from 0 to 1
"""
points_list = []
np.random.seed(random_seed)
for n in n_list:
points_list.append(np.random.rand(n,2))
return points_list | def generate_point_set(n_list, random_seed = 42):
"""
Generate len(n) random 2D points and return the list of points (2D arrays)
    The number of points in each set is given in n_list
    The point coordinates are drawn uniformly from 0 to 1
"""
points_list = []
np.random.seed(random_seed)
for n in n_list:
points_list.append(np.random.rand(n,2))
return points_list |
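A quick usage sketch of the generator above:

points_list = generate_point_set([5, 10, 20], random_seed=0)
print(len(points_list))        # 3 point sets
print(points_list[0].shape)    # (5, 2), coordinates uniform in [0, 1)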
Python | def plot_point_sets(points_list, Div_mat, diversity_names):#, diversity_measure_list, diversity_measure_name_list):
"""
Plot the points in the points list with the diversity measurement
:param points_list: the list of points
    :param Div_mat: matrix of diversity values, one row per point set and one column per measure
    :param diversity_names: names of the diversity measures, used as the table column headers
"""
df = pd.DataFrame(Div_mat)#, columns = diversity_names)
df.columns = diversity_names
for cnt, points in enumerate(points_list):
#df = pd.DataFrame(np.transpose(Div_mat[cnt,:]))#, columns = diversity_names)
#df.info()
#df = df.T
#df.columns = diversity_names
f = plt.figure()
ax = plt.gca()
table(ax, np.round(df.iloc[cnt,:],2), loc = 'upper right', colWidths=[0.05])
plt.scatter(points[:,0],points[:,1])
plt.xlabel('x1')
plt.ylabel('x2')
plt.title(str(cnt))
plt.xlim(0,1)
plt.ylim(0,1)
f.savefig('{}.png'.format(cnt)) | def plot_point_sets(points_list, Div_mat, diversity_names):#, diversity_measure_list, diversity_measure_name_list):
"""
Plot the points in the points list with the diversity measurement
:param points_list: the list of points
    :param Div_mat: matrix of diversity values, one row per point set and one column per measure
    :param diversity_names: names of the diversity measures, used as the table column headers
"""
df = pd.DataFrame(Div_mat)#, columns = diversity_names)
df.columns = diversity_names
for cnt, points in enumerate(points_list):
#df = pd.DataFrame(np.transpose(Div_mat[cnt,:]))#, columns = diversity_names)
#df.info()
#df = df.T
#df.columns = diversity_names
f = plt.figure()
ax = plt.gca()
table(ax, np.round(df.iloc[cnt,:],2), loc = 'upper right', colWidths=[0.05])
plt.scatter(points[:,0],points[:,1])
plt.xlabel('x1')
plt.ylabel('x2')
plt.title(str(cnt))
plt.xlim(0,1)
plt.ylim(0,1)
f.savefig('{}.png'.format(cnt)) |
Python | def heat_maps_for_metrics(points, diversity_measures, diversity_names, save_name = ''):
"""
The function which plots the color map for different diversity measurements
"""
X = np.arange(0,1,0.01)
Y = np.arange(0,1,0.01)
X_grid,Y_grid = np.meshgrid(X,Y)
#Z = np.arrays(X_grid)
grid_h, grid_w = np.shape(X_grid)
Z = np.zeros([grid_h, grid_w])
h,w = np.shape(points)
point_new = np.zeros([h+1,w])
assert w==2
point_new[0:h,:] = points
point_grid = [[[] for col in range(grid_w)] for row in range(grid_h)]
for i in range(grid_h):
for j in range(grid_w):
point_new[h,0] = X[i]
point_new[h,1] = Y[j]
point_grid[i][j] = np.array(point_new)
#print('point new at {},{} is {}'.format(i,j,point_new))
for cnt, (div_measure, div_name) in enumerate(zip(diversity_measures, diversity_names)):
f = plt.figure()
ax = plt.gca()
current_metric_value = div_measure(points)
print("current metric value",current_metric_value)
for i in range(grid_h):
for j in range(grid_w):
#print('for point {}, {}, the point is {}'.format(i,j,point_grid[i][j]))
Z[i][j] = div_measure(point_grid[i][j])
#print(Z)
#plt.scatter(points[:,0], points[:,1],color = 'k',linewidths = 15,label = 'Anchor points')
Zmax = np.max(Z)
Zmin = np.min(Z)
C = ax.pcolormesh(X_grid, Y_grid, Z, cmap = 'jet')
plt.scatter(points[:,1], points[:,0],color = 'k',linewidths = 1,label = 'Anchor {}'.format(current_metric_value))
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('color map for {} metric'.format(div_name))
plt.xlim(0,1)
plt.ylim(0,1)
fb = f.colorbar(C, ax = ax)
line_pos = (current_metric_value - Zmin)/ (Zmax - Zmin)
fb.ax.plot([0,1],[line_pos, line_pos], color = 'w', linewidth = 3,label = 'current value')
plt.legend()
f.savefig(save_name + div_name + 'heatmap')
plt.close('all') | def heat_maps_for_metrics(points, diversity_measures, diversity_names, save_name = ''):
"""
The function which plots the color map for different diversity measurements
"""
X = np.arange(0,1,0.01)
Y = np.arange(0,1,0.01)
X_grid,Y_grid = np.meshgrid(X,Y)
#Z = np.arrays(X_grid)
grid_h, grid_w = np.shape(X_grid)
Z = np.zeros([grid_h, grid_w])
h,w = np.shape(points)
point_new = np.zeros([h+1,w])
assert w==2
point_new[0:h,:] = points
point_grid = [[[] for col in range(grid_w)] for row in range(grid_h)]
for i in range(grid_h):
for j in range(grid_w):
point_new[h,0] = X[i]
point_new[h,1] = Y[j]
point_grid[i][j] = np.array(point_new)
#print('point new at {},{} is {}'.format(i,j,point_new))
for cnt, (div_measure, div_name) in enumerate(zip(diversity_measures, diversity_names)):
f = plt.figure()
ax = plt.gca()
current_metric_value = div_measure(points)
print("current metric value",current_metric_value)
for i in range(grid_h):
for j in range(grid_w):
#print('for point {}, {}, the point is {}'.format(i,j,point_grid[i][j]))
Z[i][j] = div_measure(point_grid[i][j])
#print(Z)
#plt.scatter(points[:,0], points[:,1],color = 'k',linewidths = 15,label = 'Anchor points')
Zmax = np.max(Z)
Zmin = np.min(Z)
C = ax.pcolormesh(X_grid, Y_grid, Z, cmap = 'jet')
plt.scatter(points[:,1], points[:,0],color = 'k',linewidths = 1,label = 'Anchor {}'.format(current_metric_value))
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('color map for {} metric'.format(div_name))
plt.xlim(0,1)
plt.ylim(0,1)
fb = f.colorbar(C, ax = ax)
line_pos = (current_metric_value - Zmin)/ (Zmax - Zmin)
fb.ax.plot([0,1],[line_pos, line_pos], color = 'w', linewidth = 3,label = 'current value')
plt.legend()
f.savefig(save_name + div_name + 'heatmap')
plt.close('all') |
Python | def write_record(self):
"""
Write records, including model_fn, parameters into the checkpoint folder
These records can be used to reconstruct & repeat experiments
:return:
"""
        #inspect.getsource returns the text of the source code for an object
model_fn_str = inspect.getsource(self.model_fn) #Get the text of the source code of the object
params = inspect.getmembers(self, lambda a: not inspect.isroutine(a)) #get all the members that are not a routine (function)
params = [a for a in params if not (a[0].startswith('__') and a[0].endswith('__'))]
with open(os.path.join(self.ckpt_dir, 'model_meta.txt'), 'w+') as f:
f.write('model_fn:\n')
f.writelines(model_fn_str)
f.write('\nparams:\n')
for key, val in params:
f.write('{}: {}\n'.format(key, val)) | def write_record(self):
"""
Write records, including model_fn, parameters into the checkpoint folder
These records can be used to reconstruct & repeat experiments
:return:
"""
        #inspect.getsource returns the text of the source code for an object
model_fn_str = inspect.getsource(self.model_fn) #Get the text of the source code of the object
params = inspect.getmembers(self, lambda a: not inspect.isroutine(a)) #get all the members that are not a routine (function)
params = [a for a in params if not (a[0].startswith('__') and a[0].endswith('__'))]
with open(os.path.join(self.ckpt_dir, 'model_meta.txt'), 'w+') as f:
f.write('model_fn:\n')
f.writelines(model_fn_str)
f.write('\nparams:\n')
for key, val in params:
f.write('{}: {}\n'.format(key, val)) |
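A self-contained sketch of the `inspect`-based bookkeeping pattern used in `write_record`, outside of the class; the `Tiny` class and its attribute are assumptions for illustration:

import inspect

class Tiny:
    learn_rate = 1e-3          # plain attribute, kept by the filter below

    def model_fn(self):        # routine, dropped by the isroutine filter
        return None

params = inspect.getmembers(Tiny(), lambda a: not inspect.isroutine(a))
params = [p for p in params if not (p[0].startswith('__') and p[0].endswith('__'))]
print(params)                  # [('learn_rate', 0.001)]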
Python | def make_loss(self):
"""
        Make the total loss for the forward part of the model (MSE + regularization + boundary penalty)
        :return: the total loss together with its MSE, regularization, and boundary components for the batch
"""
with tf.variable_scope('loss'):
mse_loss = tf.losses.mean_squared_error(self.labels, self.logits) #reconstruction loss
            reg_loss = tf.losses.get_regularization_loss() #regularization loss
bdy_loss = self.Boundary_loss #boundary loss
total_loss = mse_loss + reg_loss + bdy_loss #Total loss
return total_loss, mse_loss, reg_loss, bdy_loss | def make_loss(self):
"""
        Make the total loss for the forward part of the model (MSE + regularization + boundary penalty)
        :return: the total loss together with its MSE, regularization, and boundary components for the batch
"""
with tf.variable_scope('loss'):
mse_loss = tf.losses.mean_squared_error(self.labels, self.logits) #reconstruction loss
            reg_loss = tf.losses.get_regularization_loss() #regularization loss
bdy_loss = self.Boundary_loss #boundary loss
total_loss = mse_loss + reg_loss + bdy_loss #Total loss
return total_loss, mse_loss, reg_loss, bdy_loss |
Python | def make_backprop_optimizer(self):
"""
        Make a back-propagation optimizer with the learning rate defined when the class is initialized
:return: an AdamOptimizer
"""
return tf.train.AdamOptimizer(learning_rate=self.learn_rate * 5000).minimize(self.loss,
self.global_step,
var_list = [self.geometry_variable]) | def make_backprop_optimizer(self):
"""
        Make a back-propagation optimizer with the learning rate defined when the class is initialized
:return: an AdamOptimizer
"""
return tf.train.AdamOptimizer(learning_rate=self.learn_rate * 5000).minimize(self.loss,
self.global_step,
var_list = [self.geometry_variable]) |
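The important detail above is `var_list`, which restricts the gradient update to the geometry variable while the trained network weights stay frozen; a tiny runnable TF 1.x sketch of that pattern (the toy loss and variables are assumptions):

import tensorflow as tf

w = tf.Variable(2.0, name='frozen_weight')     # stands in for trained forward-model weights
g = tf.Variable(0.0, name='geometry_sketch')   # the only variable we want to update
loss = tf.square(w * g - 1.0)
step = tf.train.AdamOptimizer(0.1).minimize(loss, var_list=[g])   # w receives no gradient update
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(500):
        sess.run(step)
    print(sess.run([w, g]))                    # w stays 2.0, g approaches 0.5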
Python | def train(self, train_init_op, step_num, forward_hooks,\
write_summary=False,load_forward_ckpt = None):
"""
Train the model with step_num steps
First train the forward model and then the tandem part
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
:param hooks: hooks for monitoring the training process
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if load_forward_ckpt != None:
self.load(sess, load_forward_ckpt)
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
print("Training forward model now:")
assign_true_op = self.train_Forward.assign(True)
sess.run([train_init_op, assign_true_op])
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op, assign_true_op])
[feature, optm_out] = sess.run([self.features ,self.optm])
if (i % 500 == 0):
print("Feature now is:", feature[0,:])
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = forward_hooks[-1].best_validation_loss
if forward_hooks[-1].stop:
break | def train(self, train_init_op, step_num, forward_hooks,\
write_summary=False,load_forward_ckpt = None):
"""
Train the model with step_num steps
First train the forward model and then the tandem part
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
:param hooks: hooks for monitoring the training process
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if load_forward_ckpt != None:
self.load(sess, load_forward_ckpt)
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
print("Training forward model now:")
assign_true_op = self.train_Forward.assign(True)
sess.run([train_init_op, assign_true_op])
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op, assign_true_op])
[feature, optm_out] = sess.run([self.features ,self.optm])
if (i % 500 == 0):
print("Feature now is:", feature[0,:])
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = forward_hooks[-1].best_validation_loss
if forward_hooks[-1].stop:
break |
Python | def evaluate_one(self, target_spectra, back_prop_epoch, sess, verb_step, stop_thres, point_index):
"""
        The function that evaluates a single given target spectrum and returns the results
:param target_spectra: The target spectra to back prop towards. Should be only 1 row
:param back_prop_epoch: #epochs to do the gradient descend
:param sess: The current session to do the back prop
"""
#Set up target output
print("shape before repeat",np.shape(target_spectra.values))
target_spectra_repeat = np.repeat(np.reshape(target_spectra.values,(1,-1)), self.batch_size, axis = 0)
print("Size of the target spectra repeat", np.shape(target_spectra_repeat))
#target_spectra_dataset = tf.data.Dataset.from_tensor_slices(target_spectra_repeat)
#target_spectra_dataset = target_spectra_dataset.repeat()
for i in range(back_prop_epoch):
loss_back_prop, optm_out, inferred_spectra = sess.run([self.loss, self.backprop_optm, self.logits],
feed_dict={self.labels: target_spectra_repeat})
if (i % verb_step == 0):
print("Loss at inference step{} : {}".format(i,loss_back_prop))
if (loss_back_prop < stop_thres):
print("Loss is lower than the threshold{}, inference stop".format(stop_thres))
break
#Then it is time to get the best performing one
Xpred, Ypred, loss = sess.run([self.forward_in, self.logits, self.loss], feed_dict={self.labels: target_spectra_repeat})
loss_list = np.sum(np.square(Ypred - target_spectra_repeat), axis = 1) / self.batch_size
best_estimate_index = np.argmin(loss_list)
print('best error is {}, in best estimate-indx is {}, squared loss is {}'.format(min(loss_list),
loss_list[best_estimate_index],
loss))
print('Best error for point {} is having absolute error of {}'.format(point_index, loss_list[best_estimate_index]))
Xpred_best = Xpred[best_estimate_index,:]
Ypred_best = Ypred[best_estimate_index,:]
# print("Xpred_best:", Xpred_best)
return Xpred_best, Ypred_best | def evaluate_one(self, target_spectra, back_prop_epoch, sess, verb_step, stop_thres, point_index):
"""
        The function that evaluates a single given target spectrum and returns the results
:param target_spectra: The target spectra to back prop towards. Should be only 1 row
:param back_prop_epoch: #epochs to do the gradient descend
:param sess: The current session to do the back prop
"""
#Set up target output
print("shape before repeat",np.shape(target_spectra.values))
target_spectra_repeat = np.repeat(np.reshape(target_spectra.values,(1,-1)), self.batch_size, axis = 0)
print("Size of the target spectra repeat", np.shape(target_spectra_repeat))
#target_spectra_dataset = tf.data.Dataset.from_tensor_slices(target_spectra_repeat)
#target_spectra_dataset = target_spectra_dataset.repeat()
for i in range(back_prop_epoch):
loss_back_prop, optm_out, inferred_spectra = sess.run([self.loss, self.backprop_optm, self.logits],
feed_dict={self.labels: target_spectra_repeat})
if (i % verb_step == 0):
print("Loss at inference step{} : {}".format(i,loss_back_prop))
if (loss_back_prop < stop_thres):
print("Loss is lower than the threshold{}, inference stop".format(stop_thres))
break
#Then it is time to get the best performing one
Xpred, Ypred, loss = sess.run([self.forward_in, self.logits, self.loss], feed_dict={self.labels: target_spectra_repeat})
loss_list = np.sum(np.square(Ypred - target_spectra_repeat), axis = 1) / self.batch_size
best_estimate_index = np.argmin(loss_list)
print('best error is {}, in best estimate-indx is {}, squared loss is {}'.format(min(loss_list),
loss_list[best_estimate_index],
loss))
print('Best error for point {} is having absolute error of {}'.format(point_index, loss_list[best_estimate_index]))
Xpred_best = Xpred[best_estimate_index,:]
Ypred_best = Ypred[best_estimate_index,:]
# print("Xpred_best:", Xpred_best)
return Xpred_best, Ypred_best |
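The candidate-selection step at the end of `evaluate_one` can be checked in isolation with NumPy; this sketch uses a per-candidate mean squared error (the function above sums the squared error and scales by batch size), and the shapes are illustrative:

import numpy as np

Ypred = np.random.rand(100, 300)           # 100 candidate spectra from the batch
target = np.random.rand(1, 300)            # single target spectrum, broadcast over candidates
per_candidate_mse = np.mean(np.square(Ypred - target), axis=1)
best = np.argmin(per_candidate_mse)        # index of the closest candidate
print(best, per_candidate_mse[best])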
Python | def unpack_Xpred(Xpred_file, batch_size):
"""
    This is the function which unpacks the Xpred file from VAE evaluation into a long file
Since VAE prediction gives #batch_size of Geometry each time, unpack them into a long list for Tandem inference
"""
Xpred = np.loadtxt(Xpred_file, delimiter=' ')
h,w = np.shape(Xpred)
with open("data/Unpackinformation.txt",'w') as f1:
f1.write('The number of data point is {}, each with {} predicted geometries'.format(h, w/8))
Xpred_reshaped = np.transpose(np.reshape(Xpred, (8, -1)))
h,w = np.shape(Xpred_reshaped)
    assert w == 8, "Your unpack function didn't work, check again what was wrong in the evaluation output and unpack"
with open(Xpred_file,'w') as f:
np.savetxt(f, Xpred_reshaped, fmt='%.3f') | def unpack_Xpred(Xpred_file, batch_size):
"""
    This is the function which unpacks the Xpred file from VAE evaluation into a long file
Since VAE prediction gives #batch_size of Geometry each time, unpack them into a long list for Tandem inference
"""
Xpred = np.loadtxt(Xpred_file, delimiter=' ')
h,w = np.shape(Xpred)
with open("data/Unpackinformation.txt",'w') as f1:
f1.write('The number of data point is {}, each with {} predicted geometries'.format(h, w/8))
Xpred_reshaped = np.transpose(np.reshape(Xpred, (8, -1)))
h,w = np.shape(Xpred_reshaped)
    assert w == 8, "Your unpack function didn't work, check again what was wrong in the evaluation output and unpack"
with open(Xpred_file,'w') as f:
np.savetxt(f, Xpred_reshaped, fmt='%.3f') |
Python | def after_Tandem_pred():
"""
This function handles the rest of the evaluation after the Ypred has been generated by the Tandem model (forward model)
"""
data_dir = 'data'
Ypred_file = get_pred_truth_file.get_Ypred(data_dir)
Xpred_file = get_pred_truth_file.get_Xpred(data_dir)
Ytruth_file = get_pred_truth_file.get_Ytruth(data_dir)
Ypred = np.loadtxt(Ypred_file, delimiter = ' ')
Ytruth = np.loadtxt(Ytruth_file, delimiter = ' ')
Xpred = np.loadtxt(Xpred_file, delimiter = ' ')
l_Ypred = len(Ypred)
l_Ytruth = len(Ytruth)
k = l_Ypred / l_Ytruth
print("l_Ypred",l_Ypred)
print("l_Ytruth",l_Ytruth)
print("k",k)
    assert k - int(k) < 0.001,"Check your lengths, the division result k is not an integer!!"
print('For each data point in your truth file, the VAE generated {} data points'.format(k))
k = int(k)
#best_index_list = np.zeros([1,l_Ytruth])
Xpred_new = np.zeros([l_Ytruth,8])
Ypred_new = np.zeros(np.shape(Ytruth))
for i in range(l_Ytruth):
diff_mat = Ypred[i*k:(i+1)*k,:] - Ytruth[i,:]
distance_mat = np.linalg.norm(diff_mat, axis = 1)
        best_index = np.argmin(distance_mat)  # pick the candidate closest to the truth spectrum
#best_index_list[i] = best_index
Xpred_new[i,:] = Xpred[i*k + best_index,:]
Ypred_new[i,:] = Ypred[i*k + best_index,:]
with open(Xpred_file, 'w') as f1:
np.savetxt(f1, Xpred_new, fmt='%.3f')
with open(Ypred_file, 'w') as f2:
np.savetxt(f2, Ypred_new, fmt='%.3f')
mae, mse = compare_truth_pred(Ypred_file, Ytruth_file)
plt.figure(figsize=(12, 6))
plt.hist(mse, bins=100)
plt.xlabel('Mean Squared Error')
plt.ylabel('cnt')
plt.suptitle('VAE (Avg MSE={:.4e})'.format(np.mean(mse)))
plt.savefig(os.path.join(os.path.abspath(''), 'data',
'VAE.png'))
plt.show()
print('VAE (Avg MSE={:.4e})'.format(np.mean(mse))) | def after_Tandem_pred():
"""
This function handles the rest of the evaluation after the Ypred has been generated by the Tandem model (forward model)
"""
data_dir = 'data'
Ypred_file = get_pred_truth_file.get_Ypred(data_dir)
Xpred_file = get_pred_truth_file.get_Xpred(data_dir)
Ytruth_file = get_pred_truth_file.get_Ytruth(data_dir)
Ypred = np.loadtxt(Ypred_file, delimiter = ' ')
Ytruth = np.loadtxt(Ytruth_file, delimiter = ' ')
Xpred = np.loadtxt(Xpred_file, delimiter = ' ')
l_Ypred = len(Ypred)
l_Ytruth = len(Ytruth)
k = l_Ypred / l_Ytruth
print("l_Ypred",l_Ypred)
print("l_Ytruth",l_Ytruth)
print("k",k)
    assert k - int(k) < 0.001,"Check your lengths, the division result k is not an integer!!"
print('For each data point in your truth file, the VAE generated {} data points'.format(k))
k = int(k)
#best_index_list = np.zeros([1,l_Ytruth])
Xpred_new = np.zeros([l_Ytruth,8])
Ypred_new = np.zeros(np.shape(Ytruth))
for i in range(l_Ytruth):
diff_mat = Ypred[i*k:(i+1)*k,:] - Ytruth[i,:]
distance_mat = np.linalg.norm(diff_mat, axis = 1)
        best_index = np.argmin(distance_mat)  # pick the candidate closest to the truth spectrum
#best_index_list[i] = best_index
Xpred_new[i,:] = Xpred[i*k + best_index,:]
Ypred_new[i,:] = Ypred[i*k + best_index,:]
with open(Xpred_file, 'w') as f1:
np.savetxt(f1, Xpred_new, fmt='%.3f')
with open(Ypred_file, 'w') as f2:
np.savetxt(f2, Ypred_new, fmt='%.3f')
mae, mse = compare_truth_pred(Ypred_file, Ytruth_file)
plt.figure(figsize=(12, 6))
plt.hist(mse, bins=100)
plt.xlabel('Mean Squared Error')
plt.ylabel('cnt')
plt.suptitle('VAE (Avg MSE={:.4e})'.format(np.mean(mse)))
plt.savefig(os.path.join(os.path.abspath(''), 'data',
'VAE.png'))
plt.show()
print('VAE (Avg MSE={:.4e})'.format(np.mean(mse))) |
Python | def forward(self, G):
"""
The forward function which defines how the network is connected
:param G: The input geometry (Since this is a forward network)
:return: S: The 300 dimension spectra
"""
out = G # initialize the out
# For the linear part
for ind, (fc, bn) in enumerate(zip(self.linears, self.bn_linears)):
#print(out.size())
out = F.relu(bn(fc(out))) # ReLU + BN + Linear
out = out.unsqueeze(1) # Add 1 dimension to get N,L_in, H
# For the conv part
for ind, conv in enumerate(self.convs):
#print(out.size())
out = conv(out)
# Final touch, because the input is normalized to [-1,1]
S = tanh(out.squeeze())
#print(S.size())
return S | def forward(self, G):
"""
The forward function which defines how the network is connected
:param G: The input geometry (Since this is a forward network)
:return: S: The 300 dimension spectra
"""
out = G # initialize the out
# For the linear part
for ind, (fc, bn) in enumerate(zip(self.linears, self.bn_linears)):
#print(out.size())
out = F.relu(bn(fc(out))) # ReLU + BN + Linear
out = out.unsqueeze(1) # Add 1 dimension to get N,L_in, H
# For the conv part
for ind, conv in enumerate(self.convs):
#print(out.size())
out = conv(out)
# Final touch, because the input is normalized to [-1,1]
S = tanh(out.squeeze())
#print(S.size())
return S |
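For context, a hedged sketch of how the `linears`, `bn_linears`, and `convs` module lists referenced in `forward` might be built in `__init__`; every layer size here is an assumption, not the model's actual configuration:

import torch.nn as nn

class ForwardSketch(nn.Module):
    """Illustrative skeleton only; layer widths and kernel sizes are assumptions."""
    def __init__(self):
        super().__init__()
        units = [8, 100, 150]                     # geometry input, two hidden widths
        self.linears = nn.ModuleList(nn.Linear(i, o) for i, o in zip(units[:-1], units[1:]))
        self.bn_linears = nn.ModuleList(nn.BatchNorm1d(o) for o in units[1:])
        self.convs = nn.ModuleList([
            nn.ConvTranspose1d(1, 4, kernel_size=4, stride=2, padding=1),  # 150 -> 300 samples
            nn.Conv1d(4, 1, kernel_size=1),                                # back to one channel
        ])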
Python | def InferenceAccuracyExamplePlot(model_name, save_name, title, sample_num=10, fig_size=(15,5), random_seed=1,
target_region=[0,300 ]):
"""
The function to plot the Inference accuracy and compare with FFDS algorithm.
    It takes the Ypred and Ytruth files as input and plots the first <sample_num> spectra.
    It also draws 10 random points to act as the target points.
:param model_name: The model name as the postfix for the Ytruth file
:param save_name: The saving name of the figure
:param title: The saving title of the figure
:param sample_num: The number of sample to plot for comparison
:param fig_size: The size of the figure
:param random_seed: The random seed value
:param target_region: The region that the targets get
:return:
"""
# Get the prediction and truth file first
Ytruth_file = os.path.join('data','test_Ytruth_{}.csv'.format(model_name))
Ypred_file = os.path.join('data','test_Ypred_{}.csv'.format(model_name))
Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ').values
Ypred = pd.read_csv(Ypred_file, header=None, delimiter=' ').values
# Draw uniform random distribution for the reference points
np.random.seed(random_seed) # To make sure each time we have same target points
targets = target_region[0] + (target_region[1] - target_region[0]) * np.random.uniform(low=0, high=1, size=10) # Cap the random numbers within 0-299
targets = targets.astype("int")
# Make the frequency into real frequency in THz
fre_low = 0.86
fre_high = 1.5
frequency = fre_low + (fre_high - fre_low)/len(Ytruth[0, :]) * np.arange(300)
for i in range(sample_num):
# Start the plotting
f = plt.figure(figsize=fig_size)
plt.title(title)
plt.scatter(frequency[targets], Ytruth[i,targets], label='S*')
plt.plot(frequency, Ytruth[i,:], label='FFDS')
plt.plot(frequency, Ypred[i,:], label='Candidate')
plt.legend()
plt.ylim([0,1])
plt.xlim([fre_low, fre_high])
plt.grid()
plt.xlabel("Frequency (THz)")
plt.ylabel("Transmittance")
plt.savefig(os.path.join('data',save_name + str(i) + '.png')) | def InferenceAccuracyExamplePlot(model_name, save_name, title, sample_num=10, fig_size=(15,5), random_seed=1,
target_region=[0,300 ]):
"""
The function to plot the Inference accuracy and compare with FFDS algorithm.
    It takes the Ypred and Ytruth files as input and plots the first <sample_num> spectra.
    It also draws 10 random points to act as the target points.
:param model_name: The model name as the postfix for the Ytruth file
:param save_name: The saving name of the figure
:param title: The saving title of the figure
:param sample_num: The number of sample to plot for comparison
:param fig_size: The size of the figure
:param random_seed: The random seed value
:param target_region: The region that the targets get
:return:
"""
# Get the prediction and truth file first
Ytruth_file = os.path.join('data','test_Ytruth_{}.csv'.format(model_name))
Ypred_file = os.path.join('data','test_Ypred_{}.csv'.format(model_name))
Ytruth = pd.read_csv(Ytruth_file, header=None, delimiter=' ').values
Ypred = pd.read_csv(Ypred_file, header=None, delimiter=' ').values
# Draw uniform random distribution for the reference points
np.random.seed(random_seed) # To make sure each time we have same target points
targets = target_region[0] + (target_region[1] - target_region[0]) * np.random.uniform(low=0, high=1, size=10) # Cap the random numbers within 0-299
targets = targets.astype("int")
# Make the frequency into real frequency in THz
fre_low = 0.86
fre_high = 1.5
frequency = fre_low + (fre_high - fre_low)/len(Ytruth[0, :]) * np.arange(300)
for i in range(sample_num):
# Start the plotting
f = plt.figure(figsize=fig_size)
plt.title(title)
plt.scatter(frequency[targets], Ytruth[i,targets], label='S*')
plt.plot(frequency, Ytruth[i,:], label='FFDS')
plt.plot(frequency, Ypred[i,:], label='Candidate')
plt.legend()
plt.ylim([0,1])
plt.xlim([fre_low, fre_high])
plt.grid()
plt.xlabel("Frequency (THz)")
plt.ylabel("Transmittance")
plt.savefig(os.path.join('data',save_name + str(i) + '.png')) |
Python | def RetrieveFeaturePredictionNMse(model_name):
"""
    Retrieve the Feature and Prediction values and place them in numpy arrays
    :param model_name: the name of the model
    return Xtruth, Xpred, Ytruth, Ypred, Ymae, Ymse
"""
    ##Retrieve the truth and prediction files first
feature_file = os.path.join('data', 'test_Xtruth_{}.csv'.format(model_name))
pred_file = os.path.join('data', 'test_Ypred_{}.csv'.format(model_name))
truth_file = os.path.join('data', 'test_Ytruth_{}.csv'.format(model_name))
feat_file = os.path.join('data', 'test_Xpred_{}.csv'.format(model_name))
#Getting the files from file name
Xtruth = pd.read_csv(feature_file,header=None, delimiter=' ')
Xpred = pd.read_csv(feat_file,header=None, delimiter=' ')
Ytruth = pd.read_csv(truth_file,header=None, delimiter=' ')
Ypred = pd.read_csv(pred_file,header=None, delimiter=' ')
#retrieve mse, mae
Ymae, Ymse = evaluate.compare_truth_pred(pred_file, truth_file) #get the maes of y
print(Xtruth.shape)
return Xtruth.values, Xpred.values, Ytruth.values, Ypred.values, Ymae, Ymse | def RetrieveFeaturePredictionNMse(model_name):
"""
    Retrieve the Feature and Prediction values and place them in numpy arrays
    :param model_name: the name of the model
    return Xtruth, Xpred, Ytruth, Ypred, Ymae, Ymse
"""
    ##Retrieve the truth and prediction files first
feature_file = os.path.join('data', 'test_Xtruth_{}.csv'.format(model_name))
pred_file = os.path.join('data', 'test_Ypred_{}.csv'.format(model_name))
truth_file = os.path.join('data', 'test_Ytruth_{}.csv'.format(model_name))
feat_file = os.path.join('data', 'test_Xpred_{}.csv'.format(model_name))
#Getting the files from file name
Xtruth = pd.read_csv(feature_file,header=None, delimiter=' ')
Xpred = pd.read_csv(feat_file,header=None, delimiter=' ')
Ytruth = pd.read_csv(truth_file,header=None, delimiter=' ')
Ypred = pd.read_csv(pred_file,header=None, delimiter=' ')
#retrieve mse, mae
Ymae, Ymse = evaluate.compare_truth_pred(pred_file, truth_file) #get the maes of y
print(Xtruth.shape)
return Xtruth.values, Xpred.values, Ytruth.values, Ypred.values, Ymae, Ymse |
Python | def ImportColorBarLib():
"""
    Import some libraries that are used in a colorbar plot
"""
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib as mpl
print("import sucessful")
return mpl | def ImportColorBarLib():
"""
    Import some libraries that are used in a colorbar plot
"""
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib as mpl
print("import sucessful")
return mpl |
Python | def SpectrumComparisonNGeometryComparison(rownum, colnum, Figsize, model_name, boundary = [-1,1,-1,1]):
"""
Read the Prediction files and plot the spectra comparison plots
    :param rownum, colnum: the row/column arrangement of the spectra comparison subplots
    :param Figsize: the size of the figure
    :param model_name: model name (typically a string of numbers containing date and time)
    :param boundary: [h_low, h_high, r_low, r_high] rectangle drawn as the data region on the geometry plots
"""
mpl = ImportColorBarLib() #import lib
Xtruth, Xpred, Ytruth, Ypred, Ymae, Ymse = RetrieveFeaturePredictionNMse(model_name) #retrieve features
print("Ymse shape:",Ymse.shape)
print("Xpred shape:", Xpred.shape)
print("Xtrth shape:", Xtruth.shape)
    #Plotting the spectrum comparison
f = plt.figure(figsize=Figsize)
fignum = rownum * colnum
for i in range(fignum):
ax = plt.subplot(rownum, colnum, i+1)
plt.ylabel('Transmission rate')
plt.xlabel('frequency')
plt.plot(Ytruth[i], label = 'Truth',linestyle = '--')
plt.plot(Ypred[i], label = 'Prediction',linestyle = '-')
plt.legend()
plt.ylim([0,1])
f.savefig('Spectrum Comparison_{}'.format(model_name))
"""
    Plotting the geometry comparison, there are fignum points in each plot
each representing a data point with a unique marker
8 dimension therefore 4 plots, 2x2 arrangement
"""
#for j in range(fignum):
pointnum = fignum #change #fig to #points in comparison
f = plt.figure(figsize = Figsize)
ax0 = plt.gca()
for i in range(4):
truthmarkers = UniqueMarkers() #Get some unique markers
predmarkers = UniqueMarkers() #Get some unique markers
ax = plt.subplot(2, 2, i+1)
#plt.xlim([29,56]) #setting the heights limit, abandoned because sometime can't see prediciton
#plt.ylim([41,53]) #setting the radius limits
for j in range(pointnum):
#Since the colored scatter only takes 2+ arguments, plot 2 same points to circumvent this problem
predArr = [[Xpred[j, i], Xpred[j, i]] ,[Xpred[j, i + 4], Xpred[j, i + 4]]]
predC = [Ymse[j], Ymse[j]]
truthplot = plt.scatter(Xtruth[j,i],Xtruth[j,i+4],label = 'Xtruth{}'.format(j),
marker = next(truthmarkers),c = 'm',s = 40)
predplot = plt.scatter(predArr[0],predArr[1],label = 'Xpred{}'.format(j),
c =predC ,cmap = 'jet',marker = next(predmarkers), s = 60)
plt.xlabel('h{}'.format(i))
plt.ylabel('r{}'.format(i))
rect = mpl.patches.Rectangle((boundary[0],boundary[2]),boundary[1] - boundary[0], boundary[3] - boundary[2],
linewidth=1,edgecolor='r',
facecolor='none',linestyle = '--',label = 'data region')
ax.add_patch(rect)
plt.autoscale()
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),
mode="expand",ncol = 6, prop={'size': 5})#, bbox_to_anchor=(1,0.5))
cb_ax = f.add_axes([0.93, 0.1, 0.02, 0.8])
cbar = f.colorbar(predplot, cax=cb_ax)
#f.colorbar(predplot)
f.savefig('Geometry Comparison_{}'.format(model_name)) | def SpectrumComparisonNGeometryComparison(rownum, colnum, Figsize, model_name, boundary = [-1,1,-1,1]):
"""
Read the Prediction files and plot the spectra comparison plots
    :param rownum, colnum: the row/column arrangement of the spectra comparison subplots
    :param Figsize: the size of the figure
    :param model_name: model name (typically a string of numbers containing date and time)
    :param boundary: [h_low, h_high, r_low, r_high] rectangle drawn as the data region on the geometry plots
"""
mpl = ImportColorBarLib() #import lib
Xtruth, Xpred, Ytruth, Ypred, Ymae, Ymse = RetrieveFeaturePredictionNMse(model_name) #retrieve features
print("Ymse shape:",Ymse.shape)
print("Xpred shape:", Xpred.shape)
print("Xtrth shape:", Xtruth.shape)
    #Plotting the spectrum comparison
f = plt.figure(figsize=Figsize)
fignum = rownum * colnum
for i in range(fignum):
ax = plt.subplot(rownum, colnum, i+1)
plt.ylabel('Transmission rate')
plt.xlabel('frequency')
plt.plot(Ytruth[i], label = 'Truth',linestyle = '--')
plt.plot(Ypred[i], label = 'Prediction',linestyle = '-')
plt.legend()
plt.ylim([0,1])
f.savefig('Spectrum Comparison_{}'.format(model_name))
"""
    Plotting the geometry comparison, there are fignum points in each plot
each representing a data point with a unique marker
8 dimension therefore 4 plots, 2x2 arrangement
"""
#for j in range(fignum):
pointnum = fignum #change #fig to #points in comparison
f = plt.figure(figsize = Figsize)
ax0 = plt.gca()
for i in range(4):
truthmarkers = UniqueMarkers() #Get some unique markers
predmarkers = UniqueMarkers() #Get some unique markers
ax = plt.subplot(2, 2, i+1)
#plt.xlim([29,56]) #setting the heights limit, abandoned because sometime can't see prediciton
#plt.ylim([41,53]) #setting the radius limits
for j in range(pointnum):
#Since the colored scatter only takes 2+ arguments, plot 2 same points to circumvent this problem
predArr = [[Xpred[j, i], Xpred[j, i]] ,[Xpred[j, i + 4], Xpred[j, i + 4]]]
predC = [Ymse[j], Ymse[j]]
truthplot = plt.scatter(Xtruth[j,i],Xtruth[j,i+4],label = 'Xtruth{}'.format(j),
marker = next(truthmarkers),c = 'm',s = 40)
predplot = plt.scatter(predArr[0],predArr[1],label = 'Xpred{}'.format(j),
c =predC ,cmap = 'jet',marker = next(predmarkers), s = 60)
plt.xlabel('h{}'.format(i))
plt.ylabel('r{}'.format(i))
rect = mpl.patches.Rectangle((boundary[0],boundary[2]),boundary[1] - boundary[0], boundary[3] - boundary[2],
linewidth=1,edgecolor='r',
facecolor='none',linestyle = '--',label = 'data region')
ax.add_patch(rect)
plt.autoscale()
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),
mode="expand",ncol = 6, prop={'size': 5})#, bbox_to_anchor=(1,0.5))
cb_ax = f.add_axes([0.93, 0.1, 0.02, 0.8])
cbar = f.colorbar(predplot, cax=cb_ax)
#f.colorbar(predplot)
f.savefig('Geometry Comparison_{}'.format(model_name)) |
Python | def HeatMapBVL(plot_x_name, plot_y_name, title, save_name='HeatMap.png', HeatMap_dir = 'HeatMap',
feature_1_name=None, feature_2_name=None,
heat_value_name = 'best_validation_loss'):
"""
    Plot a HeatMap of the best validation loss for a batch of hyperparameter-sweep runs
    First, copy those models to a folder called "HeatMap"
    Algorithm: loop through the directory using os.walk and find the parameters.txt files that store the run settings
:param HeatMap_dir: The directory where the checkpoint folders containing the parameters.txt files are located
:param feature_1_name: The name of the first feature that you would like to plot on the feature map
:param feature_2_name: If you only want to draw the heatmap using 1 single dimension, just leave it as None
"""
one_dimension_flag = False #indication flag of whether it is a 1d or 2d plot to plot
#Check the data integrity
if (feature_1_name == None):
print("Please specify the feature that you want to plot the heatmap");
return
if (feature_2_name == None):
one_dimension_flag = True
print("You are plotting feature map with only one feature, plotting loss curve instead")
#Get all the parameters.txt running related data and make HMpoint objects
HMpoint_list = []
df_list = [] #make a list of data frame for further use
for subdir, dirs, files in os.walk(HeatMap_dir):
for file_name in files:
if (file_name == 'parameters.txt'):
file_path = os.path.join(subdir, file_name) #Get the file relative path from
df = pd.read_csv(file_path, index_col = 0)
#df = df.reset_index() #reset the index to get ride of
print(df.T)
if (one_dimension_flag):
#print(df[[heat_value_name, feature_1_name]])
#print(df[heat_value_name][0])
#print(df[heat_value_name].iloc[0])
df_list.append(df[[heat_value_name, feature_1_name]])
HMpoint_list.append(HMpoint(float(df[heat_value_name][0]), eval(str(df[feature_1_name][0])),
f1_name = feature_1_name))
else:
df_list.append(df[[heat_value_name, feature_1_name, feature_2_name]])
HMpoint_list.append(HMpoint(float(df[heat_value_name][0]),eval(str(df[feature_1_name][0])),
eval(str(df[feature_2_name][0])), feature_1_name, feature_2_name))
    #Concatenate all the dfs into a single aggregate one for 2 dimensional use
df_aggregate = pd.concat(df_list, ignore_index = True, sort = False)
#print(df_aggregate[heat_value_name])
#print(type(df_aggregate[heat_value_name]))
df_aggregate.astype({heat_value_name: 'float'})
#print(type(df_aggregate[heat_value_name]))
#df_aggregate = df_aggregate.reset_index()
print("before transformation:", df_aggregate)
[h, w] = df_aggregate.shape
for i in range(h):
for j in range(w):
#print(i,j, df_aggregate.iloc[i,j])
if (isinstance(df_aggregate.iloc[i,j],str)):
ij_tuple = eval(df_aggregate.iloc[i,j])
df_aggregate.iloc[i,j] = len(ij_tuple)
print("after transoformation:",df_aggregate)
#Change the feature if it is a tuple, change to length of it
for cnt, point in enumerate(HMpoint_list):
print("For point {} , it has {} loss, {} for feature 1 and {} for feature 2".format(cnt,
point.bv_loss, point.feature_1, point.feature_2))
assert(isinstance(point.bv_loss, float)) #make sure this is a floating number
if (isinstance(point.feature_1, tuple)):
point.feature_1 = len(point.feature_1)
if (isinstance(point.feature_2, tuple)):
point.feature_2 = len(point.feature_2)
f = plt.figure()
#After we get the full list of HMpoint object, we can start drawing
if (feature_2_name == None):
print("plotting 1 dimension HeatMap (which is actually a line)")
HMpoint_list_sorted = sorted(HMpoint_list, key = lambda x: x.feature_1)
#Get the 2 lists of plot
bv_loss_list = []
feature_1_list = []
for point in HMpoint_list_sorted:
bv_loss_list.append(point.bv_loss)
feature_1_list.append(point.feature_1)
print("bv_loss_list:", bv_loss_list)
print("feature_1_list:",feature_1_list)
#start plotting
plt.plot(feature_1_list, bv_loss_list,'o-')
else: #Or this is a 2 dimension HeatMap
print("plotting 2 dimension HeatMap")
#point_df = pd.DataFrame.from_records([point.to_dict() for point in HMpoint_list])
df_aggregate = df_aggregate.reset_index()
df_aggregate.sort_values(feature_1_name, axis = 0, inplace = True)
df_aggregate.sort_values(feature_2_name, axis = 0, inplace = True)
print(df_aggregate)
point_df_pivot = df_aggregate.reset_index().pivot(feature_1_name, feature_2_name, heat_value_name)
sns.heatmap(point_df_pivot, vmin = 1.24e-3,cmap = "YlGnBu")
plt.xlabel(plot_x_name)
plt.ylabel(plot_y_name)
plt.title(title)
plt.savefig(save_name) | def HeatMapBVL(plot_x_name, plot_y_name, title, save_name='HeatMap.png', HeatMap_dir = 'HeatMap',
feature_1_name=None, feature_2_name=None,
heat_value_name = 'best_validation_loss'):
"""
    Plotting a HeatMap of the best validation loss for a batch of hyperparameter-sweep runs
    First, copy those models to a folder called "HeatMap"
    Algorithm: loop through that directory using os.walk and read the parameters.txt file that stores each run's settings and loss
:param HeatMap_dir: The directory where the checkpoint folders containing the parameters.txt files are located
:param feature_1_name: The name of the first feature that you would like to plot on the feature map
:param feature_2_name: If you only want to draw the heatmap using 1 single dimension, just leave it as None
"""
one_dimension_flag = False #indication flag of whether it is a 1d or 2d plot to plot
#Check the data integrity
if (feature_1_name == None):
print("Please specify the feature that you want to plot the heatmap");
return
if (feature_2_name == None):
one_dimension_flag = True
print("You are plotting feature map with only one feature, plotting loss curve instead")
#Get all the parameters.txt running related data and make HMpoint objects
HMpoint_list = []
df_list = [] #make a list of data frame for further use
for subdir, dirs, files in os.walk(HeatMap_dir):
for file_name in files:
if (file_name == 'parameters.txt'):
                file_path = os.path.join(subdir, file_name) #Get the path of this run's parameters.txt file
df = pd.read_csv(file_path, index_col = 0)
                #df = df.reset_index() #reset the index to get rid of the original index column
print(df.T)
if (one_dimension_flag):
#print(df[[heat_value_name, feature_1_name]])
#print(df[heat_value_name][0])
#print(df[heat_value_name].iloc[0])
df_list.append(df[[heat_value_name, feature_1_name]])
HMpoint_list.append(HMpoint(float(df[heat_value_name][0]), eval(str(df[feature_1_name][0])),
f1_name = feature_1_name))
else:
df_list.append(df[[heat_value_name, feature_1_name, feature_2_name]])
HMpoint_list.append(HMpoint(float(df[heat_value_name][0]),eval(str(df[feature_1_name][0])),
eval(str(df[feature_2_name][0])), feature_1_name, feature_2_name))
    #Concatenate all the dfs into a single aggregate one for 2-dimensional use
df_aggregate = pd.concat(df_list, ignore_index = True, sort = False)
#print(df_aggregate[heat_value_name])
#print(type(df_aggregate[heat_value_name]))
    df_aggregate = df_aggregate.astype({heat_value_name: 'float'})    # astype returns a copy, so reassign
#print(type(df_aggregate[heat_value_name]))
#df_aggregate = df_aggregate.reset_index()
print("before transformation:", df_aggregate)
[h, w] = df_aggregate.shape
for i in range(h):
for j in range(w):
#print(i,j, df_aggregate.iloc[i,j])
if (isinstance(df_aggregate.iloc[i,j],str)):
ij_tuple = eval(df_aggregate.iloc[i,j])
df_aggregate.iloc[i,j] = len(ij_tuple)
print("after transoformation:",df_aggregate)
    #If a feature is a tuple, replace it with its length
for cnt, point in enumerate(HMpoint_list):
print("For point {} , it has {} loss, {} for feature 1 and {} for feature 2".format(cnt,
point.bv_loss, point.feature_1, point.feature_2))
assert(isinstance(point.bv_loss, float)) #make sure this is a floating number
if (isinstance(point.feature_1, tuple)):
point.feature_1 = len(point.feature_1)
if (isinstance(point.feature_2, tuple)):
point.feature_2 = len(point.feature_2)
f = plt.figure()
#After we get the full list of HMpoint object, we can start drawing
if (feature_2_name == None):
print("plotting 1 dimension HeatMap (which is actually a line)")
HMpoint_list_sorted = sorted(HMpoint_list, key = lambda x: x.feature_1)
#Get the 2 lists of plot
bv_loss_list = []
feature_1_list = []
for point in HMpoint_list_sorted:
bv_loss_list.append(point.bv_loss)
feature_1_list.append(point.feature_1)
print("bv_loss_list:", bv_loss_list)
print("feature_1_list:",feature_1_list)
#start plotting
plt.plot(feature_1_list, bv_loss_list,'o-')
else: #Or this is a 2 dimension HeatMap
print("plotting 2 dimension HeatMap")
#point_df = pd.DataFrame.from_records([point.to_dict() for point in HMpoint_list])
df_aggregate = df_aggregate.reset_index()
df_aggregate.sort_values(feature_1_name, axis = 0, inplace = True)
df_aggregate.sort_values(feature_2_name, axis = 0, inplace = True)
print(df_aggregate)
point_df_pivot = df_aggregate.reset_index().pivot(feature_1_name, feature_2_name, heat_value_name)
sns.heatmap(point_df_pivot, vmin = 1.24e-3,cmap = "YlGnBu")
plt.xlabel(plot_x_name)
plt.ylabel(plot_y_name)
plt.title(title)
plt.savefig(save_name) |
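# Illustrative sketch (assumed toy data, not from the repository): a minimal,
# self-contained example of the pivot-then-heatmap step used in HeatMapBVL.
# The column names 'num_layers', 'layer_size', 'best_validation_loss' are
# hypothetical stand-ins for the swept features.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

toy = pd.DataFrame({
    'num_layers': [2, 2, 3, 3],
    'layer_size': [50, 100, 50, 100],
    'best_validation_loss': [2.1e-3, 1.8e-3, 1.6e-3, 1.4e-3],
})
# Rows/columns are the two swept features, cells hold the loss value
pivoted = toy.pivot(index='num_layers', columns='layer_size',
                    values='best_validation_loss')
sns.heatmap(pivoted, cmap='YlGnBu', annot=True)
plt.savefig('toy_heatmap.png')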
Python | def PlotPossibleGeoSpace(figname, Xpred_dir, compare_original = False,calculate_diversity = None):
"""
Function to plot the possible geometry space for a model evaluation result.
    It reads from the Xpred_dir folder, finds the Xpred result inside, and plots that result
    :param figname: The name of the figure to save
    :param Xpred_dir: The directory to look for the Xpred file which is the source of plotting
    :output A plot containing 4 subplots showing the 8 geometry dimensions
"""
Xpredfile = get_pred_truth_file.get_Xpred(Xpred_dir)
Xpred = pd.read_csv(Xpredfile, header=None, delimiter=' ').values
Xtruthfile = get_pred_truth_file.get_Xtruth(Xpred_dir)
Xtruth = pd.read_csv(Xtruthfile, header=None, delimiter=' ').values
f = plt.figure()
ax0 = plt.gca()
print(np.shape(Xpred))
#print(Xpred)
#plt.title(figname)
if (calculate_diversity == 'MST'):
diversity_Xpred, diversity_Xtruth = calculate_MST(Xpred, Xtruth)
elif (calculate_diversity == 'AREA'):
diversity_Xpred, diversity_Xtruth = calculate_AREA(Xpred, Xtruth)
for i in range(4):
ax = plt.subplot(2, 2, i+1)
ax.scatter(Xpred[:,i], Xpred[:,i + 4],s = 3,label = "Xpred")
if (compare_original):
ax.scatter(Xtruth[:,i], Xtruth[:,i+4],s = 3, label = "Xtruth")
plt.xlabel('h{}'.format(i))
plt.ylabel('r{}'.format(i))
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.legend()
if (calculate_diversity != None):
plt.text(-4, 3.5,'Div_Xpred = {}, Div_Xtruth = {}, under criteria {}'.format(diversity_Xpred, diversity_Xtruth, calculate_diversity), zorder = 1)
plt.suptitle(figname)
f.savefig(figname+'.png') | def PlotPossibleGeoSpace(figname, Xpred_dir, compare_original = False,calculate_diversity = None):
"""
Function to plot the possible geometry space for a model evaluation result.
    It reads from the Xpred_dir folder, finds the Xpred result inside, and plots that result
    :param figname: The name of the figure to save
    :param Xpred_dir: The directory to look for the Xpred file which is the source of plotting
    :output A plot containing 4 subplots showing the 8 geometry dimensions
"""
Xpredfile = get_pred_truth_file.get_Xpred(Xpred_dir)
Xpred = pd.read_csv(Xpredfile, header=None, delimiter=' ').values
Xtruthfile = get_pred_truth_file.get_Xtruth(Xpred_dir)
Xtruth = pd.read_csv(Xtruthfile, header=None, delimiter=' ').values
f = plt.figure()
ax0 = plt.gca()
print(np.shape(Xpred))
#print(Xpred)
#plt.title(figname)
if (calculate_diversity == 'MST'):
diversity_Xpred, diversity_Xtruth = calculate_MST(Xpred, Xtruth)
elif (calculate_diversity == 'AREA'):
diversity_Xpred, diversity_Xtruth = calculate_AREA(Xpred, Xtruth)
for i in range(4):
ax = plt.subplot(2, 2, i+1)
ax.scatter(Xpred[:,i], Xpred[:,i + 4],s = 3,label = "Xpred")
if (compare_original):
ax.scatter(Xtruth[:,i], Xtruth[:,i+4],s = 3, label = "Xtruth")
plt.xlabel('h{}'.format(i))
plt.ylabel('r{}'.format(i))
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.legend()
if (calculate_diversity != None):
plt.text(-4, 3.5,'Div_Xpred = {}, Div_Xtruth = {}, under criteria {}'.format(diversity_Xpred, diversity_Xtruth, calculate_diversity), zorder = 1)
plt.suptitle(figname)
f.savefig(figname+'.png') |
Python | def PlotPairwiseGeometry(figname, Xpred_dir):
"""
    Function to plot the pair-wise scatter plot of the geometry file to show
    the correlation between the geometry parameters that the network learns
"""
Xpredfile = get_pred_truth_file.get_Xpred(Xpred_dir)
Xpred = pd.read_csv(Xpredfile, header=None, delimiter=' ')
f=plt.figure()
axes = pd.plotting.scatter_matrix(Xpred, alpha = 0.2)
#plt.tight_layout()
plt.title("Pair-wise scattering of Geometery predictions")
plt.savefig(figname) | def PlotPairwiseGeometry(figname, Xpred_dir):
"""
    Function to plot the pair-wise scatter plot of the geometry file to show
    the correlation between the geometry parameters that the network learns
"""
Xpredfile = get_pred_truth_file.get_Xpred(Xpred_dir)
Xpred = pd.read_csv(Xpredfile, header=None, delimiter=' ')
f=plt.figure()
axes = pd.plotting.scatter_matrix(Xpred, alpha = 0.2)
#plt.tight_layout()
plt.title("Pair-wise scattering of Geometery predictions")
plt.savefig(figname) |
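# Illustrative sketch: minimal usage of pandas' scatter_matrix as in
# PlotPairwiseGeometry, on random 8-D geometry-like data. The 300x8 shape and
# the h0..h3/r0..r3 column names are assumptions mirroring the plots above.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

toy_geometry = pd.DataFrame(np.random.uniform(-1, 1, size=(300, 8)),
                            columns=['h0', 'h1', 'h2', 'h3', 'r0', 'r1', 'r2', 'r3'])
pd.plotting.scatter_matrix(toy_geometry, alpha=0.2, figsize=(10, 10))
plt.savefig('toy_pairwise.png')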
Python | def calculate_AREA(Xpred, Xtruth):
"""
    Function to calculate the covered area (fraction of occupied grid cells) for both Xpred and Xtruth using the given bin width
"""
area_list = np.zeros([2,4])
X_list = [Xpred, Xtruth]
binwidth = 0.05
for cnt, X in enumerate(X_list):
for i in range(4):
hist, xedges, yedges = np.histogram2d(X[:,i],X[:,i+4], bins = np.arange(-1,1+binwidth,binwidth))
area_list[cnt, i] = np.mean(hist > 0)
X_histgt0 = np.mean(area_list, axis = 1)
assert len(X_histgt0) == 2
return X_histgt0[0], X_histgt0[1] | def calculate_AREA(Xpred, Xtruth):
"""
    Function to calculate the covered area (fraction of occupied grid cells) for both Xpred and Xtruth using the given bin width
"""
area_list = np.zeros([2,4])
X_list = [Xpred, Xtruth]
binwidth = 0.05
for cnt, X in enumerate(X_list):
for i in range(4):
hist, xedges, yedges = np.histogram2d(X[:,i],X[:,i+4], bins = np.arange(-1,1+binwidth,binwidth))
area_list[cnt, i] = np.mean(hist > 0)
X_histgt0 = np.mean(area_list, axis = 1)
assert len(X_histgt0) == 2
return X_histgt0[0], X_histgt0[1] |
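# Illustrative sketch of the grid-coverage diversity measure in calculate_AREA:
# the score is the fraction of 2-D grid cells that contain at least one sample.
# The 500 uniform toy points are an assumption for demonstration only.
import numpy as np

binwidth = 0.05
edges = np.arange(-1, 1 + binwidth, binwidth)
points = np.random.uniform(-1, 1, size=(500, 2))      # toy (h_i, r_i) pairs
hist, _, _ = np.histogram2d(points[:, 0], points[:, 1], bins=edges)
print('fraction of occupied cells:', np.mean(hist > 0))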
Python | def calculate_MST(Xpred, Xtruth):
"""
    Function to calculate the total minimum spanning tree (MST) edge length for both Xpred and Xtruth as a diversity measure
"""
MST_list = np.zeros([2,4])
X_list = [Xpred, Xtruth]
for cnt, X in enumerate(X_list):
for i in range(4):
points = X[:,i:i+5:4]
distance_matrix_points = distance_matrix(points,points, p = 2)
csr_mat = csr_matrix(distance_matrix_points)
Tree = minimum_spanning_tree(csr_mat)
MST_list[cnt,i] = np.sum(Tree.toarray().astype(float))
X_MST = np.mean(MST_list, axis = 1)
return X_MST[0], X_MST[1] | def calculate_MST(Xpred, Xtruth):
"""
    Function to calculate the total minimum spanning tree (MST) edge length for both Xpred and Xtruth as a diversity measure
"""
MST_list = np.zeros([2,4])
X_list = [Xpred, Xtruth]
for cnt, X in enumerate(X_list):
for i in range(4):
points = X[:,i:i+5:4]
distance_matrix_points = distance_matrix(points,points, p = 2)
csr_mat = csr_matrix(distance_matrix_points)
Tree = minimum_spanning_tree(csr_mat)
MST_list[cnt,i] = np.sum(Tree.toarray().astype(float))
X_MST = np.mean(MST_list, axis = 1)
return X_MST[0], X_MST[1] |
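# Illustrative sketch of the MST diversity measure in calculate_MST on a tiny
# hand-picked point set: more spread-out points give a larger total edge length.
import numpy as np
from scipy.spatial import distance_matrix
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

points = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
dists = distance_matrix(points, points, p=2)
mst = minimum_spanning_tree(csr_matrix(dists))
print('total MST edge length:', mst.toarray().sum())  # 3.0 for this unit square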
Python | def make_loss(self):
"""
        Make the total loss (reconstruction + regularization + boundary + KL) for the forward part of the model
:return: total_loss: The total loss
:return: mse_loss: The mean squared error loss for reconstruction
:return: reg_loss: The regularization loss to prevent overfitting
:return: bdy_loss: Boundary loss that confines the geometry inside the boundary
:return: kl_loss: the KL_divergence loss that tells how far the latent distribution is compared with a normal one
"""
with tf.variable_scope('loss'):
mse_loss = tf.losses.mean_squared_error(self.features, self.logits) #reconstruction loss
            reg_loss = tf.losses.get_regularization_loss() #regularization loss
bdy_loss = self.Boundary_loss #boundary loss
kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
kl_loss = K.sum(kl_loss, axis = -1)
kl_loss = K.sum(kl_loss, axis = -1) / self.batch_size
kl_loss *= -0.5
total_loss = kl_loss + mse_loss + reg_loss + bdy_loss
return total_loss, mse_loss, reg_loss, bdy_loss, kl_loss | def make_loss(self):
"""
        Make the total loss (reconstruction + regularization + boundary + KL) for the forward part of the model
:return: total_loss: The total loss
:return: mse_loss: The mean squared error loss for reconstruction
:return: reg_loss: The regularization loss to prevent overfitting
:return: bdy_loss: Boundary loss that confines the geometry inside the boundary
:return: kl_loss: the KL_divergence loss that tells how far the latent distribution is compared with a normal one
"""
with tf.variable_scope('loss'):
mse_loss = tf.losses.mean_squared_error(self.features, self.logits) #reconstruction loss
            reg_loss = tf.losses.get_regularization_loss() #regularization loss
bdy_loss = self.Boundary_loss #boundary loss
kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
kl_loss = K.sum(kl_loss, axis = -1)
kl_loss = K.sum(kl_loss, axis = -1) / self.batch_size
kl_loss *= -0.5
total_loss = kl_loss + mse_loss + reg_loss + bdy_loss
return total_loss, mse_loss, reg_loss, bdy_loss, kl_loss |
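# Reference check (illustrative, toy numbers assumed): the kl_loss term above is
# the closed-form KL divergence between N(mu, sigma^2) and N(0, 1),
#   KL = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2),
# which matches the (1 + z_log_var - z_mean^2 - exp(z_log_var)) expression.
import numpy as np

mu = np.array([0.5, -0.3])
log_var = np.array([0.1, -0.2])
kl = -0.5 * np.sum(1 + log_var - mu**2 - np.exp(log_var))
print('KL divergence of this toy posterior:', kl)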
Python | def train(self, train_init_op, step_num, forward_hooks, write_summary=False):
"""
Train the model with step_num steps
        Only the forward part of the model is trained here
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
        :param forward_hooks: hooks for monitoring the training process !!!ALWAYS PUT THE VALIDATION HOOK LAST
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
print("Training forward model now:")
#assign_true_op = self.train_Forward.assign(True)
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op])#, assign_true_op])
sess.run(self.optm)
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = forward_hooks[-1].best_validation_loss
if forward_hooks[-1].stop: #if it either trains to the threshold or have NAN value, stop here
break
self.save(sess) | def train(self, train_init_op, step_num, forward_hooks, write_summary=False):
"""
Train the model with step_num steps
        Only the forward part of the model is trained here
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
        :param forward_hooks: hooks for monitoring the training process !!!ALWAYS PUT THE VALIDATION HOOK LAST
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
print("Training forward model now:")
#assign_true_op = self.train_Forward.assign(True)
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op])#, assign_true_op])
sess.run(self.optm)
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = forward_hooks[-1].best_validation_loss
if forward_hooks[-1].stop: #if it either trains to the threshold or have NAN value, stop here
break
self.save(sess) |
Python | def evaluate_one(self, target_spectra, sess):
"""
        The function that returns the result of evaluating one target spectrum
        :param target_spectra: The target spectrum for the VAE to decode; should be only one row
:param sess: current tf session
:return Xpred: the row of X predictions that the VAE gives
"""
#Create random variable for latent variable
latent_z = np.random.normal(0, 1, (self.batch_size, self.latent_dim))
target_spectra_repeat = np.repeat(np.reshape(target_spectra.values, (1, -1)), self.batch_size, axis=0)
Xpred = sess.run(self.logits, feed_dict = {self.z : latent_z, self.labels: target_spectra_repeat})
Xpred = np.reshape(Xpred, (1,-1)) #Put Xpred into a long row and output that row
return Xpred | def evaluate_one(self, target_spectra, sess):
"""
        The function that returns the result of evaluating one target spectrum
        :param target_spectra: The target spectrum for the VAE to decode; should be only one row
:param sess: current tf session
:return Xpred: the row of X predictions that the VAE gives
"""
#Create random variable for latent variable
latent_z = np.random.normal(0, 1, (self.batch_size, self.latent_dim))
target_spectra_repeat = np.repeat(np.reshape(target_spectra.values, (1, -1)), self.batch_size, axis=0)
Xpred = sess.run(self.logits, feed_dict = {self.z : latent_z, self.labels: target_spectra_repeat})
Xpred = np.reshape(Xpred, (1,-1)) #Put Xpred into a long row and output that row
return Xpred |
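# Shape sketch of what evaluate_one does with the latent sample and the target
# spectrum; batch_size=4, latent_dim=3 and a 300-point spectrum are assumptions.
import numpy as np

batch_size, latent_dim, n_spec = 4, 3, 300
latent_z = np.random.normal(0, 1, (batch_size, latent_dim))
target_spectrum = np.random.rand(n_spec)
target_repeat = np.repeat(np.reshape(target_spectrum, (1, -1)), batch_size, axis=0)
print(latent_z.shape, target_repeat.shape)   # (4, 3) (4, 300)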
Python | def MakeBoundaryLoss(Geometry_tensor, boundary):
"""
Make the boundary loss using boundary given
:param Geometry_tensor: 8 element geometry h0 h1 h2 h3 r0 r1 r2 r3
:param boundary: 4 element numpy array representing [h_low, h_high, r_low, r_high]
    :return Boundary_loss: penalty that grows when the geometry falls outside the given boundary
"""
tolerance = 0
print("Geometry_tensor_shape",Geometry_tensor.shape)
#Make constants
h_low = tf.constant((boundary[0] - tolerance) * np.ones([1,4]), name= 'h_low',dtype=tf.float32)
h_high = tf.constant((boundary[1] + tolerance) * np.ones([1,4]), name= 'h_high',dtype=tf.float32)
r_low = tf.constant((boundary[2] - tolerance) * np.ones([1,4]), name= 'r_low',dtype=tf.float32)
r_high = tf.constant((boundary[3] + tolerance) * np.ones([1,4]), name= 'r_high',dtype=tf.float32)
#Get the 2 separate parts
h = Geometry_tensor[:,0:4]
r = Geometry_tensor[:,4:]
zero = tf.constant(0,dtype=tf.float32,name='zero')
print("shape of h:",h.shape)
print("shape of r:",r.shape)
print("shape of h_low:",h_low.shape)
Boundary_loss = tf.reduce_sum(tf.math.maximum(zero, tf.math.subtract(h, h_high)) + tf.math.maximum(zero, tf.math.subtract(h_low, h) ) +\
tf.math.maximum(zero, tf.math.subtract(r, r_high)) + tf.math.maximum(zero, tf.math.subtract(r_low, r) ))
return Boundary_loss | def MakeBoundaryLoss(Geometry_tensor, boundary):
"""
Make the boundary loss using boundary given
:param Geometry_tensor: 8 element geometry h0 h1 h2 h3 r0 r1 r2 r3
:param boundary: 4 element numpy array representing [h_low, h_high, r_low, r_high]
    :return Boundary_loss: penalty that grows when the geometry falls outside the given boundary
"""
tolerance = 0
print("Geometry_tensor_shape",Geometry_tensor.shape)
#Make constants
h_low = tf.constant((boundary[0] - tolerance) * np.ones([1,4]), name= 'h_low',dtype=tf.float32)
h_high = tf.constant((boundary[1] + tolerance) * np.ones([1,4]), name= 'h_high',dtype=tf.float32)
r_low = tf.constant((boundary[2] - tolerance) * np.ones([1,4]), name= 'r_low',dtype=tf.float32)
r_high = tf.constant((boundary[3] + tolerance) * np.ones([1,4]), name= 'r_high',dtype=tf.float32)
#Get the 2 separate parts
h = Geometry_tensor[:,0:4]
r = Geometry_tensor[:,4:]
zero = tf.constant(0,dtype=tf.float32,name='zero')
print("shape of h:",h.shape)
print("shape of r:",r.shape)
print("shape of h_low:",h_low.shape)
Boundary_loss = tf.reduce_sum(tf.math.maximum(zero, tf.math.subtract(h, h_high)) + tf.math.maximum(zero, tf.math.subtract(h_low, h) ) +\
tf.math.maximum(zero, tf.math.subtract(r, r_high)) + tf.math.maximum(zero, tf.math.subtract(r_low, r) ))
return Boundary_loss |
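# Numerical sanity check (illustrative) of the hinge-style boundary penalty in
# MakeBoundaryLoss: values inside [low, high] contribute zero, values outside
# contribute their distance to the violated bound. Toy boundary [0, 1.5] assumed.
import numpy as np

h = np.array([[0.5, 1.2, -0.3, 2.0]])
h_low, h_high = 0.0, 1.5
penalty = np.sum(np.maximum(0, h - h_high) + np.maximum(0, h_low - h))
print('boundary penalty:', penalty)           # 0 + 0 + 0.3 + 0.5 = 0.8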
Python | def tandem_model(features,labels, backward_fc, batch_size, clip,
fc_filters, tconv_fNums, tconv_dims, tconv_filters,
n_filter, n_branch, reg_scale, geoboundary, conv1d_filters, filter_channel_list):
"""
Customized tandem model which combines 2 model
"""
backward_out, summary_out,BackCollectionName, BeforeBackCollectionName =\
my_model_backward(labels, backward_fc, reg_scale, conv1d_filters,filter_channel_list)
forward_in, up, preconv, preTconv, merged_summary_op, ForwardCollectionName, train_Forward, Boundary_loss = \
my_model_fn_tens(backward_out,features,batch_size, clip,
fc_filters, tconv_fNums, tconv_dims, tconv_filters,
n_filter, n_branch, reg_scale, BackCollectionName, geoboundary)
return forward_in, up, merged_summary_op, ForwardCollectionName,\
BackCollectionName, backward_out, train_Forward, Boundary_loss | def tandem_model(features,labels, backward_fc, batch_size, clip,
fc_filters, tconv_fNums, tconv_dims, tconv_filters,
n_filter, n_branch, reg_scale, geoboundary, conv1d_filters, filter_channel_list):
"""
Customized tandem model which combines 2 model
"""
backward_out, summary_out,BackCollectionName, BeforeBackCollectionName =\
my_model_backward(labels, backward_fc, reg_scale, conv1d_filters,filter_channel_list)
forward_in, up, preconv, preTconv, merged_summary_op, ForwardCollectionName, train_Forward, Boundary_loss = \
my_model_fn_tens(backward_out,features,batch_size, clip,
fc_filters, tconv_fNums, tconv_dims, tconv_filters,
n_filter, n_branch, reg_scale, BackCollectionName, geoboundary)
return forward_in, up, merged_summary_op, ForwardCollectionName,\
BackCollectionName, backward_out, train_Forward, Boundary_loss |
Python | def run(self, sess, writer=None):
"""
Run the hook at each step
:param sess: current session
:param writer: summary writer used to write variables into tensorboard, default to None
:return:
"""
self.step += 1
if self.step % self.verb_step == 0:
loss_val = sess.run(self.loss)
if self.verb:
print('Step {},{} loss: {:.2E}'.format(self.step, self.name, loss_val))
if self.write_summary:
self.train_mse_summary.log(loss_val, self.step, sess, writer) | def run(self, sess, writer=None):
"""
Run the hook at each step
:param sess: current session
:param writer: summary writer used to write variables into tensorboard, default to None
:return:
"""
self.step += 1
if self.step % self.verb_step == 0:
loss_val = sess.run(self.loss)
if self.verb:
print('Step {},{} loss: {:.2E}'.format(self.step, self.name, loss_val))
if self.write_summary:
self.train_mse_summary.log(loss_val, self.step, sess, writer) |
Python | def run(self, sess, writer=None):
"""
Run the hook at each step
:param sess: current session
:param writer: summary writer used to write variables into tensorboard, default to None
:return:
"""
self.step += 1
self.save = False
if self.step % self.valid_step == 0 and self.step != 0:
sess.run(self.valid_init_op)
loss_val = []
truth = None
try:
while True:
loss, truth, pred= sess.run([self.loss,self.truth,self.pred,])
loss_val.append(loss)
except tf.errors.OutOfRangeError:
pass
loss_mean = np.mean(loss_val)
print('Eval @ Step {}, loss: {:.2E}, duration {:.3f}s'.\
format(self.step, loss_mean, time.time()-self.time_cnt))
if math.isnan(loss_mean): #If the loss is NAN, then Stop
print("The validation loss is NAN, please adjust (Lower) your learning rate and retrain. Aborting now")
self.stop = True
else: #If the loss is not NAN
if loss_mean < self.stop_threshold:
                    print('Validation loss is lower than threshold {}, training is stopped'.format(self.stop_threshold))
self.stop = True
if loss_mean < self.best_validation_loss: #If the loss is smaller than the best, then save this one now
self.best_validation_loss = loss_mean
self.save = True
self.time_cnt = time.time()
if self.write_summary:
self.valid_mse_summary.log(loss_mean, self.step, sess, writer)
#self.valid_curve_summary.log(pred=pred,
# step=self.step,
# writer=writer,
# curve_num=self.curve_num,
# truth=truth)
# self.valid_preconv_summary.log(pred=preconv,
# step=self.step,
# writer=writer,
# curve_num=self.curve_num)
# self.valid_preTconv_summary.log(pred=preTconv,
# step=self.step,
# writer=writer,
# curve_num=self.curve_num) | def run(self, sess, writer=None):
"""
Run the hook at each step
:param sess: current session
:param writer: summary writer used to write variables into tensorboard, default to None
:return:
"""
self.step += 1
self.save = False
if self.step % self.valid_step == 0 and self.step != 0:
sess.run(self.valid_init_op)
loss_val = []
truth = None
try:
while True:
loss, truth, pred= sess.run([self.loss,self.truth,self.pred,])
loss_val.append(loss)
except tf.errors.OutOfRangeError:
pass
loss_mean = np.mean(loss_val)
print('Eval @ Step {}, loss: {:.2E}, duration {:.3f}s'.\
format(self.step, loss_mean, time.time()-self.time_cnt))
if math.isnan(loss_mean): #If the loss is NAN, then Stop
print("The validation loss is NAN, please adjust (Lower) your learning rate and retrain. Aborting now")
self.stop = True
else: #If the loss is not NAN
if loss_mean < self.stop_threshold:
                    print('Validation loss is lower than threshold {}, training is stopped'.format(self.stop_threshold))
self.stop = True
if loss_mean < self.best_validation_loss: #If the loss is smaller than the best, then save this one now
self.best_validation_loss = loss_mean
self.save = True
self.time_cnt = time.time()
if self.write_summary:
self.valid_mse_summary.log(loss_mean, self.step, sess, writer)
#self.valid_curve_summary.log(pred=pred,
# step=self.step,
# writer=writer,
# curve_num=self.curve_num,
# truth=truth)
# self.valid_preconv_summary.log(pred=preconv,
# step=self.step,
# writer=writer,
# curve_num=self.curve_num)
# self.valid_preTconv_summary.log(pred=preTconv,
# step=self.step,
# writer=writer,
# curve_num=self.curve_num) |
Python | def make_tandem_optimizer(self):
"""
Make an Adam optimizer with the learning rate defined when the class is initialized
:return: an AdamOptimizer
"""
varlist = tf.get_collection(self.BackCollectionName)
print(varlist)
return tf.train.AdamOptimizer(learning_rate=self.learn_rate).minimize(self.loss,
self.global_step,
var_list = varlist) | def make_tandem_optimizer(self):
"""
Make an Adam optimizer with the learning rate defined when the class is initialized
:return: an AdamOptimizer
"""
varlist = tf.get_collection(self.BackCollectionName)
print(varlist)
return tf.train.AdamOptimizer(learning_rate=self.learn_rate).minimize(self.loss,
self.global_step,
var_list = varlist) |
Python | def train(self, train_init_op, step_num,backward_step_num, forward_hooks, tandem_hooks,\
write_summary=False,load_forward_ckpt = None):
"""
Train the model with step_num steps
First train the forward model and then the tandem part
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
        :param forward_hooks, tandem_hooks: hooks for monitoring the training process !!!ALWAYS PUT THE VALIDATION HOOK LAST
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
if load_forward_ckpt == None: #If choose not to load the forward model
print("Training forward model now:")
assign_true_op = self.train_Forward.assign(True)
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op, assign_true_op])
sess.run(self.optm)
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].stop: #if it either trains to the threshold or have NAN value, stop here
break
else:
print("Loading forward model now:")
self.load(sess, load_forward_ckpt)
print("Training tandem model now:")
assign_false_op = self.train_Forward.assign(False)
##Train the tandem model
for i in range(int(backward_step_num)):
sess.run([train_init_op,assign_false_op])
sess.run(self.tandem_optm)
for hook in tandem_hooks:
hook.run(sess, writer = summary_writer)
if tandem_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = tandem_hooks[-1].best_validation_loss
if tandem_hooks[-1].stop: #If it either trains to threshold or have NAN appear, stop here
break
#self.save(sess) | def train(self, train_init_op, step_num,backward_step_num, forward_hooks, tandem_hooks,\
write_summary=False,load_forward_ckpt = None):
"""
Train the model with step_num steps
First train the forward model and then the tandem part
:param train_init_op: training dataset init operation
:param step_num: number of steps to train
        :param forward_hooks, tandem_hooks: hooks for monitoring the training process !!!ALWAYS PUT THE VALIDATION HOOK LAST
:param write_summary: write summary into tensorboard or not
:return:
"""
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
if write_summary:
summary_writer = tf.summary.FileWriter(self.ckpt_dir, sess.graph)
else:
summary_writer = None
if load_forward_ckpt == None: #If choose not to load the forward model
print("Training forward model now:")
assign_true_op = self.train_Forward.assign(True)
##Train the forward model
for i in range(int(step_num)):
sess.run([train_init_op, assign_true_op])
sess.run(self.optm)
for hook in forward_hooks:
hook.run(sess, writer=summary_writer)
if forward_hooks[-1].stop: #if it either trains to the threshold or have NAN value, stop here
break
else:
print("Loading forward model now:")
self.load(sess, load_forward_ckpt)
print("Training tandem model now:")
assign_false_op = self.train_Forward.assign(False)
##Train the tandem model
for i in range(int(backward_step_num)):
sess.run([train_init_op,assign_false_op])
sess.run(self.tandem_optm)
for hook in tandem_hooks:
hook.run(sess, writer = summary_writer)
if tandem_hooks[-1].save: #If the hook tells to save the model, then save it
self.save(sess)
self.best_validation_loss = tandem_hooks[-1].best_validation_loss
if tandem_hooks[-1].stop: #If it either trains to threshold or have NAN appear, stop here
break
#self.save(sess) |
Python | def make_loss(self, logit=None, labels=None):
"""
        Create a tensor that represents the loss. This is consistent at both training time \
        and inference time for the backward model
:param logit: The output of the network
:return: the total loss
"""
if logit is None:
return None
        MSE_loss = nn.functional.mse_loss(logit, labels)   # The MSE loss between prediction and label
        BDY_loss = 0                                        # Implementation later (boundary penalty)
return MSE_loss + BDY_loss | def make_loss(self, logit=None, labels=None):
"""
        Create a tensor that represents the loss. This is consistent at both training time \
        and inference time for the backward model
:param logit: The output of the network
:return: the total loss
"""
if logit is None:
return None
        MSE_loss = nn.functional.mse_loss(logit, labels)   # The MSE loss between prediction and label
        BDY_loss = 0                                        # Implementation later (boundary penalty)
return MSE_loss + BDY_loss |
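# The BDY_loss above is left at 0 ("implementation later"); this is one possible
# PyTorch sketch of the same hinge-style boundary penalty used in the TensorFlow
# MakeBoundaryLoss, assuming a hypothetical boundary = [h_low, h_high, r_low, r_high]
# and the h0..h3, r0..r3 column layout of the geometry tensor.
import torch

def boundary_penalty(geometry, boundary):
    h, r = geometry[:, 0:4], geometry[:, 4:8]
    h_low, h_high, r_low, r_high = boundary
    penalty = torch.clamp(h - h_high, min=0) + torch.clamp(h_low - h, min=0) \
            + torch.clamp(r - r_high, min=0) + torch.clamp(r_low - r, min=0)
    return penalty.sum()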
Python | def save(self):
"""
Saving the model to the current check point folder with name best_model.pt
:return: None
"""
#torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))
torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt')) | def save(self):
"""
Saving the model to the current check point folder with name best_model.pt
:return: None
"""
#torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))
torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt')) |
Python | def load(self):
"""
Loading the model from the check point folder with name best_model.pt
:return:
"""
#self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))
        self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model.pt')) | def load(self):
"""
Loading the model from the check point folder with name best_model.pt
:return:
"""
#self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))
        self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model.pt')) |
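# For reference (illustrative, toy module assumed): the two common PyTorch
# checkpoint patterns. Saving the whole pickled module requires reassignment on
# load, while the state_dict variant loads weights into an existing module.
import torch
import torch.nn as nn

model = nn.Linear(8, 300)                                    # toy stand-in model
torch.save(model, 'best_model.pt')                           # whole-module pickle
model = torch.load('best_model.pt')                          # reassign on load

torch.save(model.state_dict(), 'best_model_state_dict.pt')   # state_dict variant
model.load_state_dict(torch.load('best_model_state_dict.pt'))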
Python | def train(self):
"""
The major training function. This would start the training using information given in the flags
:return: None
"""
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
for epoch in range(self.flags.train_step):
# Set to Training Mode
train_loss = 0
self.model.train()
for j, (geometry, spectra) in enumerate(self.train_loader):
if cuda:
geometry = geometry.cuda() # Put data onto GPU
spectra = spectra.cuda() # Put data onto GPU
self.optm.zero_grad() # Zero the gradient first
logit = self.model(geometry) # Get the output
loss = self.make_loss(logit, spectra) # Get the loss tensor
loss.backward() # Calculate the backward gradients
self.optm.step() # Move one step the optimizer
train_loss += loss # Aggregate the loss
            if epoch % self.flags.eval_step == 0:                 # For eval steps, do the evaluations and tensor board
# Record the training loss to the tensorboard
                train_avg_loss = train_loss.cpu().data.numpy() / (j+1)   # move to CPU before converting to numpy
self.log.add_scalar('Loss/train', train_avg_loss, epoch)
# Set to Evaluation Mode
self.model.eval()
print("Doing Evaluation on the model now")
test_loss = 0
for j, (geometry, spectra) in enumerate(self.test_loader): # Loop through the eval set
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra) # compute the loss
test_loss += loss # Aggregate the loss
# Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j+1)     # move to CPU before converting to numpy
self.log.add_scalar('Loss/test', test_avg_loss, epoch)
print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
% (epoch, train_avg_loss, test_avg_loss ))
# Model improving, save the model down
if test_avg_loss < self.best_validation_loss:
self.best_validation_loss = test_avg_loss
self.save()
print("Saving the model down...")
if self.best_validation_loss < self.flags.stop_threshold:
print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
(epoch, self.best_validation_loss))
return None | def train(self):
"""
The major training function. This would start the training using information given in the flags
:return: None
"""
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
for epoch in range(self.flags.train_step):
# Set to Training Mode
train_loss = 0
self.model.train()
for j, (geometry, spectra) in enumerate(self.train_loader):
if cuda:
geometry = geometry.cuda() # Put data onto GPU
spectra = spectra.cuda() # Put data onto GPU
self.optm.zero_grad() # Zero the gradient first
logit = self.model(geometry) # Get the output
loss = self.make_loss(logit, spectra) # Get the loss tensor
loss.backward() # Calculate the backward gradients
self.optm.step() # Move one step the optimizer
train_loss += loss # Aggregate the loss
            if epoch % self.flags.eval_step == 0:                 # For eval steps, do the evaluations and tensor board
# Record the training loss to the tensorboard
                train_avg_loss = train_loss.cpu().data.numpy() / (j+1)   # move to CPU before converting to numpy
self.log.add_scalar('Loss/train', train_avg_loss, epoch)
# Set to Evaluation Mode
self.model.eval()
print("Doing Evaluation on the model now")
test_loss = 0
for j, (geometry, spectra) in enumerate(self.test_loader): # Loop through the eval set
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra) # compute the loss
test_loss += loss # Aggregate the loss
# Record the testing loss to the tensorboard
                test_avg_loss = test_loss.cpu().data.numpy() / (j+1)     # move to CPU before converting to numpy
self.log.add_scalar('Loss/test', test_avg_loss, epoch)
print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
% (epoch, train_avg_loss, test_avg_loss ))
# Model improving, save the model down
if test_avg_loss < self.best_validation_loss:
self.best_validation_loss = test_avg_loss
self.save()
print("Saving the model down...")
if self.best_validation_loss < self.flags.stop_threshold:
print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
(epoch, self.best_validation_loss))
return None |
Python | def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', default=0., type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--relu-dropout', default=0., type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N',
help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--character-embeddings', default=False, action='store_true',
help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--character-filters', type=str, metavar='LIST',
default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
help='size of character embeddings')
parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4,
help='size of character embeddings')
parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2,
                            help='number of highway layers for character token embedder')
parser.add_argument('--adaptive-input', default=False, action='store_true',
help='if set, uses adaptive input')
parser.add_argument('--adaptive-input-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--adaptive-input-cutoff', metavar='EXPR',
help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--tie-adaptive-weights', action='store_true',
help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--tie-adaptive-proj', action='store_true',
help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
# fmt: on | def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', default=0., type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--relu-dropout', default=0., type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N',
help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--character-embeddings', default=False, action='store_true',
help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--character-filters', type=str, metavar='LIST',
default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
help='size of character embeddings')
parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4,
help='size of character embeddings')
parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2,
                            help='number of highway layers for character token embedder')
parser.add_argument('--adaptive-input', default=False, action='store_true',
help='if set, uses adaptive input')
parser.add_argument('--adaptive-input-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--adaptive-input-cutoff', metavar='EXPR',
help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--tie-adaptive-weights', action='store_true',
help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--tie-adaptive-proj', action='store_true',
help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
# fmt: on |
Python | def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if hasattr(args, 'no_tie_adaptive_proj') and args.no_tie_adaptive_proj == False:
# backward compatibility
args.tie_adaptive_proj = True
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = args.tokens_per_sample
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = args.tokens_per_sample
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim,
args.adaptive_input_factor, args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int))
else:
embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad())
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = TransformerDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False)
return TransformerLanguageModel(decoder) | def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if hasattr(args, 'no_tie_adaptive_proj') and args.no_tie_adaptive_proj == False:
# backward compatibility
args.tie_adaptive_proj = True
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = args.tokens_per_sample
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = args.tokens_per_sample
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim,
args.adaptive_input_factor, args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int))
else:
embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim, task.dictionary.pad())
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = TransformerDecoder(args, task.output_dictionary, embed_tokens, no_encoder_attn=True, final_norm=False)
return TransformerLanguageModel(decoder) |
Python | def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--relu-dropout', type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
parser.add_argument('--lmdropout', default=0.1, type=float, metavar='D',
help='dropout probability')
parser.add_argument('--lmattention-dropout', default=0., type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--lmrelu-dropout', default=0., type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--lmdecoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--lmdecoder-output-dim', type=int, metavar='N',
help='decoder output dimension')
parser.add_argument('--lmdecoder-input-dim', type=int, metavar='N',
help='decoder input dimension')
parser.add_argument('--lmdecoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--lmdecoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--lmdecoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--lmdecoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--lmadaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--lmadaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--lmadaptive-softmax-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--lmno-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--lmshare-decoder-input-output-embed', default=False, action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--lmcharacter-embeddings', default=False, action='store_true',
help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--lmcharacter-filters', type=str, metavar='LIST',
default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
help='size of character embeddings')
parser.add_argument('--lmcharacter-embedding-dim', type=int, metavar='N', default=4,
help='size of character embeddings')
parser.add_argument('--lmchar-embedder-highway-layers', type=int, metavar='N', default=2,
help='number of highway layers for character token embeddder')
parser.add_argument('--lmadaptive-input', default=False, action='store_true',
help='if set, uses adaptive input')
parser.add_argument('--lmadaptive-input-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--lmadaptive-input-cutoff', metavar='EXPR',
help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--lmtie-adaptive-weights', action='store_true',
help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--lmtie-adaptive-proj', action='store_true',
help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--lmdecoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder') | def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--relu-dropout', type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
# fmt: on
parser.add_argument('--lmdropout', default=0.1, type=float, metavar='D',
help='dropout probability')
parser.add_argument('--lmattention-dropout', default=0., type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--lmrelu-dropout', default=0., type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--lmdecoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--lmdecoder-output-dim', type=int, metavar='N',
help='decoder output dimension')
parser.add_argument('--lmdecoder-input-dim', type=int, metavar='N',
help='decoder input dimension')
parser.add_argument('--lmdecoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--lmdecoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--lmdecoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--lmdecoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--lmadaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--lmadaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--lmadaptive-softmax-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--lmno-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--lmshare-decoder-input-output-embed', default=False, action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--lmcharacter-embeddings', default=False, action='store_true',
help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--lmcharacter-filters', type=str, metavar='LIST',
default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
help='size of character embeddings')
parser.add_argument('--lmcharacter-embedding-dim', type=int, metavar='N', default=4,
help='size of character embeddings')
parser.add_argument('--lmchar-embedder-highway-layers', type=int, metavar='N', default=2,
                            help='number of highway layers for character token embedder')
parser.add_argument('--lmadaptive-input', default=False, action='store_true',
help='if set, uses adaptive input')
parser.add_argument('--lmadaptive-input-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--lmadaptive-input-cutoff', metavar='EXPR',
help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--lmtie-adaptive-weights', action='store_true',
help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--lmtie-adaptive-proj', action='store_true',
help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--lmdecoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder') |
Python | def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lmnmt_atchitecture(args)
#NMT model
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = 1024
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = 1024
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
# if provided, load from preloaded dictionaries
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(
src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = build_embedding(
tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = TransformerEncoderModified(args, src_dict, encoder_embed_tokens)
decoder = TransformerDecoderModified(args, tgt_dict, decoder_embed_tokens)
#LM
    if hasattr(args, 'lmno_tie_adaptive_proj') and not args.lmno_tie_adaptive_proj:
        # backward compatibility
        args.lmtie_adaptive_proj = True
if not hasattr(args, 'lmmax_source_positions'):
args.lmmax_source_positions = args.lmtokens_per_sample
if not hasattr(args, 'lmmax_target_positions'):
args.lmmax_target_positions = args.lmtokens_per_sample
if args.lmcharacter_embeddings:
embed_tokens = CharacterTokenEmbedder(task.dictionary, eval(args.character_filters),
args.character_embedding_dim,
args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.lmadaptive_input:
embed_tokens = AdaptiveInput(len(task.dictionary), task.dictionary.pad(), args.decoder_input_dim,
args.adaptive_input_factor, args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int))
else:
src_embed_tokens = Embedding(len(src_dict), args.lmdecoder_input_dim, src_dict.pad())
tgt_embed_tokens = Embedding(len(tgt_dict), args.lmdecoder_input_dim, tgt_dict.pad())
if args.lmtie_adaptive_weights:
assert args.lmadaptive_input
assert args.lmadaptive_input_factor == args.lmadaptive_softmax_factor
assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert args.decoder_input_dim == args.decoder_output_dim
def prepare_lm_args(srcargs):
tgtargs = lmargs()
for k, v in srcargs.__dict__.items():
if k.startswith('lm'):
tgtargs.__setattr__(k[2:], v)
return tgtargs
newlmargs = prepare_lm_args(args)
srclm_decoder = TransformerDecoder(newlmargs, task.source_dictionary, src_embed_tokens, no_encoder_attn=True, final_norm=False)
tgtlm_decoder = TransformerDecoder(newlmargs, task.target_dictionary, tgt_embed_tokens, no_encoder_attn=True, final_norm=False)
    return cls(srclm_decoder, tgtlm_decoder, encoder, decoder, args)
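The `prepare_lm_args` helper above copies every `lm`-prefixed attribute into a fresh argument container so the language-model decoders can reuse the standard `TransformerDecoder` argument names. A self-contained sketch of that prefix-stripping idea; the helper name `strip_lm_prefix` and the use of `argparse.Namespace` in place of the `lmargs` container are illustrative assumptions:

```py
from argparse import Namespace

def strip_lm_prefix(args: Namespace, prefix: str = "lm") -> Namespace:
    # Copy every `lm*` attribute into a fresh namespace without the prefix.
    out = Namespace()
    for key, value in vars(args).items():
        if key.startswith(prefix):
            setattr(out, key[len(prefix):], value)
    return out

args = Namespace(lmdecoder_layers=6, lmdecoder_embed_dim=512, encoder_layers=6)
lm_args = strip_lm_prefix(args)
assert lm_args.decoder_layers == 6
assert not hasattr(lm_args, "encoder_layers")
```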
Python | def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates <= self.tradeoff_step:
self.tradeoff = self.warmup_init_tradeoff + self.lr_step * num_updates
else:
self.tradeoff = self.decay_factor * num_updates ** -2.
self.tradeoff = float(np.clip(np.random.normal(self.tradeoff, self.sigma), 0., 1.))
self.set_tradeoff()
    return self.tradeoff
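The scheduler above ramps the tradeoff linearly during warm-up and then decays it as `decay_factor * num_updates ** -2`, adding Gaussian noise clipped to [0, 1]. A self-contained worked example of that shape; all constants here are illustrative, not the repository defaults:

```py
import numpy as np

warmup_init_tradeoff = 0.0
peak_tradeoff = 0.5
tradeoff_step = 4000      # number of warm-up updates (illustrative)
sigma = 0.05              # std-dev of the Gaussian jitter (illustrative)
lr_step = (peak_tradeoff - warmup_init_tradeoff) / tradeoff_step
decay_factor = peak_tradeoff * tradeoff_step ** 2   # makes the two phases meet at tradeoff_step

def tradeoff_at(num_updates: int) -> float:
    if num_updates <= tradeoff_step:
        value = warmup_init_tradeoff + lr_step * num_updates
    else:
        value = decay_factor * num_updates ** -2.0
    return float(np.clip(np.random.normal(value, sigma), 0.0, 1.0))

print([round(tradeoff_at(t), 3) for t in (0, 2000, 4000, 8000)])
```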
Python | def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates <= self.tradeoff_step:
self.tradeoff = self.warmup_init_tradeoff + self.lr_step * num_updates
else:
self.tradeoff = self.decay_factor * num_updates ** -0.5
self.clip()
self.set_tradeoff()
    return self.tradeoff
Python | def register_lmoutschedule(name):
"""Decorator to register a new criterion."""
def register_lmoutschedule_cls(cls):
if name in LMOUTSCHEDULE_REGISTRY:
raise ValueError('Cannot register duplicate criterion ({})'.format(name))
LMOUTSCHEDULE_REGISTRY[name] = cls
LMOUTSCHEDULE_CLASS_NAMES.add(cls.__name__)
return cls
    return register_lmoutschedule_cls
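A minimal, self-contained usage sketch of the registry above; the schedule name and class are hypothetical and only the registration mechanics are shown:

```py
LMOUTSCHEDULE_REGISTRY = {}
LMOUTSCHEDULE_CLASS_NAMES = set()

def register_lmoutschedule(name):
    def register_lmoutschedule_cls(cls):
        if name in LMOUTSCHEDULE_REGISTRY:
            raise ValueError('Cannot register duplicate LM-output schedule ({})'.format(name))
        LMOUTSCHEDULE_REGISTRY[name] = cls
        LMOUTSCHEDULE_CLASS_NAMES.add(cls.__name__)
        return cls
    return register_lmoutschedule_cls

@register_lmoutschedule("linear_then_inverse_square")
class LinearThenInverseSquareSchedule:
    """Hypothetical schedule class; only registration is demonstrated."""

assert LMOUTSCHEDULE_REGISTRY["linear_then_inverse_square"] is LinearThenInverseSquareSchedule
```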
Python | def load_checkpoint(args, trainer, epoch_itr):
"""Load a checkpoint and replay dataloader to match."""
os.makedirs(args.save_dir, exist_ok=True)
checkpoint_path = os.path.join(args.save_dir, args.restore_file)
if args.load_nmt:
checkpoint_path = os.path.join(args.save_dir, args.load_nmt_file)
        assert os.path.exists(checkpoint_path), '--load-nmt was specified, but no NMT model checkpoint was found.'
        print("Notice: the model will be initialized from an NMT checkpoint.\nIf this is not intended, remove the --load-nmt flag.")
if os.path.isfile(checkpoint_path):
        extra_state = trainer.load_checkpoint(checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,
                                              eval(args.optimizer_overrides), strict=not args.load_nmt)
if extra_state is not None:
# replay train iterator to match checkpoint
epoch_itr.load_state_dict(extra_state['train_iterator'])
print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
trainer.lr_step(epoch_itr.epoch)
trainer.lr_step_update(trainer.get_num_updates())
if 'best' in extra_state:
save_checkpoint.best = extra_state['best']
return True
    return False
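The `strict` toggle above is what lets an LM-NMT model start from an NMT-only checkpoint: with non-strict loading, parameters absent from the checkpoint (the LM decoders) keep their fresh initialization. Assuming the trainer ultimately forwards this flag to PyTorch's `load_state_dict`, a tiny illustration (module names are made up):

```py
import torch.nn as nn

nmt_only = nn.ModuleDict({"encoder": nn.Linear(4, 4)})
lm_nmt = nn.ModuleDict({"encoder": nn.Linear(4, 4), "srclm_decoder": nn.Linear(4, 4)})

# Non-strict loading tolerates parameters that exist in the model but not in the checkpoint.
result = lm_nmt.load_state_dict(nmt_only.state_dict(), strict=False)
print(result.missing_keys)      # ['srclm_decoder.weight', 'srclm_decoder.bias']
print(result.unexpected_keys)   # []
```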
Python | def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates <= self.tradeoff_step:
self.tradeoff = self.warmup_init_tradeoff + self.lr_step * num_updates
else:
self.tradeoff = self.decay_factor / num_updates
self.clip()
self.set_tradeoff()
    return self.tradeoff
Python | def type_to_int(param):
"""Converts the type to an integer."""
type: Union[_type, int, OptionType] = get_type(param)
if isinstance(type, int):
return type
if type in (str, int, float, bool):
if type is str:
return OptionType.STRING
if type is int:
return OptionType.INTEGER
if type is float:
return OptionType.NUMBER
if type is bool:
return OptionType.BOOLEAN
elif isinstance(type, OptionType):
return type
elif type is User or type is Member:
return OptionType.USER
elif type is Channel:
return OptionType.CHANNEL
elif type is Role:
return OptionType.ROLE
else:
raise TypeError(f"Invalid type: {type}") | def type_to_int(param):
"""Converts the type to an integer."""
type: Union[_type, int, OptionType] = get_type(param)
if isinstance(type, int):
return type
if type in (str, int, float, bool):
if type is str:
return OptionType.STRING
if type is int:
return OptionType.INTEGER
if type is float:
return OptionType.NUMBER
if type is bool:
return OptionType.BOOLEAN
elif isinstance(type, OptionType):
return type
elif type is User or type is Member:
return OptionType.USER
elif type is Channel:
return OptionType.CHANNEL
elif type is Role:
return OptionType.ROLE
else:
raise TypeError(f"Invalid type: {type}") |
Python | def loop_params(params: dict, stop: int) -> dict:
"""Loops through the parameters and deletes until stop index."""
print("params:", params)
for i, key in enumerate(params.copy()):
if i > stop:
break
del params[key]
print("params:", params)
    return params
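A self-contained usage sketch of `loop_params` (parameter names are invented): with `stop=1` the first two entries are dropped, which is how `self` and `ctx` are stripped from a bound method's signature. The debug prints are omitted here:

```py
def loop_params(params: dict, stop: int) -> dict:
    # Same logic as above, minus the debug prints: drop the first `stop + 1` keys.
    for i, key in enumerate(params.copy()):
        if i > stop:
            break
        del params[key]
    return params

params = {"self": None, "ctx": None, "member": None, "reason": None}
print(loop_params(dict(params), stop=1))   # {'member': None, 'reason': None}
print(loop_params(dict(params), stop=0))   # {'ctx': None, 'member': None, 'reason': None}
```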
Python | def format_parameters(coro: Coroutine):
"""Formats the parameters of a function."""
params: OrderedDict = signature(coro).parameters
_params: dict = dict(params.items())
if coro.__name__ == "eeeeee":
print(coro.__qualname__)
if "." in coro.__qualname__:
return loop_params(_params, 1)
else:
        return loop_params(_params, 0)
Python | def sync_subcommands(self: Extension, client: Client) -> Optional[dict]:
"""Syncs the subcommands in the extension."""
if not any(
hasattr(func, "__subcommand__")
for _, func in getmembers(self, predicate=iscoroutinefunction)
):
return
bases = {
func.__base__: func.__data__
for _, func in getmembers(self, predicate=iscoroutinefunction)
if hasattr(func, "__subcommand__")
}
if not bases:
return
for base, subcommand in bases.items():
base: str
subcommand: ExternalSubcommandSetup
subcommand.inner.__func__._command_data = subcommand.raw_commands
client._Client__command_coroutines.append(subcommand.inner)
client.event(subcommand.inner, name=f"command_{base}")
for subcommand in bases.values():
scope = subcommand.scope
if scope is not MISSING:
            if isinstance(scope, list):
                for _ in scope:
                    client._scopes.add(_ if isinstance(_, int) else _.id)
else:
client._scopes.add(scope if isinstance(scope, int) else scope.id)
for base, subcommand in bases.items():
base: str
subcommand: ExternalSubcommandSetup
subcommand._super_autocomplete(client)
    return bases
Python | async def _on_component(self, ctx: ComponentContext):
"""on_component callback for modified callbacks."""
websocket = self.client._websocket
if any(
any(hasattr(func, "startswith") or hasattr(func, "regex") for func in funcs)
for _, funcs in websocket._dispatch.events.items()
):
for decorator_custom_id, funcs in websocket._dispatch.events.items():
for func in funcs:
if hasattr(func, "startswith"):
if ctx.data.custom_id.startswith(
decorator_custom_id.replace("component_startswith_", "")
):
log.info(f"{func} startswith {func.startswith} matched")
return websocket._dispatch.dispatch(decorator_custom_id, ctx)
elif hasattr(func, "regex") and fullmatch(
func.regex,
ctx.data.custom_id.replace("component_regex_", ""),
):
log.info(f"{func} regex {func.regex} matched")
                        return websocket._dispatch.dispatch(decorator_custom_id, ctx)
Python | async def _on_modal(self, ctx: CommandContext):
"""on_modal callback for modified callbacks."""
websocket = self.client._websocket
if any(
any(hasattr(func, "startswith") or hasattr(func, "regex") for func in funcs)
for _, funcs in websocket._dispatch.events.items()
):
for decorator_custom_id, funcs in websocket._dispatch.events.items():
for func in funcs:
if hasattr(func, "startswith"):
if ctx.data.custom_id.startswith(
decorator_custom_id.replace("modal_startswith_", "")
):
log.info(f"{func} startswith {func.startswith} matched")
return websocket._dispatch.dispatch(decorator_custom_id, ctx)
elif hasattr(func, "regex") and fullmatch(
func.regex,
ctx.data.custom_id.replace("modal_regex_", ""),
):
log.info(f"{func} regex {func.regex} matched")
                        return websocket._dispatch.dispatch(decorator_custom_id, ctx)
Python | def _options(self) -> Option:
"""
Returns the subcommand group as an option.
The subcommands of the group are in the ``options=`` field of the option.
"""
return Option(
type=OptionType.SUB_COMMAND_GROUP,
name=self.group,
description=self.description,
options=[subcommand._options for subcommand in self.subcommands],
    )
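For reference, the option tree that `_options` produces nests one level: a `SUB_COMMAND_GROUP` whose `options` are `SUB_COMMAND` entries. A hand-written sketch of the resulting payload shape (names and descriptions are illustrative; the integer types follow the Discord application-command schema):

```py
# Illustrative shape only; not produced by running the code above.
group_option = {
    "type": 2,                       # SUB_COMMAND_GROUP
    "name": "settings",
    "description": "Settings group",
    "options": [
        {
            "type": 1,               # SUB_COMMAND
            "name": "show",
            "description": "Show current settings",
            "options": [],
        }
    ],
}
```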
Python | def finish(self) -> Callable[..., Any]:
"""
Function that finishes the setup of the base command.
Use this when you are done creating subcommands for a specified base.
```py
base_var.finish()
```
"""
log.debug(f"SubcommandSetup.finish: {self.base=}")
group_options = [group._options for group in self.groups.values()] if self.groups else []
subcommand_options = (
[subcommand._options for subcommand in self.subcommands.values()]
if self.subcommands
else []
)
options = (group_options + subcommand_options) or None
self.commands: List[ApplicationCommand] = command(
type=ApplicationCommandType.CHAT_INPUT,
name=self.base,
description=self.description,
scope=self.scope,
options=options,
)
async def inner(ctx, *args, sub_command_group=None, sub_command=None, **kwargs) -> None:
if sub_command_group:
group = self.groups[sub_command_group]
subcommand = next(
(sub for sub in group.subcommands if sub.name == sub_command), None
)
else:
subcommand = self.subcommands[sub_command]
return await subcommand.coro(ctx, *args, **kwargs)
inner._command_data = self.commands
self.client._Client__command_coroutines.append(inner)
if self.scope is not MISSING:
        if isinstance(self.scope, list):
            for _ in self.scope:
                self.client._scopes.add(_ if isinstance(_, int) else _.id)
else:
self.client._scopes.add(
self.scope if isinstance(self.scope, int) else self.scope.id
)
    return self.client.event(inner, name=f"command_{self.base}")
Python | def finish(self) -> Callable[..., Any]:
"""
Function that finishes the setup of the base command.
Use this when you are done creating subcommands for a specified base.
```py
base_var.finish()
```
"""
log.debug(f"ExternalSubcommandSetup.finish: {self.base=}")
group_options = [group._options for group in self.groups.values()] if self.groups else []
subcommand_options = (
[subcommand._options for subcommand in self.subcommands.values()]
if self.subcommands
else []
)
options = (group_options + subcommand_options) or MISSING
self.commands: List[ApplicationCommand] = command(
type=ApplicationCommandType.CHAT_INPUT,
name=self.base,
description=self.description,
scope=self.scope,
options=options,
)
    self.raw_commands = self.commands
Python | def ext_subcommand_base(
base: str,
*,
description: Optional[str] = "No description",
scope: Optional[Union[int, Guild, List[int], List[Guild]]] = MISSING,
) -> ExternalSubcommandSetup:
"""
Use this function to initialize a base for future subcommands inside extensions.
Kwargs are optional.
```py
base_name = ext_subcommand_base(
"base_name",
description="Description of the base",
scope=123456789,
)
```
Parameters:
* `base: str`: The base name of the base.
* `?description: str`: The description of the base.
* `?scope: int | Guild | list[int] | list[Guild]`: The scope of the base.
"""
log.debug(f"extension_base: {base=}")
    return ExternalSubcommandSetup(base, description, scope)
Python | def full_data(self) -> Union[dict, List[dict]]:
"""Returns the command in JSON format."""
data: List[Dict[str, List[dict]]] = command(
type=self.type,
name=self.base,
description=self.description if self.type == 1 else None,
options=self.data,
scope=self.scope,
)
if self.scope in {None, MISSING}:
data: Dict[str, List[dict]] = data[0]
if isinstance(data, list):
for i, cmd in enumerate(data.copy()):
for j, opt in enumerate(cmd.get("options", [])):
for key, value in opt.copy().items():
if value is None or value is MISSING:
del data[i]["options"][j][key]
else:
for j, opt in enumerate(data.copy().get("options", [])):
for key, value in opt.copy().items():
if value is None or value is MISSING:
del data["options"][j][key]
    return data