code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def clear(self):
"""Remove all paragraphs except one empty one."""
for p in self._txBody.p_lst[1:]:
self._txBody.remove(p)
p = self.paragraphs[0]
p.clear()
|
Remove all paragraphs except one empty one.
|
clear
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def fit_text(
self,
font_family: str = "Calibri",
max_size: int = 18,
bold: bool = False,
italic: bool = False,
font_file: str | None = None,
):
"""Fit text-frame text entirely within bounds of its shape.
Make the text in this text frame fit entirely within the bounds of its shape by setting
word wrap on and applying the "best-fit" font size to all the text it contains.
:attr:`TextFrame.auto_size` is set to :attr:`MSO_AUTO_SIZE.NONE`. The font size will not
be set larger than `max_size` points. If the path to a matching TrueType font is provided
as `font_file`, that font file will be used for the font metrics. If `font_file` is |None|,
best efforts are made to locate a font file with matching `font_family`, `bold`, and
`italic` installed on the current system (usually succeeds if the font is installed).
"""
# ---no-op when empty as fit behavior not defined for that case---
if self.text == "":
return # pragma: no cover
font_size = self._best_fit_font_size(font_family, max_size, bold, italic, font_file)
self._apply_fit(font_family, font_size, bold, italic)
|
Fit text-frame text entirely within bounds of its shape.
Make the text in this text frame fit entirely within the bounds of its shape by setting
word wrap on and applying the "best-fit" font size to all the text it contains.
:attr:`TextFrame.auto_size` is set to :attr:`MSO_AUTO_SIZE.NONE`. The font size will not
be set larger than `max_size` points. If the path to a matching TrueType font is provided
as `font_file`, that font file will be used for the font metrics. If `font_file` is |None|,
best efforts are made to locate a font file with matching `font_family`, `bold`, and
`italic` installed on the current system (usually succeeds if the font is installed).
|
fit_text
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
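A minimal usage sketch for `fit_text` (not part of the dataset record above); the blank-layout index, textbox dimensions, and output filename are illustrative assumptions, and fitting requires matching font metrics to be available on the system.

from pptx import Presentation
from pptx.util import Inches

prs = Presentation()
slide = prs.slides.add_slide(prs.slide_layouts[6])  # layout 6 is the blank layout in the default template
box = slide.shapes.add_textbox(Inches(1), Inches(1), Inches(4), Inches(2))
tf = box.text_frame
tf.text = "A fairly long caption that should shrink until it fits inside its textbox"
# best-fit the text at no more than 18pt; pass font_file= when the family is not installed
tf.fit_text(font_family="Calibri", max_size=18)
prs.save("fit_text_demo.pptx")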
def word_wrap(self) -> bool | None:
"""`True` when lines of text in this shape are wrapped to fit within the shape's width.
Read-write. Valid values are True, False, or None. True and False turn word wrap on and
off, respectively. Assigning None to word wrap causes any word wrap setting to be removed
from the text frame, causing it to inherit this setting from its style hierarchy.
"""
return {
ST_TextWrappingType.SQUARE: True,
ST_TextWrappingType.NONE: False,
None: None,
}[self._txBody.bodyPr.wrap]
|
`True` when lines of text in this shape are wrapped to fit within the shape's width.
Read-write. Valid values are True, False, or None. True and False turn word wrap on and
off, respectively. Assigning None to word wrap causes any word wrap setting to be removed
from the text frame, causing it to inherit this setting from its style hierarchy.
|
word_wrap
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
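A short sketch of toggling `word_wrap` on an existing text frame (`tf` is assumed to be a |TextFrame| obtained as in the previous example):

tf.word_wrap = True   # wrap lines to the shape's width
tf.word_wrap = False  # let lines extend beyond the shape
tf.word_wrap = None   # remove the override and inherit from the style hierarchy
print(tf.word_wrap)   # -> None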
def _apply_fit(self, font_family: str, font_size: int, is_bold: bool, is_italic: bool):
"""Arrange text in this text frame to fit inside its extents.
This is accomplished by setting auto size off, wrap on, and setting the font of
all its text to `font_family`, `font_size`, `is_bold`, and `is_italic`.
"""
self.auto_size = MSO_AUTO_SIZE.NONE
self.word_wrap = True
self._set_font(font_family, font_size, is_bold, is_italic)
|
Arrange text in this text frame to fit inside its extents.
This is accomplished by setting auto size off, wrap on, and setting the font of
all its text to `font_family`, `font_size`, `is_bold`, and `is_italic`.
|
_apply_fit
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def _best_fit_font_size(
self, family: str, max_size: int, bold: bool, italic: bool, font_file: str | None
) -> int:
"""Return font-size in points that best fits text in this text-frame.
The best-fit font size is the largest integer point size not greater than `max_size` that
allows all the text in this text frame to fit inside its extents when rendered using the
font described by `family`, `bold`, and `italic`. If `font_file` is specified, it is used
to calculate the fit, whether or not it matches `family`, `bold`, and `italic`.
"""
if font_file is None:
font_file = FontFiles.find(family, bold, italic)
return TextFitter.best_fit_font_size(self.text, self._extents, max_size, font_file)
|
Return font-size in points that best fits text in this text-frame.
The best-fit font size is the largest integer point size not greater than `max_size` that
allows all the text in this text frame to fit inside its extents when rendered using the
font described by `family`, `bold`, and `italic`. If `font_file` is specified, it is used
to calculate the fit, whether or not it matches `family`, `bold`, and `italic`.
|
_best_fit_font_size
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def _extents(self) -> tuple[Length, Length]:
"""(cx, cy) 2-tuple representing the effective rendering area of this text-frame.
Margins are taken into account.
"""
parent = cast("ProvidesExtents", self._parent)
return (
Length(parent.width - self.margin_left - self.margin_right),
Length(parent.height - self.margin_top - self.margin_bottom),
)
|
(cx, cy) 2-tuple representing the effective rendering area of this text-frame.
Margins are taken into account.
|
_extents
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def _set_font(self, family: str, size: int, bold: bool, italic: bool):
"""Set the font properties of all the text in this text frame."""
def iter_rPrs(txBody: CT_TextBody) -> Iterator[CT_TextCharacterProperties]:
for p in txBody.p_lst:
for elm in p.content_children:
yield elm.get_or_add_rPr()
# generate a:endParaRPr for each <a:p> element
yield p.get_or_add_endParaRPr()
def set_rPr_font(
rPr: CT_TextCharacterProperties, name: str, size: int, bold: bool, italic: bool
):
f = Font(rPr)
f.name, f.size, f.bold, f.italic = family, Pt(size), bold, italic
txBody = self._element
for rPr in iter_rPrs(txBody):
set_rPr_font(rPr, family, size, bold, italic)
|
Set the font properties of all the text in this text frame.
|
_set_font
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def color(self) -> ColorFormat:
"""The |ColorFormat| instance that provides access to the color settings for this font."""
if self.fill.type != MSO_FILL.SOLID:
self.fill.solid()
return self.fill.fore_color
|
The |ColorFormat| instance that provides access to the color settings for this font.
|
color
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def language_id(self) -> MSO_LANGUAGE_ID | None:
"""Get or set the language id of this |Font| instance.
The language id is a member of the :ref:`MsoLanguageId` enumeration. Assigning |None|
removes any language setting, the same behavior as assigning `MSO_LANGUAGE_ID.NONE`.
"""
lang = self._rPr.lang
if lang is None:
return MSO_LANGUAGE_ID.NONE
return self._rPr.lang
|
Get or set the language id of this |Font| instance.
The language id is a member of the :ref:`MsoLanguageId` enumeration. Assigning |None|
removes any language setting, the same behavior as assigning `MSO_LANGUAGE_ID.NONE`.
|
language_id
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def name(self) -> str | None:
"""Get or set the typeface name for this |Font| instance.
Causes the text it controls to appear in the named font, if a matching font is found.
Returns |None| if the typeface is currently inherited from the theme. Setting it to |None|
removes any override of the theme typeface.
"""
latin = self._rPr.latin
if latin is None:
return None
return latin.typeface
|
Get or set the typeface name for this |Font| instance.
Causes the text it controls to appear in the named font, if a matching font is found.
Returns |None| if the typeface is currently inherited from the theme. Setting it to |None|
removes any override of the theme typeface.
|
name
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def size(self) -> Length | None:
"""Indicates the font height in English Metric Units (EMU).
Read/write. |None| indicates the font size should be inherited from its style hierarchy,
such as a placeholder or document defaults (usually 18pt). |Length| is a subclass of |int|
having properties for convenient conversion into points or other length units. Likewise,
the :class:`pptx.util.Pt` class allows convenient specification of point values::
>>> font.size = Pt(24)
>>> font.size
304800
>>> font.size.pt
24.0
"""
sz = self._rPr.sz
if sz is None:
return None
return Centipoints(sz)
|
Indicates the font height in English Metric Units (EMU).
Read/write. |None| indicates the font size should be inherited from its style hierarchy,
such as a placeholder or document defaults (usually 18pt). |Length| is a subclass of |int|
having properties for convenient conversion into points or other length units. Likewise,
the :class:`pptx.util.Pt` class allows convenient specification of point values::
>>> font.size = Pt(24)
>>> font.size
304800
>>> font.size.pt
24.0
|
size
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def underline(self) -> bool | MSO_TEXT_UNDERLINE_TYPE | None:
"""Indicaties the underline setting for this font.
Value is |True|, |False|, |None|, or a member of the :ref:`MsoTextUnderlineType`
enumeration. |None| is the default and indicates the underline setting should be inherited
from the style hierarchy, such as from a placeholder. |True| indicates single underline.
|False| indicates no underline. Other settings such as double and wavy underlining are
indicated with members of the :ref:`MsoTextUnderlineType` enumeration.
"""
u = self._rPr.u
if u is MSO_UNDERLINE.NONE:
return False
if u is MSO_UNDERLINE.SINGLE_LINE:
return True
return u
|
Indicates the underline setting for this font.
Value is |True|, |False|, |None|, or a member of the :ref:`MsoTextUnderlineType`
enumeration. |None| is the default and indicates the underline setting should be inherited
from the style hierarchy, such as from a placeholder. |True| indicates single underline.
|False| indicates no underline. Other settings such as double and wavy underlining are
indicated with members of the :ref:`MsoTextUnderlineType` enumeration.
|
underline
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
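The |Font| members shown above (`color`, `name`, `size`, `underline`, `language_id`) are usually set together on a run's font; a minimal sketch, assuming `run` is an existing |_Run|:

from pptx.dml.color import RGBColor
from pptx.enum.lang import MSO_LANGUAGE_ID
from pptx.enum.text import MSO_TEXT_UNDERLINE_TYPE
from pptx.util import Pt

font = run.font                                   # `run` is assumed to exist
font.name = "Calibri"                             # override the theme typeface
font.size = Pt(24)                                # stored as Centipoints, a Length subclass
font.underline = MSO_TEXT_UNDERLINE_TYPE.WAVY_LINE
font.color.rgb = RGBColor(0xC0, 0x00, 0x00)       # accessing .color forces a solid fill, then sets fore_color
font.language_id = MSO_LANGUAGE_ID.POLISH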
def address(self) -> str | None:
"""The URL of the hyperlink.
Read/write. URL can be on http, https, mailto, or file scheme; others may work.
"""
if self._hlinkClick is None:
return None
return self.part.target_ref(self._hlinkClick.rId)
|
The URL of the hyperlink.
Read/write. URL can be on http, https, mailto, or file scheme; others may work.
|
address
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def add_run(self) -> _Run:
"""Return a new run appended to the runs in this paragraph."""
r = self._p.add_r()
return _Run(r, self)
|
Return a new run appended to the runs in this paragraph.
|
add_run
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def clear(self):
"""Remove all content from this paragraph.
Paragraph properties are preserved. Content includes runs, line breaks, and fields.
"""
for elm in self._element.content_children:
self._element.remove(elm)
return self
|
Remove all content from this paragraph.
Paragraph properties are preserved. Content includes runs, line breaks, and fields.
|
clear
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def line_spacing(self) -> int | float | Length | None:
"""The space between baselines in successive lines of this paragraph.
A value of |None| indicates no explicit value is assigned and its effective value is
inherited from the paragraph's style hierarchy. A numeric value, e.g. `2` or `1.5`,
indicates spacing is applied in multiples of line heights. A |Length| value such as
`Pt(12)` indicates spacing is a fixed height. The |Pt| value class is a convenient way to
apply line spacing in units of points.
"""
pPr = self._p.pPr
if pPr is None:
return None
return pPr.line_spacing
|
The space between baselines in successive lines of this paragraph.
A value of |None| indicates no explicit value is assigned and its effective value is
inherited from the paragraph's style hierarchy. A numeric value, e.g. `2` or `1.5`,
indicates spacing is applied in multiples of line heights. A |Length| value such as
`Pt(12)` indicates spacing is a fixed height. The |Pt| value class is a convenient way to
apply line spacing in units of points.
|
line_spacing
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def space_after(self) -> Length | None:
"""The spacing to appear between this paragraph and the subsequent paragraph.
A value of |None| indicates no explicit value is assigned and its effective value is
inherited from the paragraph's style hierarchy. |Length| objects provide convenience
properties, such as `.pt` and `.inches`, that allow easy conversion to various length
units.
"""
pPr = self._p.pPr
if pPr is None:
return None
return pPr.space_after
|
The spacing to appear between this paragraph and the subsequent paragraph.
A value of |None| indicates no explicit value is assigned and its effective value is
inherited from the paragraph's style hierarchy. |Length| objects provide convenience
properties, such as `.pt` and `.inches`, that allow easy conversion to various length
units.
|
space_after
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
def space_before(self) -> Length | None:
"""The spacing to appear between this paragraph and the prior paragraph.
A value of |None| indicates no explicit value is assigned and its effective value is
inherited from the paragraph's style hierarchy. |Length| objects provide convenience
properties, such as `.pt` and `.cm`, that allow easy conversion to various length units.
"""
pPr = self._p.pPr
if pPr is None:
return None
return pPr.space_before
|
The spacing to appear between this paragraph and the prior paragraph.
A value of |None| indicates no explicit value is assigned and its effective value is
inherited from the paragraph's style hierarchy. |Length| objects provide convenience
properties, such as `.pt` and `.cm`, that allow easy conversion to various length units.
|
space_before
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
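A sketch combining the three spacing properties above on a paragraph; `tf` is assumed to be an existing |TextFrame|:

from pptx.util import Pt

para = tf.paragraphs[0]
para.line_spacing = 1.5     # multiple of line height
para.space_before = Pt(6)   # fixed 6pt before the paragraph
para.space_after = Pt(12)   # fixed 12pt after the paragraph
print(para.line_spacing, para.space_after.pt)   # -> 1.5 12.0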
def hyperlink(self) -> _Hyperlink:
"""Proxy for any `a:hlinkClick` element under the run properties element.
Created on demand, the hyperlink object is available whether an `a:hlinkClick` element is
present or not, and creates or deletes that element as appropriate in response to actions
on its methods and attributes.
"""
rPr = self._r.get_or_add_rPr()
return _Hyperlink(rPr, self)
|
Proxy for any `a:hlinkClick` element under the run properties element.
Created on demand, the hyperlink object is available whether an `a:hlinkClick` element is
present or not, and creates or deletes that element as appropriate in response to actions
on its methods and attributes.
|
hyperlink
|
python
|
scanny/python-pptx
|
src/pptx/text/text.py
|
https://github.com/scanny/python-pptx/blob/master/src/pptx/text/text.py
|
MIT
|
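A sketch tying together `clear()`, `add_run()`, and the hyperlink proxy; `tf` is assumed to be an existing |TextFrame| and the URL is illustrative:

para = tf.paragraphs[0]
para.clear()                 # drop existing runs, keep paragraph-level formatting
run = para.add_run()
run.text = "python-pptx on GitHub"
run.hyperlink.address = "https://github.com/scanny/python-pptx"
print(run.hyperlink.address)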
def __getattr__(self, name):
"""
Intercept attribute access to generalize "with_{xmlattr_name}()"
methods.
"""
if name in self._xmlattr_method_map:
def with_xmlattr(value):
xmlattr_name = self._xmlattr_method_map[name]
self._set_xmlattr(xmlattr_name, value)
return self
return with_xmlattr
|
Intercept attribute access to generalize "with_{xmlattr_name}()"
methods.
|
__getattr__
|
python
|
scanny/python-pptx
|
tests/unitdata.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitdata.py
|
MIT
|
def with_nsdecls(self, *nspfxs):
"""
Cause the element to contain namespace declarations. By default, the
namespace prefixes defined in the Builder class are used. These can
be overridden by providing explicit prefixes, e.g.
``with_nsdecls('a', 'r')``.
"""
if not nspfxs:
nspfxs = self.__nspfxs__
self._nsdecls = " %s" % nsdecls(*nspfxs)
return self
|
Cause the element to contain namespace declarations. By default, the
namespace prefixes defined in the Builder class are used. These can
be overridden by providing explicit prefixes, e.g.
``with_nsdecls('a', 'r')``.
|
with_nsdecls
|
python
|
scanny/python-pptx
|
tests/unitdata.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitdata.py
|
MIT
|
def xml(self, indent=0):
"""
Return element XML based on attribute settings
"""
indent_str = " " * indent
if self._is_empty:
xml = "%s%s\n" % (indent_str, self._empty_element_tag)
else:
xml = "%s\n" % self._non_empty_element_xml(indent)
return xml
|
Return element XML based on attribute settings
|
xml
|
python
|
scanny/python-pptx
|
tests/unitdata.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitdata.py
|
MIT
|
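A hypothetical chain showing how the builder pieces above fit together; the builder name `a_prstGeom` and its `with_prst` method are assumptions for illustration (the `with_*` methods are generated by the `__getattr__` mapping shown earlier), not names taken from this listing:

# xml() renders the element; with_nsdecls() makes the element carry namespace declarations
xml = a_prstGeom().with_nsdecls().with_prst("roundRect").xml(indent=2)
print(xml)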
def make_bubble_chart_data(ser_count, point_count):
"""
Return a |BubbleChartData| object populated with *ser_count* series,
each having *point_count* data points.
"""
points = (
(1.1, 11.1, 10.0),
(2.1, 12.1, 20.0),
(3.1, 13.1, 30.0),
(1.2, 11.2, 40.0),
(2.2, 12.2, 50.0),
(3.2, 13.2, 60.0),
)
chart_data = BubbleChartData()
for i in range(ser_count):
series_label = "Series %d" % (i + 1)
series = chart_data.add_series(series_label)
for j in range(point_count):
point_idx = (i * point_count) + j
x, y, size = points[point_idx]
series.add_data_point(x, y, size)
return chart_data
|
Return a |BubbleChartData| object populated with *ser_count* series,
each having *point_count* data points.
|
make_bubble_chart_data
|
python
|
scanny/python-pptx
|
tests/chart/test_xmlwriter.py
|
https://github.com/scanny/python-pptx/blob/master/tests/chart/test_xmlwriter.py
|
MIT
|
def make_category_chart_data(cat_count, cat_type, ser_count):
"""
Return a |CategoryChartData| instance populated with *cat_count*
categories of type *cat_type* and *ser_count* series. Values are
auto-generated.
"""
category_labels = {
date: (date(2016, 12, 27), date(2016, 12, 28), date(2016, 12, 29)),
float: (1.1, 2.2, 3.3, 4.4, 5.5),
str: ("Foo", "Bar", "Baz", "Boo", "Far", "Faz"),
}[cat_type]
point_values = count(1.1, 1.1)
chart_data = CategoryChartData()
chart_data.categories = category_labels[:cat_count]
for idx in range(ser_count):
series_title = "Series %d" % (idx + 1)
series_values = tuple(islice(point_values, cat_count))
series_values = [round(x * 10) / 10.0 for x in series_values]
chart_data.add_series(series_title, series_values)
return chart_data
|
Return a |CategoryChartData| instance populated with *cat_count*
categories of type *cat_type* and *ser_count* series. Values are
auto-generated.
|
make_category_chart_data
|
python
|
scanny/python-pptx
|
tests/chart/test_xmlwriter.py
|
https://github.com/scanny/python-pptx/blob/master/tests/chart/test_xmlwriter.py
|
MIT
|
def make_xy_chart_data(ser_count, point_count):
"""
Return an |XyChartData| object populated with *ser_count* series each
having *point_count* data points. Values are auto-generated.
"""
points = (
(1.1, 11.1),
(2.1, 12.1),
(3.1, 13.1),
(1.2, 11.2),
(2.2, 12.2),
(3.2, 13.2),
)
chart_data = XyChartData()
for i in range(ser_count):
series_label = "Series %d" % (i + 1)
series = chart_data.add_series(series_label)
for j in range(point_count):
point_idx = (i * point_count) + j
x, y = points[point_idx]
series.add_data_point(x, y)
return chart_data
|
Return an |XyChartData| object populated with *ser_count* series each
having *point_count* data points. Values are auto-generated.
|
make_xy_chart_data
|
python
|
scanny/python-pptx
|
tests/chart/test_xmlwriter.py
|
https://github.com/scanny/python-pptx/blob/master/tests/chart/test_xmlwriter.py
|
MIT
|
def and_it_knows_the_relative_partname_for_an_internal_rel(self, request):
"""Internal relationships have a relative reference for `.target_ref`.
A relative reference looks like "../slideLayouts/slideLayout1.xml". This form
is suitable for writing to a .rels file.
"""
property_mock(
request,
_Relationship,
"target_partname",
return_value=PackURI("/ppt/media/image1.png"),
)
relationship = _Relationship("/ppt/slides", None, None, None, None)
assert relationship.target_ref == "../media/image1.png"
|
Internal relationships have a relative reference for `.target_ref`.
A relative reference looks like "../slideLayouts/slideLayout1.xml". This form
is suitable for writing to a .rels file.
|
and_it_knows_the_relative_partname_for_an_internal_rel
|
python
|
scanny/python-pptx
|
tests/opc/test_package.py
|
https://github.com/scanny/python-pptx/blob/master/tests/opc/test_package.py
|
MIT
|
def it_can_create_a_new_tbl_element_tree(self):
"""
Indirectly tests that column widths are a proportional split of total
width and that row heights a proportional split of total height.
"""
expected_xml = (
'<a:tbl %s>\n <a:tblPr firstRow="1" bandRow="1">\n <a:tableSt'
"yleId>{5C22544A-7EE6-4342-B048-85BDC9FD1C3A}</a:tableStyleId>\n "
' </a:tblPr>\n <a:tblGrid>\n <a:gridCol w="111"/>\n <a:gri'
'dCol w="111"/>\n <a:gridCol w="112"/>\n </a:tblGrid>\n <a:t'
'r h="222">\n <a:tc>\n <a:txBody>\n <a:bodyPr/>\n '
" <a:lstStyle/>\n <a:p/>\n </a:txBody>\n <"
"a:tcPr/>\n </a:tc>\n <a:tc>\n <a:txBody>\n <a:"
"bodyPr/>\n <a:lstStyle/>\n <a:p/>\n </a:txBod"
"y>\n <a:tcPr/>\n </a:tc>\n <a:tc>\n <a:txBody>\n"
" <a:bodyPr/>\n <a:lstStyle/>\n <a:p/>\n "
" </a:txBody>\n <a:tcPr/>\n </a:tc>\n </a:tr>\n <a:tr "
'h="223">\n <a:tc>\n <a:txBody>\n <a:bodyPr/>\n '
" <a:lstStyle/>\n <a:p/>\n </a:txBody>\n <a:"
"tcPr/>\n </a:tc>\n <a:tc>\n <a:txBody>\n <a:bo"
"dyPr/>\n <a:lstStyle/>\n <a:p/>\n </a:txBody>"
"\n <a:tcPr/>\n </a:tc>\n <a:tc>\n <a:txBody>\n "
" <a:bodyPr/>\n <a:lstStyle/>\n <a:p/>\n "
"</a:txBody>\n <a:tcPr/>\n </a:tc>\n </a:tr>\n</a:tbl>\n" % nsdecls("a")
)
tbl = CT_Table.new_tbl(2, 3, 334, 445)
assert tbl.xml == expected_xml
|
Indirectly tests that column widths are a proportional split of total
width and that row heights a proportional split of total height.
|
it_can_create_a_new_tbl_element_tree
|
python
|
scanny/python-pptx
|
tests/oxml/test_table.py
|
https://github.com/scanny/python-pptx/blob/master/tests/oxml/test_table.py
|
MIT
|
def it_can_create_a_new_pic_element(self, desc, xml_desc):
"""`desc` attr (often filename) is XML-escaped to handle special characters.
In particular, ampersand ('&'), less/greater-than ('</>') etc.
"""
pic = CT_Picture.new_pic(
shape_id=9, name="Picture 8", desc=desc, rId="rId42", x=1, y=2, cx=3, cy=4
)
assert pic.xml == (
"<p:pic %s>\n"
" <p:nvPicPr>\n"
' <p:cNvPr id="9" name="Picture 8" descr="%s"/>\n'
" <p:cNvPicPr>\n"
' <a:picLocks noChangeAspect="1"/>\n'
" </p:cNvPicPr>\n"
" <p:nvPr/>\n"
" </p:nvPicPr>\n"
" <p:blipFill>\n"
' <a:blip r:embed="rId42"/>\n'
" <a:stretch>\n"
" <a:fillRect/>\n"
" </a:stretch>\n"
" </p:blipFill>\n"
" <p:spPr>\n"
" <a:xfrm>\n"
' <a:off x="1" y="2"/>\n'
' <a:ext cx="3" cy="4"/>\n'
" </a:xfrm>\n"
' <a:prstGeom prst="rect">\n'
" <a:avLst/>\n"
" </a:prstGeom>\n"
" </p:spPr>\n"
"</p:pic>\n" % (nsdecls("a", "p", "r"), xml_desc)
)
|
`desc` attr (often filename) is XML-escaped to handle special characters.
In particular, ampersand ('&'), less/greater-than ('</>') etc.
|
it_can_create_a_new_pic_element
|
python
|
scanny/python-pptx
|
tests/oxml/shapes/test_picture.py
|
https://github.com/scanny/python-pptx/blob/master/tests/oxml/shapes/test_picture.py
|
MIT
|
def it_provides_access_to_an_existing_notes_master_part(
self, notes_master_part_, part_related_by_
):
"""This is the first of a two-part test to cover the existing notes master case.
The notes master not-present case follows.
"""
prs_part = PresentationPart(None, None, None, None)
part_related_by_.return_value = notes_master_part_
notes_master_part = prs_part.notes_master_part
prs_part.part_related_by.assert_called_once_with(prs_part, RT.NOTES_MASTER)
assert notes_master_part is notes_master_part_
|
This is the first of a two-part test to cover the existing notes master case.
The notes master not-present case follows.
|
it_provides_access_to_an_existing_notes_master_part
|
python
|
scanny/python-pptx
|
tests/parts/test_presentation.py
|
https://github.com/scanny/python-pptx/blob/master/tests/parts/test_presentation.py
|
MIT
|
def but_it_adds_a_notes_master_part_when_needed(
self, request, package_, notes_master_part_, part_related_by_, relate_to_
):
"""This is the second of a two-part test to cover notes-master-not-present case.
The notes master present case is just above.
"""
NotesMasterPart_ = class_mock(request, "pptx.parts.presentation.NotesMasterPart")
NotesMasterPart_.create_default.return_value = notes_master_part_
part_related_by_.side_effect = KeyError
prs_part = PresentationPart(None, None, package_, None)
notes_master_part = prs_part.notes_master_part
NotesMasterPart_.create_default.assert_called_once_with(package_)
relate_to_.assert_called_once_with(prs_part, notes_master_part_, RT.NOTES_MASTER)
assert notes_master_part is notes_master_part_
|
This is the second of a two-part test to cover notes-master-not-present case.
The notes master present case is just above.
|
but_it_adds_a_notes_master_part_when_needed
|
python
|
scanny/python-pptx
|
tests/parts/test_presentation.py
|
https://github.com/scanny/python-pptx/blob/master/tests/parts/test_presentation.py
|
MIT
|
def it_should_update_actual_value_on_indexed_assignment(self, indexed_assignment_fixture_):
"""
Assignment to AdjustmentCollection[n] updates nth actual
"""
adjs, idx, new_val, expected = indexed_assignment_fixture_
adjs[idx] = new_val
assert adjs._adjustments[idx].actual == expected
|
Assignment to AdjustmentCollection[n] updates nth actual
|
it_should_update_actual_value_on_indexed_assignment
|
python
|
scanny/python-pptx
|
tests/shapes/test_autoshape.py
|
https://github.com/scanny/python-pptx/blob/master/tests/shapes/test_autoshape.py
|
MIT
|
def it_should_raise_on_assigned_bad_value(self, adjustments):
"""
AdjustmentCollection[n] = val raises on val is not number
"""
with pytest.raises(ValueError):
adjustments[0] = "1.0"
|
AdjustmentCollection[n] = val raises on val is not number
|
it_should_raise_on_assigned_bad_value
|
python
|
scanny/python-pptx
|
tests/shapes/test_autoshape.py
|
https://github.com/scanny/python-pptx/blob/master/tests/shapes/test_autoshape.py
|
MIT
|
def in_order(node):
"""
Traverse the tree depth first to produce a list of its values,
in order.
"""
result = []
if node is None:
return result
result.extend(in_order(node._lesser))
result.append(node.value)
result.extend(in_order(node._greater))
return result
|
Traverse the tree depth first to produce a list of its values,
in order.
|
in_order
|
python
|
scanny/python-pptx
|
tests/text/test_layout.py
|
https://github.com/scanny/python-pptx/blob/master/tests/text/test_layout.py
|
MIT
|
def part_(self, request, url, rId):
"""
Mock Part instance suitable for patching into _Hyperlink.part
property. It returns url for target_ref() and rId for relate_to().
"""
part_ = instance_mock(request, XmlPart)
part_.target_ref.return_value = url
part_.relate_to.return_value = rId
return part_
|
Mock Part instance suitable for patching into _Hyperlink.part
property. It returns url for target_ref() and rId for relate_to().
|
part_
|
python
|
scanny/python-pptx
|
tests/text/test_text.py
|
https://github.com/scanny/python-pptx/blob/master/tests/text/test_text.py
|
MIT
|
def nsdecls(*nspfxs):
"""
Return a string containing a namespace declaration for each of *nspfxs*,
in the order they are specified.
"""
nsdecls = ""
for nspfx in nspfxs:
nsdecls += ' xmlns:%s="%s"' % (nspfx, nsmap[nspfx])
return nsdecls
|
Return a string containing a namespace declaration for each of *nspfxs*,
in the order they are specified.
|
nsdecls
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
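For illustration, the kind of string this helper produces for the common DrawingML prefixes (the exact URIs come from the module's `nsmap`; the standard OOXML values are assumed here):

print(nsdecls("a", "r"))
# ->  xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"
#     xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"
# (a single string; wrapped here for readability)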
def connect_children(self, child_node_list):
"""
Make each of the elements appearing in *child_node_list* a child of
this element.
"""
for node in child_node_list:
child = node.element
self._children.append(child)
|
Make each of the elements appearing in *child_node_list* a child of
this element.
|
connect_children
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def from_token(cls, token):
"""
Return an ``Element`` object constructed from a parser element token.
"""
tagname = token.tagname
attrs = [(name, value) for name, value in token.attr_list]
text = token.text
return cls(tagname, attrs, text)
|
Return an ``Element`` object constructed from a parser element token.
|
from_token
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def local_nspfxs(self):
"""
The namespace prefixes local to this element, both on the tagname and
all of its attributes. An empty string (``''``) is used to represent
the default namespace for an element tag having no prefix.
"""
def nspfx(name, is_element=False):
idx = name.find(":")
if idx == -1:
return "" if is_element else None
return name[:idx]
nspfxs = [nspfx(self._tagname, True)]
for name, val in self._attrs:
pfx = nspfx(name)
if pfx is None or pfx in nspfxs:
continue # pragma: no cover
nspfxs.append(pfx)
return nspfxs
|
The namespace prefixes local to this element, both on the tagname and
all of its attributes. An empty string (``''``) is used to represent
the default namespace for an element tag having no prefix.
|
local_nspfxs
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def nspfxs(self):
"""
A sequence containing each of the namespace prefixes appearing in
this tree. Each prefix appears once and only once, and in document
order.
"""
def merge(seq, seq_2):
for item in seq_2:
if item in seq:
continue
seq.append(item)
nspfxs = self.local_nspfxs
for child in self._children:
merge(nspfxs, child.nspfxs)
return nspfxs
|
A sequence containing each of the namespace prefixes appearing in
this tree. Each prefix appears once and only once, and in document
order.
|
nspfxs
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def _xml(self, indent):
"""
Return a string containing the XML of this element and all its
children with a starting indent of *indent* spaces.
"""
self._indent_str = " " * indent
xml = self._start_tag
for child in self._children:
xml += child._xml(indent + 2)
xml += self._end_tag
return xml
|
Return a string containing the XML of this element and all its
children with a starting indent of *indent* spaces.
|
_xml
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def _start_tag(self):
"""
The text of the opening tag of this element, including attributes. If
this is the root element, a namespace declaration for each of the
namespace prefixes that occur in this tree is added in front of any
attributes. If this element contains text, that text follows the
start tag. If not, and this element has no children, an empty tag is
returned. Otherwise, an opening tag is returned, followed by
a newline. The tag is indented by this element's indent value in all
cases.
"""
_nsdecls = nsdecls(*self.nspfxs) if self.is_root else ""
tag = "%s<%s%s" % (self._indent_str, self._tagname, _nsdecls)
for attr in self._attrs:
name, value = attr
tag += ' %s="%s"' % (name, value)
if self._text:
tag += ">%s" % self._text
elif self._children:
tag += ">\n"
else:
tag += "/>\n"
return tag
|
The text of the opening tag of this element, including attributes. If
this is the root element, a namespace declaration for each of the
namespace prefixes that occur in this tree is added in front of any
attributes. If this element contains text, that text follows the
start tag. If not, and this element has no children, an empty tag is
returned. Otherwise, an opening tag is returned, followed by
a newline. The tag is indented by this element's indent value in all
cases.
|
_start_tag
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def _end_tag(self):
"""
The text of the closing tag of this element, if there is one. If the
element contains text, no leading indentation is included.
"""
if self._text:
tag = "</%s>\n" % self._tagname
elif self._children:
tag = "%s</%s>\n" % (self._indent_str, self._tagname)
else:
tag = ""
return tag
|
The text of the closing tag of this element, if there is one. If the
element contains text, no leading indentation is included.
|
_end_tag
|
python
|
scanny/python-pptx
|
tests/unitutil/cxml.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/cxml.py
|
MIT
|
def snippet_bytes(snippet_file_name: str):
"""Return bytes read from snippet file having `snippet_file_name`."""
snippet_file_path = os.path.join(test_file_dir, "snippets", "%s.txt" % snippet_file_name)
with open(snippet_file_path, "rb") as f:
return f.read().strip()
|
Return bytes read from snippet file having `snippet_file_name`.
|
snippet_bytes
|
python
|
scanny/python-pptx
|
tests/unitutil/file.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/file.py
|
MIT
|
def snippet_text(snippet_file_name: str):
"""
Return the unicode text read from the test snippet file having
*snippet_file_name*.
"""
snippet_file_path = os.path.join(test_file_dir, "snippets", "%s.txt" % snippet_file_name)
with open(snippet_file_path, "rb") as f:
snippet_bytes = f.read()
return snippet_bytes.decode("utf-8")
|
Return the unicode text read from the test snippet file having
*snippet_file_name*.
|
snippet_text
|
python
|
scanny/python-pptx
|
tests/unitutil/file.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/file.py
|
MIT
|
def testfile_bytes(*segments: str):
"""Return bytes of file at path formed by adding `segments` to test file dir."""
path = os.path.join(test_file_dir, *segments)
with open(path, "rb") as f:
return f.read()
|
Return bytes of file at path formed by adding `segments` to test file dir.
|
testfile_bytes
|
python
|
scanny/python-pptx
|
tests/unitutil/file.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/file.py
|
MIT
|
def class_mock(
request: FixtureRequest, q_class_name: str, autospec: bool = True, **kwargs: Any
) -> Mock:
"""Return a mock patching the class with qualified name *q_class_name*.
The mock is autospec'ed based on the patched class unless the optional argument
*autospec* is set to False. Any other keyword arguments are passed through to
Mock(). Patch is reversed after calling test returns.
"""
_patch = patch(q_class_name, autospec=autospec, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return a mock patching the class with qualified name *q_class_name*.
The mock is autospec'ed based on the patched class unless the optional argument
*autospec* is set to False. Any other keyword arguments are passed through to
Mock(). Patch is reversed after calling test returns.
|
class_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def cls_attr_mock(
request: FixtureRequest, cls: type, attr_name: str, name: str | None = None, **kwargs: Any
) -> Mock:
"""Return a mock for an attribute (class variable) `attr_name` on `cls`.
Patch is reversed after pytest uses it.
"""
name = request.fixturename if name is None else name
_patch = patch.object(cls, attr_name, name=name, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return a mock for an attribute (class variable) `attr_name` on `cls`.
Patch is reversed after pytest uses it.
|
cls_attr_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def function_mock(
request: FixtureRequest, q_function_name: str, autospec: bool = True, **kwargs: Any
):
"""Return mock patching function with qualified name `q_function_name`.
Patch is reversed after calling test returns.
"""
_patch = patch(q_function_name, autospec=autospec, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return mock patching function with qualified name `q_function_name`.
Patch is reversed after calling test returns.
|
function_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def initializer_mock(request: FixtureRequest, cls: type, autospec: bool = True, **kwargs: Any):
"""Return mock for __init__() method on `cls`.
The patch is reversed after pytest uses it.
"""
_patch = patch.object(cls, "__init__", autospec=autospec, return_value=None, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return mock for __init__() method on `cls`.
The patch is reversed after pytest uses it.
|
initializer_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def instance_mock(
request: FixtureRequest,
cls: type,
name: str | None = None,
spec_set: bool = True,
**kwargs: Any,
) -> Mock:
"""Return mock for instance of `cls` that draws its spec from that class.
The mock does not allow new attributes to be set on the instance. If `name` is
missing or |None|, the name of the returned |Mock| instance is set to
`request.fixturename`. Additional keyword arguments are passed through to the Mock()
call that creates the mock.
"""
name = name if name is not None else request.fixturename
return create_autospec(cls, _name=name, spec_set=spec_set, instance=True, **kwargs)
|
Return mock for instance of `cls` that draws its spec from that class.
The mock does not allow new attributes to be set on the instance. If `name` is
missing or |None|, the name of the returned |Mock| instance is set to
`request.fixturename`. Additional keyword arguments are passed through to the Mock()
call that creates the mock.
|
instance_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def method_mock(
request: FixtureRequest, cls: type, method_name: str, autospec: bool = True, **kwargs: Any
):
"""Return mock for method `method_name` on `cls`.
The patch is reversed after pytest uses it.
"""
_patch = patch.object(cls, method_name, autospec=autospec, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return mock for method `method_name` on `cls`.
The patch is reversed after pytest uses it.
|
method_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def open_mock(request: FixtureRequest, module_name: str, **kwargs: Any):
"""Return a mock for the builtin `open()` method in `module_name`."""
target = "%s.open" % module_name
_patch = patch(target, mock_open(), create=True, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return a mock for the builtin `open()` method in `module_name`.
|
open_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def property_mock(request: FixtureRequest, cls: type, prop_name: str, **kwargs: Any):
"""Return a mock for property `prop_name` on class `cls`."""
_patch = patch.object(cls, prop_name, new_callable=PropertyMock, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return a mock for property `prop_name` on class `cls`.
|
property_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def var_mock(request: FixtureRequest, q_var_name: str, **kwargs: Any):
"""Return mock patching the variable with qualified name *q_var_name*."""
_patch = patch(q_var_name, **kwargs)
request.addfinalizer(_patch.stop)
return _patch.start()
|
Return mock patching the variable with qualified name *q_var_name*.
|
var_mock
|
python
|
scanny/python-pptx
|
tests/unitutil/mock.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/mock.py
|
MIT
|
def count(start=0, step=1):
"""
Local implementation of `itertools.count()` to allow v2.6 compatibility.
"""
n = start
while True:
yield n
n += step
|
Local implementation of `itertools.count()` to allow v2.6 compatibility.
|
count
|
python
|
scanny/python-pptx
|
tests/unitutil/__init__.py
|
https://github.com/scanny/python-pptx/blob/master/tests/unitutil/__init__.py
|
MIT
|
def _open(filename=None, mode="r"):
"""Open a file or ``sys.stdout`` depending on the provided filename.
Args:
filename (str): The path to the file that should be opened. If
``None`` or ``'-'``, ``sys.stdout`` or ``sys.stdin`` is
returned depending on the desired mode. Defaults to ``None``.
mode (str): The mode that should be used to open the file.
Yields:
A file handle.
"""
if not filename or filename == "-":
if not mode or "r" in mode:
file = sys.stdin
elif "w" in mode:
file = sys.stdout
else:
raise ValueError("Invalid mode for file: {}".format(mode))
else:
file = open(filename, mode)
try:
yield file
finally:
if file not in (sys.stdin, sys.stdout):
file.close()
|
Open a file or ``sys.stdout`` depending on the provided filename.
Args:
filename (str): The path to the file that should be opened. If
``None`` or ``'-'``, ``sys.stdout`` or ``sys.stdin`` is
returned depending on the desired mode. Defaults to ``None``.
mode (str): The mode that should be used to open the file.
Yields:
A file handle.
|
_open
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
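A usage sketch, assuming `_open` is wrapped with `contextlib.contextmanager` in the actual module (the generator above yields the handle); the package pins are illustrative:

with _open("requirements.txt", mode="w") as f:   # regular file: opened, then closed on exit
    f.write("docopt==0.6.2\n")
with _open("-", mode="w") as f:                  # "-" (or None): writes to sys.stdout, left open
    f.write("requests\n")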
def ipynb_2_py(file_name, encoding="utf-8"):
"""
Args:
file_name (str): notebook file path to parse as python script
encoding (str): encoding of file
Returns:
str: parsed string
"""
exporter = PythonExporter()
(body, _) = exporter.from_filename(file_name)
return body.encode(encoding)
|
Args:
file_name (str): notebook file path to parse as python script
encoding (str): encoding of file
Returns:
str: parsed string
|
ipynb_2_py
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
def get_pkg_names(pkgs):
"""Get PyPI package names from a list of imports.
Args:
pkgs (List[str]): List of import names.
Returns:
List[str]: The corresponding PyPI package names.
"""
result = set()
with open(join("mapping"), "r") as f:
data = dict(x.strip().split(":") for x in f)
for pkg in pkgs:
# Look up the mapped requirement. If a mapping isn't found,
# simply use the package name.
result.add(data.get(pkg, pkg))
# Return a sorted list for backward compatibility.
return sorted(result, key=lambda s: s.lower())
|
Get PyPI package names from a list of imports.
Args:
pkgs (List[str]): List of import names.
Returns:
List[str]: The corresponding PyPI package names.
|
get_pkg_names
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
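Illustrative only: the bundled "mapping" file translates import names to PyPI distribution names, e.g. an entry like `cv2:opencv_python`. With such an entry present:

print(get_pkg_names(["cv2", "requests", "unknown_pkg"]))
# -> ['opencv_python', 'requests', 'unknown_pkg']   # unmapped names pass through unchanged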
def compare_modules(file_, imports):
"""Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
set: The modules not imported in the project, but do exist in the
specified file.
"""
modules = parse_requirements(file_)
imports = [imports[i]["name"] for i in range(len(imports))]
modules = [modules[i]["name"] for i in range(len(modules))]
modules_not_imported = set(modules) - set(imports)
return modules_not_imported
|
Compare modules in a file to imported modules in a project.
Args:
file_ (str): File to parse for modules to be compared.
imports (tuple): Modules being imported in the project.
Returns:
set: The modules not imported in the project, but do exist in the
specified file.
|
compare_modules
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
def diff(file_, imports):
"""Display the difference between modules in a file and imported modules.""" # NOQA
modules_not_imported = compare_modules(file_, imports)
logging.info(
"The following modules are in {} but do not seem to be imported: "
"{}".format(file_, ", ".join(x for x in modules_not_imported))
)
|
Display the difference between modules in a file and imported modules.
|
diff
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
def clean(file_, imports):
"""Remove modules that aren't imported in project from file."""
modules_not_imported = compare_modules(file_, imports)
if len(modules_not_imported) == 0:
logging.info("Nothing to clean in " + file_)
return
re_remove = re.compile("|".join(modules_not_imported))
to_write = []
try:
f = open(file_, "r+")
except OSError:
logging.error("Failed on file: {}".format(file_))
raise
else:
try:
for i in f.readlines():
if re_remove.match(i) is None:
to_write.append(i)
f.seek(0)
f.truncate()
for i in to_write:
f.write(i)
finally:
f.close()
logging.info("Successfully cleaned up requirements in " + file_)
|
Remove modules that aren't imported in project from file.
|
clean
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
def dynamic_versioning(scheme, imports):
"""Enables dynamic versioning with <compat>, <gt> or <non-pin> schemes."""
if scheme == "no-pin":
imports = [{"name": item["name"], "version": ""} for item in imports]
symbol = ""
elif scheme == "gt":
symbol = ">="
elif scheme == "compat":
symbol = "~="
return imports, symbol
|
Enables dynamic versioning with <compat>, <gt> or <no-pin> schemes.
|
dynamic_versioning
|
python
|
bndr/pipreqs
|
pipreqs/pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/pipreqs/pipreqs.py
|
Apache-2.0
|
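A sketch of how the returned `(imports, symbol)` pair might be turned into requirement lines; the formatting loop here is an assumption for illustration, not pipreqs' own output code:

imports = [{"name": "docopt", "version": "0.6.2"}, {"name": "requests", "version": "2.31.0"}]
imports, symbol = dynamic_versioning("compat", imports)
lines = ["{}{}{}".format(m["name"], symbol, m["version"]) for m in imports]
print(lines)   # -> ['docopt~=0.6.2', 'requests~=2.31.0']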
def test_get_imports_info(self):
"""
Test to see that the right number of packages were found on PyPI
"""
imports = pipreqs.get_all_imports(self.project)
with_info = pipreqs.get_imports_info(imports)
# Should contain 10 items without the "nonexistendmodule" and
# "after_method_is_valid_even_if_not_pep8"
self.assertEqual(len(with_info), 13)
for item in with_info:
self.assertTrue(
item["name"].lower() in self.modules,
"Import item appears to be missing " + item["name"],
)
|
Test to see that the right number of packages were found on PyPI
|
test_get_imports_info
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_get_use_local_only(self):
"""
Test without checking PyPI; check that the names of local
imports match what we expect
- Note even though pyflakes isn't in requirements.txt,
It's added to locals since it is a development dependency
for testing
"""
# should find only docopt and requests
imports_with_info = pipreqs.get_import_local(self.modules)
for item in imports_with_info:
self.assertTrue(item["name"].lower() in self.local)
|
Test without checking PyPI; check that the names of local
imports match what we expect
- Note even though pyflakes isn't in requirements.txt,
It's added to locals since it is a development dependency
for testing
|
test_get_use_local_only
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_init(self):
"""
Test that all modules we will test upon are in requirements file
"""
pipreqs.init(
{
"<path>": self.project,
"--savepath": None,
"--print": False,
"--use-local": None,
"--force": True,
"--proxy": None,
"--pypi-server": None,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
assert os.path.exists(self.requirements_path) == 1
with open(self.requirements_path, "r") as f:
data = f.read().lower()
for item in self.modules[:-3]:
self.assertTrue(item.lower() in data)
# It should be sorted based on names.
data = data.strip().split("\n")
self.assertEqual(data, sorted(data))
|
Test that all modules we will test upon are in requirements file
|
test_init
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_init_local_only(self):
"""
Test that items listed in requirements.txt are the same
as the expected local packages
"""
pipreqs.init(
{
"<path>": self.project,
"--savepath": None,
"--print": False,
"--use-local": True,
"--force": True,
"--proxy": None,
"--pypi-server": None,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
assert os.path.exists(self.requirements_path) == 1
with open(self.requirements_path, "r") as f:
data = f.readlines()
for item in data:
item = item.strip().split("==")
self.assertTrue(item[0].lower() in self.local)
|
Test that items listed in requirements.txt are the same
as the expected local packages
|
test_init_local_only
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_init_savepath(self):
"""
Test that we can save requirements.txt correctly
to a different path
"""
pipreqs.init(
{
"<path>": self.project,
"--savepath": self.alt_requirement_path,
"--use-local": None,
"--proxy": None,
"--pypi-server": None,
"--print": False,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
assert os.path.exists(self.alt_requirement_path) == 1
with open(self.alt_requirement_path, "r") as f:
data = f.read().lower()
for item in self.modules[:-3]:
self.assertTrue(item.lower() in data)
for item in self.modules2:
self.assertTrue(item.lower() in data)
|
Test that we can save requirements.txt correctly
to a different path
|
test_init_savepath
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_init_overwrite(self):
"""
Test that if requirements.txt exists, it will not be
automatically overwritten
"""
with open(self.requirements_path, "w") as f:
f.write("should_not_be_overwritten")
pipreqs.init(
{
"<path>": self.project,
"--savepath": None,
"--use-local": None,
"--force": None,
"--proxy": None,
"--pypi-server": None,
"--print": False,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
assert os.path.exists(self.requirements_path) == 1
with open(self.requirements_path, "r") as f:
data = f.read().lower()
self.assertEqual(data, "should_not_be_overwritten")
|
Test that if requirements.txt exists, it will not be
automatically overwritten
|
test_init_overwrite
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_get_import_name_without_alias(self):
"""
Test that function get_name_without_alias()
will work on a string.
- Note: This isn't truly needed when pipreqs is walking
the AST to find imports
"""
import_name_with_alias = "requests as R"
expected_import_name_without_alias = "requests"
import_name_without_aliases = pipreqs.get_name_without_alias(import_name_with_alias)
self.assertEqual(import_name_without_aliases, expected_import_name_without_alias)
|
Test that function get_name_without_alias()
will work on a string.
- Note: This isn't truly needed when pipreqs is walking
the AST to find imports
|
test_get_import_name_without_alias
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_custom_pypi_server(self):
"""
Test that trying to use an invalid custom PyPI server fails correctly
"""
self.assertRaises(
requests.exceptions.MissingSchema,
pipreqs.init,
{
"<path>": self.project,
"--savepath": None,
"--print": False,
"--use-local": None,
"--force": True,
"--proxy": None,
"--pypi-server": "nonexistent",
},
)
|
Test that trying to use an invalid custom PyPI server fails correctly
|
test_custom_pypi_server
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_clean_with_imports_to_clean(self):
"""
Test --clean parameter when there are imports to clean
"""
cleaned_module = "sqlalchemy"
pipreqs.init(
{
"<path>": self.project,
"--savepath": None,
"--print": False,
"--use-local": None,
"--force": True,
"--proxy": None,
"--pypi-server": None,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
assert os.path.exists(self.requirements_path) == 1
pipreqs.init(
{
"<path>": self.project_clean,
"--savepath": None,
"--print": False,
"--use-local": None,
"--force": None,
"--proxy": None,
"--pypi-server": None,
"--diff": None,
"--clean": self.requirements_path,
"--mode": "non-pin",
}
)
with open(self.requirements_path, "r") as f:
data = f.read().lower()
self.assertTrue(cleaned_module not in data)
|
Test --clean parameter when there are imports to clean
|
test_clean_with_imports_to_clean
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_output_requirements(self):
"""
Test --print parameter
It should print to stdout the same content as requirements.txt
"""
capturedOutput = StringIO()
sys.stdout = capturedOutput
pipreqs.init(
{
"<path>": self.project,
"--savepath": None,
"--print": True,
"--use-local": None,
"--force": None,
"--proxy": None,
"--pypi-server": None,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
pipreqs.init(
{
"<path>": self.project,
"--savepath": None,
"--print": False,
"--use-local": None,
"--force": True,
"--proxy": None,
"--pypi-server": None,
"--diff": None,
"--clean": None,
"--mode": None,
}
)
with open(self.requirements_path, "r") as f:
file_content = f.read().lower()
stdout_content = capturedOutput.getvalue().lower()
self.assertTrue(file_content == stdout_content)
|
Test --print parameter
It should print to stdout the same content as requirements.txt
|
test_output_requirements
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_import_notebooks(self):
"""
Test the function get_all_imports() using .ipynb file
"""
self.mock_scan_notebooks()
imports = pipreqs.get_all_imports(self.project_with_notebooks)
for item in imports:
self.assertTrue(item.lower() in self.modules, "Import is missing: " + item)
not_desired_imports = ["time", "logging", "curses", "__future__", "django", "models", "FastAPI", "sklearn"]
for not_desired_import in not_desired_imports:
self.assertFalse(
not_desired_import in imports,
f"{not_desired_import} was imported, but it should not have been."
)
|
Test the function get_all_imports() using .ipynb file
|
test_import_notebooks
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_invalid_notebook(self):
"""
Test that invalid notebook files cannot be imported.
"""
self.mock_scan_notebooks()
self.assertRaises(SyntaxError, pipreqs.get_all_imports, self.project_with_invalid_notebooks)
|
Test that invalid notebook files cannot be imported.
|
test_invalid_notebook
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_ipynb_2_py(self):
"""
Test the function ipynb_2_py() which converts .ipynb file to .py format
"""
python_imports = pipreqs.get_all_imports(self.python_path_same_imports)
notebook_imports = pipreqs.get_all_imports(self.notebook_path_same_imports)
self.assertEqual(python_imports, notebook_imports)
|
Test the function ipynb_2_py() which converts .ipynb file to .py format
|
test_ipynb_2_py
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def test_ignore_notebooks(self):
"""
Test if notebooks are ignored when the scan-notebooks parameter is False
"""
notebook_requirement_path = os.path.join(self.project_with_notebooks, "requirements.txt")
pipreqs.init(
{
"<path>": self.project_with_notebooks,
"--savepath": None,
"--use-local": None,
"--force": True,
"--proxy": None,
"--pypi-server": None,
"--print": False,
"--diff": None,
"--clean": None,
"--mode": None,
"--scan-notebooks": False,
}
)
assert os.path.exists(notebook_requirement_path) == 1
assert os.path.getsize(notebook_requirement_path) == 1 # file only has a "\n", meaning it's empty
|
Test if notebooks are ignored when the scan-notebooks parameter is False
|
test_ignore_notebooks
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def tearDown(self):
"""
        Remove requirements.txt files that were written
"""
try:
os.remove(self.requirements_path)
except OSError:
pass
try:
os.remove(self.alt_requirement_path)
except OSError:
pass
|
Remove requirements.txt files that were written
|
tearDown
|
python
|
bndr/pipreqs
|
tests/test_pipreqs.py
|
https://github.com/bndr/pipreqs/blob/master/tests/test_pipreqs.py
|
Apache-2.0
|
def find_risky_files(path: str):
"""Searching for risky text in yml files for given path."""
return {
str(file)
for file in Path(path).rglob("*.yml")
if risky_text in file.read_text() and str(file) not in ignore_files
}
|
Search for risky text in .yml files under the given path.
|
find_risky_files
|
python
|
mckinsey/vizro
|
tools/scan_yaml_for_risky_text.py
|
https://github.com/mckinsey/vizro/blob/master/tools/scan_yaml_for_risky_text.py
|
Apache-2.0
|
def post_comment(pr_object, config: PyCafeConfig, comparison_urls_dict: dict[str, dict[str, str]]):
"""Post a comment on the pull request with the PyCafe dashboard links."""
template = """## View the example dashboards of the current commit live on PyCafe :coffee: :rocket:\n
Updated on: {current_utc_time}
Commit: {commit_sha}
Compare the examples using the commit's wheel file vs the latest released version:
{dashboards}
"""
# Find existing comments by the bot
comments = pr_object.get_issue_comments()
bot_comment = None
for comment in comments:
if comment.body.startswith("## View the example dashboards of the current commit live"):
bot_comment = comment
break
# Get current UTC datetime
current_utc_time = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
# Format the comparison links
dashboards = "\n\n".join(
f"### {directory}\n"
f"[View with commit's wheel]({urls['commit']}) vs [View with latest release]({urls['release']})"
for directory, urls in comparison_urls_dict.items()
)
# Update the existing comment or create a new one
comment_body = template.format(
current_utc_time=current_utc_time,
commit_sha=config.commit_sha,
dashboards=dashboards,
)
if bot_comment:
bot_comment.edit(comment_body)
print("Comment updated on the pull request.") # noqa
else:
pr_object.create_issue_comment(comment_body)
print("Comment added to the pull request.") # noqa
|
Post a comment on the pull request with the PyCafe dashboard links.
|
post_comment
|
python
|
mckinsey/vizro
|
tools/pycafe/create_pycafe_links_comments.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/create_pycafe_links_comments.py
|
Apache-2.0
|
def _get_vizro_requirement(config: PyCafeConfig, use_latest_release: bool = False) -> str:
"""Get the Vizro requirement string for PyCafe."""
if use_latest_release:
return "vizro"
return (
f"{config.pycafe_url}/gh/artifact/mckinsey/vizro/actions/runs/{config.run_id}/"
f"pip/vizro-{config.vizro_version}-py3-none-any.whl"
)
|
Get the Vizro requirement string for PyCafe.
|
_get_vizro_requirement
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
|
def _get_vizro_ai_requirement(config: PyCafeConfig, use_latest_release: bool = False) -> str:
"""Get the Vizro AI requirement string for PyCafe."""
if use_latest_release:
return "vizro-ai"
return (
f"{config.pycafe_url}/gh/artifact/mckinsey/vizro/actions/runs/{config.run_id}/"
f"pip2/vizro_ai-{config.vizro_ai_version}-py3-none-any.whl"
)
|
Get the Vizro AI requirement string for PyCafe.
|
_get_vizro_ai_requirement
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
|
def _fetch_app_content(base_url: str) -> str:
"""Fetch and process app.py content from the repository."""
response = requests.get(f"{base_url}/app.py", timeout=10)
response.raise_for_status()
app_content = response.text
app_content_split = app_content.split('if __name__ == "__main__":')
if len(app_content_split) > 1:
return app_content_split[0] + textwrap.dedent(app_content_split[1])
return app_content
|
Fetch and process app.py content from the repository.
|
_fetch_app_content
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
|
def _fetch_directory_files(config: PyCafeConfig, directory_path: str) -> list[dict]:
"""Fetch files in a directory from GitHub API."""
url = f"https://api.github.com/repos/{config.repo_name}/git/trees/{config.commit_sha}?recursive=1"
response = requests.get(url, timeout=20)
response.raise_for_status()
files = response.json().get("tree", [])
return [file for file in files if file["path"].startswith(directory_path)]
|
Fetch files in a directory from GitHub API.
|
_fetch_directory_files
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
|
def generate_link(
config: PyCafeConfig,
directory_path: str,
extra_requirements: Optional[list[str]] = None,
use_latest_release: bool = False,
) -> str:
"""Generate a PyCafe link for the example dashboard."""
base_url = f"{config.vizro_raw_url}/{config.commit_sha}/{directory_path}"
# Requirements - either use latest release or commit's wheel file
requirements = []
if directory_path.startswith("vizro-ai/"):
# An example in this folder may require the latest vizro-ai and vizro-core releases
requirements.extend(
[_get_vizro_ai_requirement(config, use_latest_release), _get_vizro_requirement(config, use_latest_release)]
)
else:
# All other examples do not require vizro-ai, but still the latest vizro-core release
requirements.extend([_get_vizro_requirement(config, use_latest_release)])
if extra_requirements:
requirements.extend(extra_requirements)
# App file - get current commit, and modify to remove if clause
app_content = _fetch_app_content(base_url)
# Get directory files
folder_files = _fetch_directory_files(config, directory_path)
# JSON object
json_object = {
"code": app_content,
"requirements": "\n".join(requirements),
"files": [
{
"name": file["path"].removeprefix(f"{directory_path}"),
"url": f"{base_url}{file['path'].removeprefix(f'{directory_path}')}",
}
for file in folder_files
if file["type"] == "blob"
and file["path"] not in {f"{directory_path}/app.py", f"{directory_path}/requirements.txt"}
],
}
json_text = json.dumps(json_object)
compressed_json_text = gzip.compress(json_text.encode("utf8"))
base64_text = base64.b64encode(compressed_json_text).decode("utf8")
query = urlencode({"c": base64_text}, quote_via=quote)
return f"{config.pycafe_url}/snippet/vizro/v1?{query}"
|
Generate a PyCafe link for the example dashboard.
|
generate_link
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
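For orientation only: generate_link above packs the app code, requirements, and project files into a JSON object that is gzip-compressed, base64-encoded, and carried in the c query parameter of the PyCafe URL. The sketch below is not part of the repository; it reverses that encoding with the standard library, and the link value is a hypothetical placeholder produced by generate_link.
import base64
import gzip
import json
from urllib.parse import parse_qs, urlparse

def decode_pycafe_snippet(link: str) -> dict:
    # Reverse of generate_link's encoding: query param -> base64 -> gzip -> JSON.
    query = parse_qs(urlparse(link).query)
    compressed = base64.b64decode(query["c"][0])            # undo base64.b64encode
    json_text = gzip.decompress(compressed).decode("utf8")  # undo gzip.compress
    return json.loads(json_text)  # {"code": ..., "requirements": ..., "files": [...]}

# Hypothetical usage: `link` is a URL returned by generate_link(config, directory_path).
# payload = decode_pycafe_snippet(link)
# print(payload["requirements"])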
|
def create_status_check(
commit: Commit,
directory: str,
url: str,
state: str = "success",
description: str = "Test out the app live on PyCafe",
):
"""Create a GitHub status check for a PyCafe link."""
context = f"PyCafe Example ({directory})"
commit.create_status(state=state, target_url=url, description=description, context=context)
print(f"Status created for {context} with URL: {url}") # noqa
|
Create a GitHub status check for a PyCafe link.
|
create_status_check
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
|
def get_example_directories() -> dict[str, Optional[list[str]]]:
"""Return a dictionary of example directories and their requirements."""
return {
"vizro-core/examples/scratch_dev": None,
"vizro-core/examples/dev/": ["openpyxl"],
"vizro-core/examples/visual-vocabulary/": [
"autoflake==2.3.1",
"black==24.4.2",
"isort==5.13.2",
"plotly==5.24.1",
],
"vizro-core/examples/tutorial/": None,
"vizro-ai/examples/dashboard_ui/": [
"black",
"openpyxl",
"langchain_anthropic",
"langchain_mistralai",
"greenlet # mock",
"tiktoken @ https://py.cafe/files/maartenbreddels/tiktoken-demo/tiktoken-0.7.0-cp312-cp312-pyodide_2024_0_wasm32.whl",
"https://py.cafe/files/maartenbreddels/jiter-demo/jiter-0.6.1-cp312-cp312-pyodide_2024_0_wasm32.whl",
"https://py.cafe/files/maartenbreddels/tokenizers-demo/tokenizers-0.20.2.dev0-cp312-cp312-pyodide_2024_0_wasm32.whl",
],
}
|
Return a dictionary of example directories and their requirements.
|
get_example_directories
|
python
|
mckinsey/vizro
|
tools/pycafe/pycafe_utils.py
|
https://github.com/mckinsey/vizro/blob/master/tools/pycafe/pycafe_utils.py
|
Apache-2.0
|
def run_vizro_ai(user_prompt, n_clicks, data, model, api_key, api_base, vendor_input): # noqa: PLR0913
"""Gets the AI response and adds it to the text window."""
def create_response(ai_response, figure, ai_outputs):
return (ai_response, figure, {"ai_outputs": ai_outputs})
if not n_clicks:
raise PreventUpdate
if not data:
ai_response = "Please upload data to proceed!"
figure = go.Figure()
return create_response(ai_response, figure, ai_outputs=None)
if not api_key:
ai_response = "API key not found. Make sure you enter your API key!"
figure = go.Figure()
return create_response(ai_response, figure, ai_outputs=None)
if api_key.startswith('"'):
ai_response = "Make sure you enter your API key without quotes!"
figure = go.Figure()
return create_response(ai_response, figure, ai_outputs=None)
if api_base is not None and api_base.startswith('"'):
ai_response = "Make sure you enter your API base without quotes!"
figure = go.Figure()
return create_response(ai_response, figure, ai_outputs=None)
try:
logger.info("Attempting chart code.")
df = pd.DataFrame(data["data"])
ai_outputs = get_vizro_ai_plot(
user_prompt=user_prompt,
df=df,
model=model,
api_key=api_key,
api_base=api_base,
vendor_input=vendor_input,
)
ai_code = ai_outputs.code_vizro
figure_vizro = ai_outputs.get_fig_object(data_frame=df, vizro=True)
figure_plotly = ai_outputs.get_fig_object(data_frame=df, vizro=False)
formatted_code = black.format_str(ai_code, mode=black.Mode(line_length=100))
ai_code_outputs = {
"vizro": {"code": ai_outputs.code_vizro, "fig": figure_vizro.to_json()},
"plotly": {"code": ai_outputs.code, "fig": figure_plotly.to_json()},
}
ai_response = "\n".join(["```python", formatted_code, "```"])
logger.info("Successful query produced.")
return create_response(ai_response, figure_vizro, ai_outputs=ai_code_outputs)
except Exception as exc:
logger.debug(exc)
logger.info("Chart creation failed.")
ai_response = f"Sorry, I can't do that. Following Error occurred: {exc}"
figure = go.Figure()
return create_response(ai_response, figure, ai_outputs=None)
|
Gets the AI response and adds it to the text window.
|
run_vizro_ai
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/actions.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/actions.py
|
Apache-2.0
|
def display_filename(data):
"""Custom action to display uploaded filename."""
if data is None:
raise PreventUpdate
display_message = data.get("filename") or data.get("error_message")
return f"Uploaded file name: '{display_message}'" if "filename" in data else display_message
|
Custom action to display uploaded filename.
|
display_filename
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/actions.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/actions.py
|
Apache-2.0
|
def toggle_code(value, data):
"""Callback for switching between vizro and plotly code."""
if not data:
return dash.no_update
ai_code = data["ai_outputs"]["vizro"]["code"] if value else data["ai_outputs"]["plotly"]["code"]
formatted_code = black.format_str(ai_code, mode=black.Mode(line_length=100))
ai_response = "\n".join(["```python", formatted_code, "```"])
return ai_response
|
Callback for switching between vizro and plotly code.
|
toggle_code
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/app.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/app.py
|
Apache-2.0
|
def build(self):
"""Returns the text area component to display vizro-ai code output."""
return html.Div(
children=[
dcc.Textarea(
id=self.id,
placeholder="Describe the chart you want to create, e.g. "
"'Visualize the life expectancy per continent.'",
)
]
)
|
Returns the text area component to display vizro-ai code output.
|
build
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/components.py
|
Apache-2.0
|
def build(self):
"""Returns the upload component for data upload."""
return html.Div(
[
dcc.Upload(
id=self.id,
children=html.Div(["Drag and Drop or ", html.A("Select Files")], id="data-upload"),
),
]
)
|
Returns the upload component for data upload.
|
build
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/components.py
|
Apache-2.0
|
def build(self):
"""Returns the code clipboard component inside a output text area."""
code = black.format_str(self.code, mode=black.Mode(line_length=120))
code = code.strip("'\"")
markdown_code = "\n".join(["```python", code, "```"])
return dcc.Loading(
html.Div(
[
dcc.Clipboard(target_id=f"{self.id}-code-markdown", className="code-clipboard"),
dcc.Markdown(markdown_code, id=f"{self.id}-code-markdown"),
],
className="code-clipboard-container",
),
color="grey",
parent_className="loading-container",
overlay_style={"visibility": "visible", "opacity": 0.3},
)
|
Returns the code clipboard component inside an output text area.
|
build
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/components.py
|
Apache-2.0
|
def build(self):
"""Returns custom dropdown component that cannot be cleared."""
dropdown_build_obj = super().build()
dropdown_build_obj.id = f"{self.id}_outer_div"
dropdown_build_obj.children[1].clearable = False
return dropdown_build_obj
|
Returns custom dropdown component that cannot be cleared.
|
build
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/components.py
|
Apache-2.0
|
def build(self):
"""Returns the off canvas component for settings."""
input_groups = html.Div(
[
dbc.InputGroup(
[
dbc.InputGroupText("API Key"),
dbc.Input(placeholder="API key", type="password", id=f"{self.id}-api-key"),
html.Div(
dbc.Checklist(
id=f"{self.id}-api-key-toggle",
options=[{"label": "", "value": False}],
switch=True,
inline=True,
),
id="toggle-div-api-key",
),
],
className="mb-3",
),
dbc.InputGroup(
[
dbc.InputGroupText("API base"),
dbc.Input(placeholder="(optional) API base", type="password", id=f"{self.id}-api-base"),
html.Div(
dbc.Checklist(
id=f"{self.id}-api-base-toggle",
options=[{"label": "", "value": False}],
switch=True,
inline=True,
),
id="toggle-div-api-base",
),
],
className="mb-3",
),
dbc.InputGroup(
[
dbc.InputGroupText("Choose your vendor"),
dbc.Select(options=self.options, value=self.value, id=f"{self.id}-dropdown"),
],
className="mb-3",
),
],
className="mb-3",
)
providers = [
{"name": "OpenAI", "url": "https://openai.com/index/openai-api/"},
{"name": "Anthropic", "url": "https://docs.anthropic.com/en/api/getting-started"},
{"name": "Mistral", "url": "https://docs.mistral.ai/getting-started/quickstart/"},
{"name": "xAI", "url": "https://x.ai/api", "note": ""},
]
api_instructions = html.Div(
[
html.Hr(
style={
"margin": "2rem 0",
"border-color": "rgba(255, 255, 255, 0.1)",
"border-style": "solid",
"border-width": "0 0 1px 0",
}
),
html.Div("Get API Keys", className="mb-3", style={"color": "#ffffff"}),
dbc.ListGroup(
[
create_provider_item(name=provider["name"], url=provider["url"], note=provider.get("note"))
for provider in providers
],
flush=True,
className="border-0",
),
],
)
offcanvas = dbc.Offcanvas(
id=self.id,
children=[
html.Div(
children=[
input_groups,
api_instructions,
]
),
],
title="Settings",
is_open=True,
)
return offcanvas
|
Returns the off canvas component for settings.
|
build
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/components.py
|
Apache-2.0
|
def build(self):
"""Returns the icon for api settings."""
return html.Div(
children=[html.Span("settings", className="material-symbols-outlined", id=self.id)],
className="settings-div",
)
|
Returns the icon for api settings.
|
build
|
python
|
mckinsey/vizro
|
vizro-ai/examples/dashboard_ui/components.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/examples/dashboard_ui/components.py
|
Apache-2.0
|
def _get_llm_model(model: Optional[Union[BaseChatModel, str]] = None) -> BaseChatModel:
"""Fetches and initializes an instance of the LLM.
Args:
model: Model instance or model name.
Returns:
The initialized instance of the LLM.
Raises:
ValueError: If the provided model string does not match any pre-defined model
"""
if not model:
return ChatOpenAI(model_name=DEFAULT_MODEL, temperature=DEFAULT_TEMPERATURE)
if isinstance(model, BaseChatModel):
return model
if isinstance(model, str):
if any(model in model_list for model_list in SUPPORTED_MODELS.values()):
vendor = model_to_vendor[model]
if DEFAULT_WRAPPER_MAP.get(vendor) is None:
raise ValueError(f"Additional library to support {vendor} models is not installed.")
return DEFAULT_WRAPPER_MAP.get(vendor)(model_name=model, temperature=DEFAULT_TEMPERATURE)
raise ValueError(
f"Model {model} not found! List of available model can be found at https://vizro.readthedocs.io/projects/vizro-ai/en/latest/pages/user-guides/customize-vizro-ai/#supported-models"
)
|
Fetches and initializes an instance of the LLM.
Args:
model: Model instance or model name.
Returns:
The initialized instance of the LLM.
Raises:
ValueError: If the provided model string does not match any pre-defined model
|
_get_llm_model
|
python
|
mckinsey/vizro
|
vizro-ai/src/vizro_ai/_llm_models.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/_llm_models.py
|
Apache-2.0
|
def __init__(self, model: Optional[Union[BaseChatModel, str]] = None):
"""Initialization of VizroAI.
Args:
model: model instance or model name.
"""
self.model = _get_llm_model(model=model)
self.components_instances = {}
# TODO add pending URL link to docs
logger.info(
"Engaging with LLMs (Large Language Models) carries certain risks. "
"Users are advised to become familiar with these risks to make informed decisions, "
"and visit this page for detailed information: "
"https://vizro-ai.readthedocs.io/en/latest/pages/explanation/disclaimer/"
)
|
Initialization of VizroAI.
Args:
model: model instance or model name.
|
__init__
|
python
|
mckinsey/vizro
|
vizro-ai/src/vizro_ai/_vizro_ai.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/_vizro_ai.py
|
Apache-2.0
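Illustrative usage sketch (not from the repository): because __init__ forwards model straight to _get_llm_model, VizroAI accepts either a supported model-name string or a pre-built LangChain chat model. This assumes vizro_ai exposes VizroAI at the package root, langchain_openai is installed, and an OpenAI API key is set in the environment; the model names are examples only.
from langchain_openai import ChatOpenAI
from vizro_ai import VizroAI

# Option 1: pass a model name string and let vizro-ai resolve it (example name).
vizro_ai = VizroAI(model="gpt-4o")

# Option 2: pass a fully configured LangChain chat model instance.
vizro_ai = VizroAI(model=ChatOpenAI(model_name="gpt-4o", temperature=0))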
|
def plot(
self,
df: pd.DataFrame,
user_input: str,
max_debug_retry: int = 1,
return_elements: bool = False,
validate_code: bool = True,
_minimal_output: bool = False,
) -> Union[go.Figure, ChartPlan]:
"""Plot visuals using vizro via english descriptions, english to chart translation.
Args:
df: The dataframe to be analyzed.
user_input: User questions or descriptions of the desired visual.
max_debug_retry: Maximum number of retries to debug errors. Defaults to `1`.
return_elements: Flag to return ChartPlan pydantic model that includes all
possible elements generated. Defaults to `False`.
validate_code: Flag if produced code should be executed to validate it. Defaults to `True`.
_minimal_output: Internal flag to exclude chart insights and code explanations and
skip validation. Defaults to `False`.
Returns:
go.Figure or ChartPlan pydantic model
"""
chart_plan = BaseChartPlan if _minimal_output else ChartPlan
response_model = ChartPlanFactory(data_frame=df, chart_plan=chart_plan) if validate_code else chart_plan
_, df_sample = _get_df_info(df, n_sample=10)
response = _get_pydantic_model(
query=user_input,
llm_model=self.model,
response_model=response_model,
df_info=df_sample,
max_retry=max_debug_retry,
)
if return_elements:
return response
else:
return response.get_fig_object(data_frame=df)
|
Plot visuals using Vizro via English descriptions (English-to-chart translation).
Args:
df: The dataframe to be analyzed.
user_input: User questions or descriptions of the desired visual.
max_debug_retry: Maximum number of retries to debug errors. Defaults to `1`.
return_elements: Flag to return ChartPlan pydantic model that includes all
possible elements generated. Defaults to `False`.
validate_code: Flag if produced code should be executed to validate it. Defaults to `True`.
_minimal_output: Internal flag to exclude chart insights and code explanations and
skip validation. Defaults to `False`.
Returns:
go.Figure or ChartPlan pydantic model
|
plot
|
python
|
mckinsey/vizro
|
vizro-ai/src/vizro_ai/_vizro_ai.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/_vizro_ai.py
|
Apache-2.0
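A minimal end-to-end sketch of plot(), assuming VizroAI is importable from the package root and a valid LLM API key is configured; the data and prompt are illustrative, not taken from the repository.
import pandas as pd
from vizro_ai import VizroAI

df = pd.DataFrame({"continent": ["Asia", "Europe", "Africa"], "life_expectancy": [73.6, 78.9, 63.2]})

vizro_ai = VizroAI()  # falls back to the default model resolved by _get_llm_model
fig = vizro_ai.plot(df, "Visualize the life expectancy per continent.")  # returns a go.Figure
fig.show()

# With return_elements=True the ChartPlan model (generated code, insights, ...) is returned instead.
chart_plan = vizro_ai.plot(df, "Visualize the life expectancy per continent.", return_elements=True)
print(chart_plan.code_vizro)  # assumes ChartPlan exposes code_vizro, as used elsewhere in these examples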
|
def dashboard(
self,
dfs: list[pd.DataFrame],
user_input: str,
return_elements: bool = False,
) -> Union[DashboardOutputs, vm.Dashboard]:
"""Creates a Vizro dashboard using english descriptions.
Args:
dfs: The dataframes to be analyzed.
user_input: User questions or descriptions of the desired visual.
return_elements: Flag to return DashboardOutputs dataclass that includes all possible elements generated.
Returns:
vm.Dashboard or DashboardOutputs dataclass.
"""
runnable = _create_and_compile_graph()
config = {"configurable": {"model": self.model}}
message_res = runnable.invoke(
{
"dfs": dfs,
"all_df_metadata": AllDfMetadata(),
"dashboard_plan": None,
"pages": [],
"dashboard": None,
"messages": [HumanMessage(content=user_input)],
"custom_charts_code": [],
"custom_charts_imports": [],
},
config=config,
)
dashboard = message_res["dashboard"]
_register_data(all_df_metadata=message_res["all_df_metadata"])
if return_elements:
chart_code, imports = _extract_overall_imports_and_code(
message_res["custom_charts_code"], message_res["custom_charts_imports"]
)
code = dashboard._to_python(extra_callable_defs=chart_code, extra_imports=imports)
dashboard_output = DashboardOutputs(dashboard=dashboard, code=code)
return dashboard_output
else:
return dashboard
|
Creates a Vizro dashboard using English descriptions.
Args:
dfs: The dataframes to be analyzed.
user_input: User questions or descriptions of the desired visual.
return_elements: Flag to return DashboardOutputs dataclass that includes all possible elements generated.
Returns:
vm.Dashboard or DashboardOutputs dataclass.
|
dashboard
|
python
|
mckinsey/vizro
|
vizro-ai/src/vizro_ai/_vizro_ai.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/_vizro_ai.py
|
Apache-2.0
|
def get_schemas_and_samples(self) -> dict[str, dict[str, str]]:
"""Retrieve only the df_schema and df_sample for all datasets."""
return {
name: {"df_schema": metadata.df_schema, "df_sample": metadata.df_sample}
for name, metadata in self.all_df_metadata.items()
}
|
Retrieve only the df_schema and df_sample for all datasets.
|
get_schemas_and_samples
|
python
|
mckinsey/vizro
|
vizro-ai/src/vizro_ai/dashboard/utils.py
|
https://github.com/mckinsey/vizro/blob/master/vizro-ai/src/vizro_ai/dashboard/utils.py
|
Apache-2.0
|