Spaces:
Running
Running
Pylint & CVE fix
#1
by
barunsaha
- opened
This view is limited to 50 files because it contains too many changes.
See the raw diff here.
- .codecov.yml +0 -10
- .coveragerc +0 -15
- .env.example +0 -10
- .gitattributes +0 -2
- .github/copilot-instructions.md +0 -14
- .github/workflows/codeql.yml +0 -98
- .github/workflows/pr-workflow.yml +0 -51
- .github/workflows/publish-to-pypi.yml +0 -47
- .gitignore +0 -2
- .idea/.gitignore +3 -0
- .idea/inspectionProfiles/Project_Default.xml +14 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +7 -0
- .idea/modules.xml +8 -0
- .idea/slide-deck-ai.iml +10 -0
- .idea/vcs.xml +6 -0
- .readthedocs.yaml +0 -17
- .streamlit/config.toml +0 -10
- LITELLM_MIGRATION_SUMMARY.md +0 -145
- MANIFEST.in +0 -6
- README.md +24 -175
- app.py +237 -458
- clarifai_grpc_helper.py +71 -0
- docs/_templates/module.rst +0 -25
- docs/api.rst +0 -18
- docs/conf.py +0 -50
- docs/generated/slidedeckai.cli.CustomArgumentParser.rst +0 -40
- docs/generated/slidedeckai.cli.CustomHelpFormatter.rst +0 -29
- docs/generated/slidedeckai.cli.format_model_help.rst +0 -6
- docs/generated/slidedeckai.cli.format_models_as_bullets.rst +0 -6
- docs/generated/slidedeckai.cli.format_models_list.rst +0 -6
- docs/generated/slidedeckai.cli.group_models_by_provider.rst +0 -6
- docs/generated/slidedeckai.cli.main.rst +0 -6
- docs/generated/slidedeckai.cli.rst +0 -36
- docs/generated/slidedeckai.core.SlideDeckAI.rst +0 -27
- docs/generated/slidedeckai.core.rst +0 -24
- docs/generated/slidedeckai.helpers.chat_helper.AIMessage.rst +0 -22
- docs/generated/slidedeckai.helpers.chat_helper.ChatMessage.rst +0 -22
- docs/generated/slidedeckai.helpers.chat_helper.ChatMessageHistory.rst +0 -24
- docs/generated/slidedeckai.helpers.chat_helper.ChatPromptTemplate.rst +0 -24
- docs/generated/slidedeckai.helpers.chat_helper.HumanMessage.rst +0 -22
- docs/generated/slidedeckai.helpers.chat_helper.rst +0 -32
- docs/generated/slidedeckai.helpers.file_manager.get_pdf_contents.rst +0 -6
- docs/generated/slidedeckai.helpers.file_manager.rst +0 -26
- docs/generated/slidedeckai.helpers.file_manager.validate_page_range.rst +0 -6
- docs/generated/slidedeckai.helpers.icons_embeddings.find_icons.rst +0 -6
- docs/generated/slidedeckai.helpers.icons_embeddings.get_embeddings.rst +0 -6
- docs/generated/slidedeckai.helpers.icons_embeddings.get_icons_list.rst +0 -6
- docs/generated/slidedeckai.helpers.icons_embeddings.load_saved_embeddings.rst +0 -6
- docs/generated/slidedeckai.helpers.icons_embeddings.main.rst +0 -6
.codecov.yml
DELETED
|
@@ -1,10 +0,0 @@
|
|
| 1 |
-
ignore:
|
| 2 |
-
# Exclude the version file from all coverage calculations
|
| 3 |
-
- "src/slidedeckai/_version.py"
|
| 4 |
-
|
| 5 |
-
coverage:
|
| 6 |
-
status:
|
| 7 |
-
patch:
|
| 8 |
-
default:
|
| 9 |
-
target: 80%
|
| 10 |
-
threshold: 5%
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.coveragerc
DELETED
|
@@ -1,15 +0,0 @@
|
|
| 1 |
-
[run]
|
| 2 |
-
source = src/slidedeckai
|
| 3 |
-
omit =
|
| 4 |
-
tests/*
|
| 5 |
-
*/__init__.py
|
| 6 |
-
setup.py
|
| 7 |
-
|
| 8 |
-
[report]
|
| 9 |
-
exclude_lines =
|
| 10 |
-
pragma: no cover
|
| 11 |
-
def __repr__
|
| 12 |
-
if __name__ == '__main__':
|
| 13 |
-
raise NotImplementedError
|
| 14 |
-
pass
|
| 15 |
-
raise ImportError
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.env.example
DELETED
|
@@ -1,10 +0,0 @@
|
|
| 1 |
-
# Example .env file for SlideDeck AI
|
| 2 |
-
# Add your API keys and configuration values here
|
| 3 |
-
|
| 4 |
-
PEXEL_API_KEY=your-pexel-key-for-images
|
| 5 |
-
|
| 6 |
-
TOGETHER_API_KEY=your-together-ai-key
|
| 7 |
-
OPENROUTER_API_KEY=your-openrouter-api-key
|
| 8 |
-
|
| 9 |
-
RUN_IN_OFFLINE_MODE=true-or-false
|
| 10 |
-
DEFAULT_MODEL_INDEX=3
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitattributes
CHANGED
|
@@ -33,5 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
-
*.pptx filter=lfs diff=lfs merge=lfs -text
|
| 37 |
-
pptx_templates/Minimalist_sales_pitch.pptx filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
.github/copilot-instructions.md
DELETED
|
@@ -1,14 +0,0 @@
|
|
| 1 |
-
1. In Python code, always use single quote for strings unless double quotes are necessary. Use triple double quotes for docstrings.
|
| 2 |
-
2. When defining functions, always include type hints for parameters and return types.
|
| 3 |
-
3. Except for logs, use f-strings for string formatting instead of other methods like % or .format().
|
| 4 |
-
4. Use Google-style docstrings for all functions and classes.
|
| 5 |
-
5. Two blank lines should precede top-level function and class definitions. One blank line between methods inside a class.
|
| 6 |
-
6. Max line length is 100 characters. Use brackets to break long lines. Wrap long strings (or expressions) inside ( and ).
|
| 7 |
-
7. Split long lines at braces, e.g., like this:
|
| 8 |
-
my_function(
|
| 9 |
-
param1,
|
| 10 |
-
param2
|
| 11 |
-
)
|
| 12 |
-
NOT like this:
|
| 13 |
-
my_function(param1,
|
| 14 |
-
param2)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.github/workflows/codeql.yml
DELETED
|
@@ -1,98 +0,0 @@
|
|
| 1 |
-
# For most projects, this workflow file will not need changing; you simply need
|
| 2 |
-
# to commit it to your repository.
|
| 3 |
-
#
|
| 4 |
-
# You may wish to alter this file to override the set of languages analyzed,
|
| 5 |
-
# or to provide custom queries or build logic.
|
| 6 |
-
#
|
| 7 |
-
# ******** NOTE ********
|
| 8 |
-
# We have attempted to detect the languages in your repository. Please check
|
| 9 |
-
# the `language` matrix defined below to confirm you have the correct set of
|
| 10 |
-
# supported CodeQL languages.
|
| 11 |
-
#
|
| 12 |
-
name: "CodeQL Advanced"
|
| 13 |
-
|
| 14 |
-
on:
|
| 15 |
-
push:
|
| 16 |
-
branches: [ "main" ]
|
| 17 |
-
pull_request:
|
| 18 |
-
branches: [ "main" ]
|
| 19 |
-
schedule:
|
| 20 |
-
- cron: '35 12 * * 6'
|
| 21 |
-
|
| 22 |
-
jobs:
|
| 23 |
-
analyze:
|
| 24 |
-
name: Analyze (${{ matrix.language }})
|
| 25 |
-
# Runner size impacts CodeQL analysis time. To learn more, please see:
|
| 26 |
-
# - https://gh.io/recommended-hardware-resources-for-running-codeql
|
| 27 |
-
# - https://gh.io/supported-runners-and-hardware-resources
|
| 28 |
-
# - https://gh.io/using-larger-runners (GitHub.com only)
|
| 29 |
-
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
|
| 30 |
-
runs-on: ubuntu-latest
|
| 31 |
-
permissions:
|
| 32 |
-
# required for all workflows
|
| 33 |
-
security-events: write
|
| 34 |
-
|
| 35 |
-
# required to fetch internal or private CodeQL packs
|
| 36 |
-
packages: read
|
| 37 |
-
|
| 38 |
-
# only required for workflows in private repositories
|
| 39 |
-
actions: read
|
| 40 |
-
contents: read
|
| 41 |
-
|
| 42 |
-
strategy:
|
| 43 |
-
fail-fast: false
|
| 44 |
-
matrix:
|
| 45 |
-
include:
|
| 46 |
-
- language: python
|
| 47 |
-
build-mode: none
|
| 48 |
-
# CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
|
| 49 |
-
# Use `c-cpp` to analyze code written in C, C++ or both
|
| 50 |
-
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
|
| 51 |
-
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
|
| 52 |
-
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
|
| 53 |
-
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
|
| 54 |
-
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
|
| 55 |
-
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
|
| 56 |
-
steps:
|
| 57 |
-
- name: Checkout repository
|
| 58 |
-
uses: actions/checkout@v4
|
| 59 |
-
|
| 60 |
-
# Add any setup steps before running the `github/codeql-action/init` action.
|
| 61 |
-
# This includes steps like installing compilers or runtimes (`actions/setup-node`
|
| 62 |
-
# or others). This is typically only required for manual builds.
|
| 63 |
-
# - name: Setup runtime (example)
|
| 64 |
-
# uses: actions/setup-example@v1
|
| 65 |
-
|
| 66 |
-
# Initializes the CodeQL tools for scanning.
|
| 67 |
-
- name: Initialize CodeQL
|
| 68 |
-
uses: github/codeql-action/init@v3
|
| 69 |
-
with:
|
| 70 |
-
languages: ${{ matrix.language }}
|
| 71 |
-
build-mode: ${{ matrix.build-mode }}
|
| 72 |
-
# If you wish to specify custom queries, you can do so here or in a config file.
|
| 73 |
-
# By default, queries listed here will override any specified in a config file.
|
| 74 |
-
# Prefix the list here with "+" to use these queries and those in the config file.
|
| 75 |
-
|
| 76 |
-
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
|
| 77 |
-
# queries: security-extended,security-and-quality
|
| 78 |
-
|
| 79 |
-
# If the analyze step fails for one of the languages you are analyzing with
|
| 80 |
-
# "We were unable to automatically build your code", modify the matrix above
|
| 81 |
-
# to set the build mode to "manual" for that language. Then modify this step
|
| 82 |
-
# to build your code.
|
| 83 |
-
# ℹ️ Command-line programs to run using the OS shell.
|
| 84 |
-
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
| 85 |
-
- if: matrix.build-mode == 'manual'
|
| 86 |
-
shell: bash
|
| 87 |
-
run: |
|
| 88 |
-
echo 'If you are using a "manual" build mode for one or more of the' \
|
| 89 |
-
'languages you are analyzing, replace this with the commands to build' \
|
| 90 |
-
'your code, for example:'
|
| 91 |
-
echo ' make bootstrap'
|
| 92 |
-
echo ' make release'
|
| 93 |
-
exit 1
|
| 94 |
-
|
| 95 |
-
- name: Perform CodeQL Analysis
|
| 96 |
-
uses: github/codeql-action/analyze@v3
|
| 97 |
-
with:
|
| 98 |
-
category: "/language:${{matrix.language}}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.github/workflows/pr-workflow.yml
DELETED
|
@@ -1,51 +0,0 @@
|
|
| 1 |
-
name: PR Check
|
| 2 |
-
|
| 3 |
-
on:
|
| 4 |
-
pull_request:
|
| 5 |
-
branches: [ "main" ]
|
| 6 |
-
|
| 7 |
-
jobs:
|
| 8 |
-
test:
|
| 9 |
-
runs-on: ubuntu-latest
|
| 10 |
-
strategy:
|
| 11 |
-
matrix:
|
| 12 |
-
python-version: ["3.10", "3.11", "3.12"]
|
| 13 |
-
|
| 14 |
-
steps:
|
| 15 |
-
- uses: actions/checkout@v4
|
| 16 |
-
|
| 17 |
-
- name: Set up Python ${{ matrix.python-version }}
|
| 18 |
-
uses: actions/setup-python@v5
|
| 19 |
-
with:
|
| 20 |
-
python-version: ${{ matrix.python-version }}
|
| 21 |
-
|
| 22 |
-
- name: Install dependencies
|
| 23 |
-
run: |
|
| 24 |
-
python -m pip install --upgrade pip
|
| 25 |
-
pip install -r requirements.txt
|
| 26 |
-
pip install pytest pytest-asyncio pytest-cov
|
| 27 |
-
|
| 28 |
-
- name: Run tests with coverage
|
| 29 |
-
run: |
|
| 30 |
-
pytest tests/unit --asyncio-mode=auto --cov=src/slidedeckai --cov-report=xml --cov-report=html
|
| 31 |
-
|
| 32 |
-
- name: Upload test results and coverage
|
| 33 |
-
uses: actions/upload-artifact@v4
|
| 34 |
-
if: always()
|
| 35 |
-
with:
|
| 36 |
-
name: pytest-results-py${{ matrix.python-version }}
|
| 37 |
-
path: |
|
| 38 |
-
htmlcov
|
| 39 |
-
coverage.xml
|
| 40 |
-
retention-days: 30
|
| 41 |
-
|
| 42 |
-
- name: Coverage Report
|
| 43 |
-
uses: codecov/codecov-action@v5
|
| 44 |
-
with:
|
| 45 |
-
# Provide the Codecov upload token from repo secrets
|
| 46 |
-
token: ${{ secrets.CODECOV_TOKEN }}
|
| 47 |
-
# Path to the coverage XML produced by pytest-cov
|
| 48 |
-
files: ./coverage.xml
|
| 49 |
-
# Fail the job if Codecov returns an error
|
| 50 |
-
fail_ci_if_error: true
|
| 51 |
-
verbose: true
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.github/workflows/publish-to-pypi.yml
DELETED
|
@@ -1,47 +0,0 @@
|
|
| 1 |
-
name: Publish to PyPI
|
| 2 |
-
|
| 3 |
-
on:
|
| 4 |
-
workflow_dispatch:
|
| 5 |
-
push:
|
| 6 |
-
tags:
|
| 7 |
-
- 'v*'
|
| 8 |
-
|
| 9 |
-
permissions:
|
| 10 |
-
contents: read # Default read permission for all jobs
|
| 11 |
-
id-token: write # Overridden for the pypi-publish job
|
| 12 |
-
|
| 13 |
-
jobs:
|
| 14 |
-
pypi-publish:
|
| 15 |
-
name: Upload release to PyPI
|
| 16 |
-
runs-on: ubuntu-latest
|
| 17 |
-
environment:
|
| 18 |
-
name: pypi
|
| 19 |
-
url: https://pypi.org/p/slidedeckai
|
| 20 |
-
permissions:
|
| 21 |
-
id-token: write # Enables OIDC authentication
|
| 22 |
-
|
| 23 |
-
steps:
|
| 24 |
-
- name: Checkout code
|
| 25 |
-
uses: actions/checkout@v4
|
| 26 |
-
with:
|
| 27 |
-
lfs: true # This ensures Git LFS files are downloaded
|
| 28 |
-
|
| 29 |
-
- name: Set up Python
|
| 30 |
-
uses: actions/setup-python@v5
|
| 31 |
-
with:
|
| 32 |
-
python-version: "3.10"
|
| 33 |
-
|
| 34 |
-
- name: Install build tools
|
| 35 |
-
run: |
|
| 36 |
-
python -m pip install --upgrade pip
|
| 37 |
-
pip install build
|
| 38 |
-
|
| 39 |
-
- name: Build package
|
| 40 |
-
run: |
|
| 41 |
-
rm -rf dist/ build/ *.egg-info
|
| 42 |
-
python -m build
|
| 43 |
-
|
| 44 |
-
- name: Publish package to PyPI
|
| 45 |
-
uses: pypa/gh-action-pypi-publish@release/v1
|
| 46 |
-
with:
|
| 47 |
-
packages-dir: dist
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.gitignore
CHANGED
|
@@ -144,5 +144,3 @@ dmypy.json
|
|
| 144 |
# Cython debug symbols
|
| 145 |
cython_debug/
|
| 146 |
|
| 147 |
-
.DS_Store
|
| 148 |
-
.idea/**/.DS_Store
|
|
|
|
| 144 |
# Cython debug symbols
|
| 145 |
cython_debug/
|
| 146 |
|
|
|
|
|
|
.idea/.gitignore
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Default ignored files
|
| 2 |
+
/shelf/
|
| 3 |
+
/workspace.xml
|
.idea/inspectionProfiles/Project_Default.xml
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<component name="InspectionProjectProfileManager">
|
| 2 |
+
<profile version="1.0">
|
| 3 |
+
<option name="myName" value="Project Default" />
|
| 4 |
+
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
| 5 |
+
<option name="ignoredPackages">
|
| 6 |
+
<value>
|
| 7 |
+
<list size="1">
|
| 8 |
+
<item index="0" class="java.lang.String" itemvalue="numpy" />
|
| 9 |
+
</list>
|
| 10 |
+
</value>
|
| 11 |
+
</option>
|
| 12 |
+
</inspection_tool>
|
| 13 |
+
</profile>
|
| 14 |
+
</component>
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<component name="InspectionProjectProfileManager">
|
| 2 |
+
<settings>
|
| 3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
| 4 |
+
<version value="1.0" />
|
| 5 |
+
</settings>
|
| 6 |
+
</component>
|
.idea/misc.xml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="Black">
|
| 4 |
+
<option name="sdkName" value="Python 3.10 (slide-deck-ai)" />
|
| 5 |
+
</component>
|
| 6 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (slide-deck-ai)" project-jdk-type="Python SDK" />
|
| 7 |
+
</project>
|
.idea/modules.xml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectModuleManager">
|
| 4 |
+
<modules>
|
| 5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/slide-deck-ai.iml" filepath="$PROJECT_DIR$/.idea/slide-deck-ai.iml" />
|
| 6 |
+
</modules>
|
| 7 |
+
</component>
|
| 8 |
+
</project>
|
.idea/slide-deck-ai.iml
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<module type="PYTHON_MODULE" version="4">
|
| 3 |
+
<component name="NewModuleRootManager">
|
| 4 |
+
<content url="file://$MODULE_DIR$">
|
| 5 |
+
<excludeFolder url="file://$MODULE_DIR$/venv" />
|
| 6 |
+
</content>
|
| 7 |
+
<orderEntry type="inheritedJdk" />
|
| 8 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
| 9 |
+
</component>
|
| 10 |
+
</module>
|
.idea/vcs.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="VcsDirectoryMappings">
|
| 4 |
+
<mapping directory="$PROJECT_DIR$" vcs="Git" />
|
| 5 |
+
</component>
|
| 6 |
+
</project>
|
.readthedocs.yaml
DELETED
|
@@ -1,17 +0,0 @@
|
|
| 1 |
-
# .readthedocs.yaml
|
| 2 |
-
version: 2
|
| 3 |
-
|
| 4 |
-
build:
|
| 5 |
-
os: ubuntu-22.04
|
| 6 |
-
tools:
|
| 7 |
-
python: "3.10"
|
| 8 |
-
|
| 9 |
-
sphinx:
|
| 10 |
-
configuration: docs/conf.py
|
| 11 |
-
|
| 12 |
-
python:
|
| 13 |
-
install:
|
| 14 |
-
- method: pip
|
| 15 |
-
# Install the main project code (required for autodoc)
|
| 16 |
-
path: .
|
| 17 |
-
- requirements: docs/requirements.txt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.streamlit/config.toml
DELETED
|
@@ -1,10 +0,0 @@
|
|
| 1 |
-
[server]
|
| 2 |
-
runOnSave = true
|
| 3 |
-
headless = false
|
| 4 |
-
maxUploadSize = 2
|
| 5 |
-
|
| 6 |
-
[browser]
|
| 7 |
-
gatherUsageStats = false
|
| 8 |
-
|
| 9 |
-
[theme]
|
| 10 |
-
base = "dark"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
LITELLM_MIGRATION_SUMMARY.md
DELETED
|
@@ -1,145 +0,0 @@
|
|
| 1 |
-
# LiteLLM Integration Summary
|
| 2 |
-
|
| 3 |
-
## Overview
|
| 4 |
-
Successfully replaced LangChain with LiteLLM in the SlideDeck AI project, providing a uniform API to access all LLMs while reducing software dependencies and build times.
|
| 5 |
-
|
| 6 |
-
## Changes Made
|
| 7 |
-
|
| 8 |
-
### 1. Updated Dependencies (`requirements.txt`)
|
| 9 |
-
**Before:**
|
| 10 |
-
```txt
|
| 11 |
-
langchain~=0.3.27
|
| 12 |
-
langchain-core~=0.3.35
|
| 13 |
-
langchain-community~=0.3.27
|
| 14 |
-
langchain-google-genai==2.0.10
|
| 15 |
-
langchain-cohere~=0.4.4
|
| 16 |
-
langchain-together~=0.3.0
|
| 17 |
-
langchain-ollama~=0.3.6
|
| 18 |
-
langchain-openai~=0.3.28
|
| 19 |
-
```
|
| 20 |
-
|
| 21 |
-
**After:**
|
| 22 |
-
```txt
|
| 23 |
-
litellm>=1.55.0
|
| 24 |
-
google-generativeai # ~=0.8.3
|
| 25 |
-
```
|
| 26 |
-
|
| 27 |
-
### 2. Replaced LLM Helper (`helpers/llm_helper.py`)
|
| 28 |
-
- **Removed:** All LangChain-specific imports and implementations
|
| 29 |
-
- **Added:** LiteLLM-based implementation with:
|
| 30 |
-
- `stream_litellm_completion()`: Handles streaming responses from LiteLLM
|
| 31 |
-
- `get_litellm_llm()`: Creates LiteLLM-compatible wrapper objects
|
| 32 |
-
- `get_litellm_model_name()`: Converts provider/model to LiteLLM format
|
| 33 |
-
- `get_litellm_api_key()`: Manages API keys for different providers
|
| 34 |
-
- Backward compatibility alias: `get_langchain_llm = get_litellm_llm`
|
| 35 |
-
|
| 36 |
-
### 3. Replaced Chat Components (`app.py`)
|
| 37 |
-
**Removed LangChain imports:**
|
| 38 |
-
```python
|
| 39 |
-
from langchain_community.chat_message_histories import StreamlitChatMessageHistory
|
| 40 |
-
from langchain_core.messages import HumanMessage
|
| 41 |
-
from langchain_core.prompts import ChatPromptTemplate
|
| 42 |
-
```
|
| 43 |
-
|
| 44 |
-
**Added custom implementations:**
|
| 45 |
-
```python
|
| 46 |
-
class ChatMessage:
|
| 47 |
-
def __init__(self, content: str, role: str):
|
| 48 |
-
self.content = content
|
| 49 |
-
self.role = role
|
| 50 |
-
self.type = role # For compatibility
|
| 51 |
-
|
| 52 |
-
class HumanMessage(ChatMessage):
|
| 53 |
-
def __init__(self, content: str):
|
| 54 |
-
super().__init__(content, "user")
|
| 55 |
-
|
| 56 |
-
class AIMessage(ChatMessage):
|
| 57 |
-
def __init__(self, content: str):
|
| 58 |
-
super().__init__(content, "ai")
|
| 59 |
-
|
| 60 |
-
class StreamlitChatMessageHistory:
|
| 61 |
-
def __init__(self, key: str):
|
| 62 |
-
self.key = key
|
| 63 |
-
if key not in st.session_state:
|
| 64 |
-
st.session_state[key] = []
|
| 65 |
-
|
| 66 |
-
@property
|
| 67 |
-
def messages(self):
|
| 68 |
-
return st.session_state[self.key]
|
| 69 |
-
|
| 70 |
-
def add_user_message(self, content: str):
|
| 71 |
-
st.session_state[self.key].append(HumanMessage(content))
|
| 72 |
-
|
| 73 |
-
def add_ai_message(self, content: str):
|
| 74 |
-
st.session_state[self.key].append(AIMessage(content))
|
| 75 |
-
|
| 76 |
-
class ChatPromptTemplate:
|
| 77 |
-
def __init__(self, template: str):
|
| 78 |
-
self.template = template
|
| 79 |
-
|
| 80 |
-
@classmethod
|
| 81 |
-
def from_template(cls, template: str):
|
| 82 |
-
return cls(template)
|
| 83 |
-
|
| 84 |
-
def format(self, **kwargs):
|
| 85 |
-
return self.template.format(**kwargs)
|
| 86 |
-
```
|
| 87 |
-
|
| 88 |
-
### 4. Updated Function Calls
|
| 89 |
-
- Changed `llm_helper.get_langchain_llm()` to `llm_helper.get_litellm_llm()`
|
| 90 |
-
- Maintained backward compatibility with existing function names
|
| 91 |
-
|
| 92 |
-
## Supported Providers
|
| 93 |
-
|
| 94 |
-
The LiteLLM integration supports all the same providers as before:
|
| 95 |
-
|
| 96 |
-
- **Azure OpenAI** (`az`): `azure/{model}`
|
| 97 |
-
- **Cohere** (`co`): `cohere/{model}`
|
| 98 |
-
- **Google Gemini** (`gg`): `gemini/{model}`
|
| 99 |
-
- **Hugging Face** (`hf`): `huggingface/{model}` (commented out in config)
|
| 100 |
-
- **Ollama** (`ol`): `ollama/{model}` (offline models)
|
| 101 |
-
- **OpenRouter** (`or`): `openrouter/{model}`
|
| 102 |
-
- **Together AI** (`to`): `together_ai/{model}`
|
| 103 |
-
|
| 104 |
-
## Benefits Achieved
|
| 105 |
-
|
| 106 |
-
1. **Reduced Dependencies:** Eliminated 8 LangChain packages, replaced with single LiteLLM package
|
| 107 |
-
2. **Faster Build Times:** Fewer packages to install and resolve
|
| 108 |
-
3. **Uniform API:** Single interface for all LLM providers
|
| 109 |
-
4. **Maintained Compatibility:** All existing functionality preserved
|
| 110 |
-
5. **Offline Support:** Ollama integration continues to work for offline models
|
| 111 |
-
6. **Streaming Support:** Maintained streaming capabilities for real-time responses
|
| 112 |
-
|
| 113 |
-
## Testing Results
|
| 114 |
-
|
| 115 |
-
✅ **LiteLLM Import:** Successfully imported and initialized
|
| 116 |
-
✅ **LLM Helper:** Provider parsing and validation working correctly
|
| 117 |
-
✅ **Ollama Integration:** Compatible with offline Ollama models
|
| 118 |
-
✅ **Custom Chat Components:** Message history and prompt templates working
|
| 119 |
-
✅ **App Structure:** All required files present and functional
|
| 120 |
-
|
| 121 |
-
## Migration Notes
|
| 122 |
-
|
| 123 |
-
- **Backward Compatibility:** Existing function names maintained (`get_langchain_llm` still works)
|
| 124 |
-
- **No Breaking Changes:** All existing functionality preserved
|
| 125 |
-
- **Environment Variables:** Same API key environment variables used
|
| 126 |
-
- **Configuration:** No changes needed to `global_config.py`
|
| 127 |
-
|
| 128 |
-
## Next Steps
|
| 129 |
-
|
| 130 |
-
1. **Deploy:** The app is ready for deployment with LiteLLM
|
| 131 |
-
2. **Monitor:** Watch for any provider-specific issues in production
|
| 132 |
-
3. **Optimize:** Consider LiteLLM-specific optimizations (caching, retries, etc.)
|
| 133 |
-
4. **Document:** Update user documentation to reflect the simplified dependency structure
|
| 134 |
-
|
| 135 |
-
## Verification
|
| 136 |
-
|
| 137 |
-
The integration has been thoroughly tested and verified to work with:
|
| 138 |
-
- Multiple LLM providers (Google Gemini, Cohere, Together AI, etc.)
|
| 139 |
-
- Ollama for offline models
|
| 140 |
-
- Streaming responses
|
| 141 |
-
- Chat message history
|
| 142 |
-
- Prompt template formatting
|
| 143 |
-
- Error handling and validation
|
| 144 |
-
|
| 145 |
-
The SlideDeck AI application is now successfully running on LiteLLM with reduced dependencies and improved maintainability.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
MANIFEST.in
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
include src/slidedeckai/strings.json
|
| 2 |
-
recursive-include src/slidedeckai/prompts *.txt
|
| 3 |
-
recursive-include src/slidedeckai/pptx_templates *.pptx
|
| 4 |
-
recursive-include src/slidedeckai/icons *.png
|
| 5 |
-
recursive-include src/slidedeckai/icons *.txt
|
| 6 |
-
recursive-include src/slidedeckai/file_embeddings *.npy
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
CHANGED
|
@@ -4,199 +4,48 @@ emoji: 🏢
|
|
| 4 |
colorFrom: yellow
|
| 5 |
colorTo: green
|
| 6 |
sdk: streamlit
|
| 7 |
-
sdk_version: 1.
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
---
|
| 12 |
|
|
|
|
| 13 |
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
[](https://opensource.org/licenses/MIT)
|
| 18 |
-
[](https://huggingface.co/spaces/barunsaha/slide-deck-ai)
|
| 19 |
|
|
|
|
|
|
|
| 20 |
|
| 21 |
-
#
|
| 22 |
|
| 23 |
-
|
| 24 |
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
|
| 27 |
-
|
| 28 |
|
| 29 |
|
| 30 |
-
|
| 31 |
|
| 32 |
-
[
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
SlideDeck AI streamlines the creation process through the following steps:
|
| 38 |
-
|
| 39 |
-
1. **AI Content Generation:** Given a topic description, a Large Language Model (LLM) generates the *initial* slide content as structured JSON data based on a pre-defined schema.
|
| 40 |
-
2. **Visual Enhancement:** It uses keywords from the JSON output to search and download relevant images, which are added to the presentation with a certain probability.
|
| 41 |
-
3. **PPTX Assembly:** Subsequently, the powerful `python-pptx` library is used to generate the slides based on the structured JSON data. A user can choose from a set of pre-defined presentation templates.
|
| 42 |
-
4. **Refinement & Iteration:** At this stage onward, a user can provide additional instructions to *refine* the content (e.g., "add another slide," or "modify an existing slide"). A history of instructions is maintained for seamless iteration.
|
| 43 |
-
5. **Instant Download:** Every time SlideDeck AI generates a PowerPoint presentation, a download button is provided to instantly save the file.
|
| 44 |
-
|
| 45 |
-
In addition, SlideDeck AI can also create a presentation based on **PDF files**, transforming documents into decks!
|
| 46 |
-
|
| 47 |
-
## Python API Quickstart
|
| 48 |
-
|
| 49 |
-
<a target="_blank" href="https://colab.research.google.com/drive/1YA9EEmyiQFk03bOSc7lZnxK5l2hAL60l?usp=sharing">
|
| 50 |
-
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
|
| 51 |
-
</a>
|
| 52 |
-
|
| 53 |
-
```python
|
| 54 |
-
from slidedeckai.core import SlideDeckAI
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
slide_generator = SlideDeckAI(
|
| 58 |
-
model='[gg]gemini-2.5-flash-lite',
|
| 59 |
-
topic='Make a slide deck on AI',
|
| 60 |
-
api_key='your-google-api-key', # Or set via environment variable
|
| 61 |
-
)
|
| 62 |
-
pptx_path = slide_generator.generate()
|
| 63 |
-
print(f'🤖 Generated slide deck: {pptx_path}')
|
| 64 |
-
```
|
| 65 |
-
|
| 66 |
-
## CLI Usage
|
| 67 |
-
|
| 68 |
-
Generate a new slide deck:
|
| 69 |
-
```bash
|
| 70 |
-
slidedeckai generate --model '[gg]gemini-2.5-flash-lite' --topic 'Make a slide deck on AI' --api-key 'your-google-api-key'
|
| 71 |
-
```
|
| 72 |
-
|
| 73 |
-
Launch the Streamlit app:
|
| 74 |
-
```bash
|
| 75 |
-
slidedeckai launch
|
| 76 |
-
```
|
| 77 |
-
|
| 78 |
-
List supported models (these are the only models supported by SlideDeck AI):
|
| 79 |
-
```bash
|
| 80 |
-
slidedeckai --list-models
|
| 81 |
-
```
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
## Unmatched Flexibility: Choose Your AI Brain
|
| 85 |
-
|
| 86 |
-
SlideDeck AI stands out by supporting a wide array of LLMs from several online providers—Azure/ OpenAI, Google, SambaNova, Together AI, and OpenRouter. This gives you flexibility and control over your content generation style.
|
| 87 |
-
|
| 88 |
-
Most supported service providers also offer generous free usage tiers, meaning you can often start building without immediate billing concerns.
|
| 89 |
-
|
| 90 |
-
Model names in SlideDeck AI are specified in the `[code]model-name` format. It begins with a two-character prefix code in square brackets to indicate the provider, for example, `[oa]` for OpenAI, `[gg]` for Google Gemini, and so on. Following the code, the model name is specified, for example, `gemini-2.0-flash` or `gpt-4o`. So, to use Google Gemini 2.0 Flash Lite, the model name would be `[gg]gemini-2.0-flash-lite`.
|
| 91 |
-
|
| 92 |
-
Based on several experiments, SlideDeck AI generally recommends the use of Gemini Flash and GPT-4o to generate the best-quality slide decks.
|
| 93 |
-
|
| 94 |
-
The supported LLMs offer different styles of content generation. Use one of the following LLMs along with relevant API keys/access tokens, as appropriate, to create the content of the slide deck:
|
| 95 |
-
|
| 96 |
-
| LLM | Provider (code) | Requires API key | Characteristics |
|
| 97 |
-
|:------------------------------------|:-------------------------|:-------------------------------------------------------------------------------------------------------------------------|:-------------------------|
|
| 98 |
-
| Claude Haiku 4.5 | Anthropic (`an`) | Mandatory; [get here](https://platform.claude.com/settings/keys) | Faster, detailed |
|
| 99 |
-
| Gemini 2.0 Flash | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Faster, longer content |
|
| 100 |
-
| Gemini 2.0 Flash Lite | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Fastest, longer content |
|
| 101 |
-
| Gemini 2.5 Flash | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Faster, longer content |
|
| 102 |
-
| Gemini 2.5 Flash Lite | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Fastest, longer content |
|
| 103 |
-
| GPT-4.1-mini | OpenAI (`oa`) | Mandatory; [get here](https://platform.openai.com/settings/organization/api-keys) | Faster, medium content |
|
| 104 |
-
| GPT-4.1-nano | OpenAI (`oa`) | Mandatory; [get here](https://platform.openai.com/settings/organization/api-keys) | Faster, shorter content |
|
| 105 |
-
| GPT-5 | OpenAI (`oa`) | Mandatory; [get here](https://platform.openai.com/settings/organization/api-keys) | Slow, shorter content |
|
| 106 |
-
| GPT | Azure OpenAI (`az`) | Mandatory; [get here](https://ai.azure.com/resource/playground) NOTE: You need to have your subscription/billing set up | Faster, longer content |
|
| 107 |
-
| Command R+ | Cohere (`co`) | Mandatory; [get here](https://dashboard.cohere.com/api-keys) | Shorter, simpler content |
|
| 108 |
-
| Gemini-2.0-flash-001 | OpenRouter (`or`) | Mandatory; [get here](https://openrouter.ai/settings/keys) | Faster, longer content |
|
| 109 |
-
| GPT-3.5 Turbo | OpenRouter (`or`) | Mandatory; [get here](https://openrouter.ai/settings/keys) | Faster, longer content |
|
| 110 |
-
| DeepSeek-V3.1-Terminus | SambaNova (`sn`) | Mandatory; [get here](https://cloud.sambanova.ai/apis) | Fast, detailed content |
|
| 111 |
-
| Llama-3.3-Swallow-70B-Instruct-v0.4 | SambaNova (`sn`) | Mandatory; [get here](https://cloud.sambanova.ai/apis) | Fast, shorter |
|
| 112 |
-
| DeepSeek V3-0324 | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Slower, medium-length |
|
| 113 |
-
| Llama 3.3 70B Instruct Turbo | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Slower, detailed |
|
| 114 |
-
| Llama 3.1 8B Instruct Turbo 128K | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Faster, shorter |
|
| 115 |
-
|
| 116 |
-
> **🔒 IMPORTANT: Your Privacy and Security are Paramount**
|
| 117 |
-
>
|
| 118 |
-
> SlideDeck AI does **NOT** store your API keys/tokens or transmit them elsewhere. Your key is _only_ used to invoke the relevant LLM for content generation—and that's it! As a fully **Open-Source** project, we encourage you to audit the code yourself for complete peace of mind.
|
| 119 |
-
|
| 120 |
-
In addition, offline LLMs provided by Ollama can be used. Read below to know more.
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
## Icons
|
| 124 |
-
|
| 125 |
-
SlideDeck AI uses a subset of icons from [bootstrap-icons-1.11.3](https://github.com/twbs/icons) (MIT license) in the slides. A few icons from [SVG Repo](https://www.svgrepo.com/)
|
| 126 |
-
(CC0, MIT, and Apache licenses) are also used.
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
## Local Development
|
| 130 |
-
|
| 131 |
-
SlideDeck AI uses LLMs via different providers. To run this project by yourself, you need to use an appropriate API key, for example, in a `.env` file.
|
| 132 |
-
Alternatively, you can provide the access token in the app's user interface itself (UI).
|
| 133 |
-
|
| 134 |
-
### Ultimate Privacy: Offline Generation with Ollama
|
| 135 |
-
|
| 136 |
-
SlideDeck AI allows the use of **offline LLMs** to generate the contents of the slide decks. This is typically suitable for individuals or organizations who would like to use self-hosted LLMs for privacy concerns, for example.
|
| 137 |
-
|
| 138 |
-
Offline LLMs are made available via Ollama. Therefore, a pre-requisite here is to have [Ollama installed](https://ollama.com/download) on the system and the desired [LLM](https://ollama.com/search) pulled locally. You should choose a model to use based on your hardware capacity. However, if you have no GPU, [gemma3:1b](https://ollama.com/library/gemma3:1b) can be a suitable model to run only on CPU.
|
| 139 |
-
|
| 140 |
-
In addition, the `RUN_IN_OFFLINE_MODE` environment variable needs to be set to `True` to enable the offline mode. This, for example, can be done using a `.env` file or from the terminal. The typical steps to use SlideDeck AI in offline mode (in a `bash` shell) are as follows:
|
| 141 |
-
|
| 142 |
-
```bash
|
| 143 |
-
# Environment initialization, especially on Debian
|
| 144 |
-
sudo apt update -y
|
| 145 |
-
sudo apt install python-is-python3 -y
|
| 146 |
-
sudo apt install git -y
|
| 147 |
-
# Change the package name based on the Python version installed: python -V
|
| 148 |
-
sudo apt install python3.11-venv -y
|
| 149 |
-
|
| 150 |
-
# Install Git Large File Storage (LFS)
|
| 151 |
-
sudo apt install git-lfs -y
|
| 152 |
-
git lfs install
|
| 153 |
-
|
| 154 |
-
ollama list # View locally available LLMs
|
| 155 |
-
export RUN_IN_OFFLINE_MODE=True # Enable the offline mode to use Ollama
|
| 156 |
-
git clone [https://github.com/barun-saha/slide-deck-ai.git](https://github.com/barun-saha/slide-deck-ai.git)
|
| 157 |
-
cd slide-deck-ai
|
| 158 |
-
git lfs pull # Pull the PPTX template files - ESSENTIAL STEP!
|
| 159 |
-
|
| 160 |
-
python -m venv venv # Create a virtual environment
|
| 161 |
-
source venv/bin/activate # On a Linux system
|
| 162 |
-
pip install -r requirements.txt
|
| 163 |
-
|
| 164 |
-
streamlit run ./app.py # Run the application
|
| 165 |
-
```
|
| 166 |
-
|
| 167 |
-
> 💡If you have cloned the repository locally but cannot open and view the PPTX templates, you may need to run `git lfs pull` to download the template files. Without this, although content generation will work, the slide deck cannot be created.
|
| 168 |
-
|
| 169 |
-
The `.env` file should be created inside the `slide-deck-ai` directory.
|
| 170 |
-
|
| 171 |
-
The UI is similar to the online mode. However, rather than selecting an LLM from a list, one has to write the name of the Ollama model to be used in a textbox. There is no API key asked here.
|
| 172 |
-
|
| 173 |
-
The online and offline modes are mutually exclusive. So, setting `RUN_IN_OFFLINE_MODE` to `False` will make SlideDeck AI use the online LLMs (i.e., the "original mode."). By default, `RUN_IN_OFFLINE_MODE` is set to `False`.
|
| 174 |
-
|
| 175 |
-
Finally, the focus is on using offline LLMs, not going completely offline. So, Internet connectivity would still be required to fetch the images from Pexels.
|
| 176 |
|
| 177 |
|
| 178 |
# Live Demo
|
| 179 |
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
- 🚀 Live App: [Try SlideDeck AI on Hugging Face Spaces](https://huggingface.co/spaces/barunsaha/slide-deck-ai)
|
| 183 |
-
- 🎥 Quick Demo: [Watch the core chat interface in action (YouTube)](https://youtu.be/QvAKzNKtk9k)
|
| 184 |
-
- 🤝 Enterprise Showcase: [See a demonstration using Azure OpenAI (YouTube)](https://youtu.be/oPbH-z3q0Mw)
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
# 🏆 Recognized Excellence
|
| 188 |
-
|
| 189 |
-
SlideDeck AI has won the 3rd Place in the [Llama 2 Hackathon with Clarifai](https://lablab.ai/event/llama-2-hackathon-with-clarifai) in 2023.
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
# Contributors
|
| 193 |
|
| 194 |
-
SlideDeck AI is glad to have the following community contributions:
|
| 195 |
-
- [Aditya](https://github.com/AdiBak): added support for page range selection for PDF files and new chat button.
|
| 196 |
-
- [Sagar Bharatbhai Bharadia](https://github.com/sagarbharadia17): added support for Gemini 2.5 Flash Lite and Gemini 2.5 Flash LLMs.
|
| 197 |
-
- [Sairam Pillai](https://github.com/sairampillai): unified the project's LLM access by migrating the API calls to **LiteLLM**.
|
| 198 |
-
- [Srinivasan Ragothaman](https://github.com/rsrini7): added OpenRouter support and API keys mapping from the `.env` file.
|
| 199 |
|
| 200 |
-
|
| 201 |
|
| 202 |
-
[
|
|
|
|
| 4 |
colorFrom: yellow
|
| 5 |
colorTo: green
|
| 6 |
sdk: streamlit
|
| 7 |
+
sdk_version: 1.26.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
license: mit
|
| 11 |
---
|
| 12 |
|
| 13 |
+
# SlideDeck AI
|
| 14 |
|
| 15 |
+
We spend a lot of time on creating the slides and organizing our thoughts for any presentation.
|
| 16 |
+
With SlideDeck AI, co-create slide decks on any topic with Generative Artificial Intelligence.
|
| 17 |
+
Describe your topic and let SlideDeck AI generate a PowerPoint slide deck for you—it's as simple as that!
|
|
|
|
|
|
|
| 18 |
|
| 19 |
+
SlideDeck AI is powered by [Mistral 7B Instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1).
|
| 20 |
+
Originally, it was built using the Llama 2 API provided by Clarifai.
|
| 21 |
|
| 22 |
+
# Process
|
| 23 |
|
| 24 |
+
SlideDeck AI works in the following way:
|
| 25 |
|
| 26 |
+
1. Given a topic description, it uses Mistral 7B Instruct to generate the outline/contents of the slides.
|
| 27 |
+
The output is generated as structured JSON data based on a pre-defined schema.
|
| 28 |
+
2. Subsequently, it uses the `python-pptx` library to generate the slides,
|
| 29 |
+
based on the JSON data from the previous step.
|
| 30 |
+
Here, a user can choose from a set of three pre-defined presentation templates.
|
| 31 |
+
3. In addition, it uses Metaphor to fetch Web pages related to the topic.
|
| 32 |
|
| 33 |
+
4. ~~Finally, it uses Stable Diffusion 2 to generate an image, based on the title and each slide heading.~~
|
| 34 |
|
| 35 |
|
| 36 |
+
# Local Development
|
| 37 |
|
| 38 |
+
SlideDeck AI uses [Mistral 7B Instruct](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
|
| 39 |
+
via the Hugging Face Inference API.
|
| 40 |
+
To run this project by yourself, you need to provide the `HUGGINGFACEHUB_API_TOKEN` and `METAPHOR_API_KEY` API keys,
|
| 41 |
+
for example, in a `.env` file. Visit the respective websites to obtain the keys.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
|
| 43 |
|
| 44 |
# Live Demo
|
| 45 |
|
| 46 |
+
[SlideDeck AI](https://huggingface.co/spaces/barunsaha/slide-deck-ai)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
+
# Award
|
| 50 |
|
| 51 |
+
SlideDeck AI has won the 3rd Place in the [Llama 2 Hackathon with Clarifai](https://lablab.ai/event/llama-2-hackathon-with-clarifai).
|
app.py
CHANGED
|
@@ -1,540 +1,319 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Streamlit app containing the UI and the application logic.
|
| 3 |
-
"""
|
| 4 |
-
import datetime
|
| 5 |
-
import logging
|
| 6 |
-
import os
|
| 7 |
import pathlib
|
| 8 |
-
import
|
| 9 |
-
import
|
|
|
|
| 10 |
|
| 11 |
-
import httpx
|
| 12 |
import json5
|
| 13 |
-
import
|
| 14 |
-
import requests
|
| 15 |
import streamlit as st
|
| 16 |
-
from dotenv import load_dotenv
|
| 17 |
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
from
|
| 21 |
-
from slidedeckai.global_config import GlobalConfig
|
| 22 |
-
from slidedeckai.helpers import llm_helper, text_helper
|
| 23 |
-
import slidedeckai.helpers.file_manager as filem
|
| 24 |
-
from slidedeckai.helpers.chat_helper import ChatMessage, HumanMessage, AIMessage
|
| 25 |
-
from slidedeckai.helpers import chat_helper
|
| 26 |
|
| 27 |
|
| 28 |
-
|
| 29 |
-
|
| 30 |
|
| 31 |
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
-
# Session variables
|
| 35 |
-
SLIDE_GENERATOR = 'slide_generator_instance'
|
| 36 |
-
CHAT_MESSAGES = 'chat_messages'
|
| 37 |
-
DOWNLOAD_FILE_KEY = 'download_file_name'
|
| 38 |
-
IS_IT_REFINEMENT = 'is_it_refinement'
|
| 39 |
-
ADDITIONAL_INFO = 'additional_info'
|
| 40 |
-
PDF_FILE_KEY = 'pdf_file'
|
| 41 |
-
API_INPUT_KEY = 'api_key_input'
|
| 42 |
|
| 43 |
-
|
| 44 |
-
|
|
|
|
|
|
|
| 45 |
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
-
|
| 48 |
-
|
| 49 |
|
| 50 |
-
def __init__(self, key: str):
|
| 51 |
-
"""Initialize the chat message history."""
|
| 52 |
-
self.key = key
|
| 53 |
-
if key not in st.session_state:
|
| 54 |
-
st.session_state[key] = []
|
| 55 |
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
st.session_state[self.key].append(HumanMessage(content))
|
| 64 |
|
| 65 |
-
|
| 66 |
-
"""Add an AI message to the history."""
|
| 67 |
-
st.session_state[self.key].append(AIMessage(content))
|
| 68 |
|
| 69 |
|
| 70 |
@st.cache_data
|
| 71 |
-
def
|
| 72 |
"""
|
| 73 |
-
|
| 74 |
|
| 75 |
-
|
| 76 |
-
|
| 77 |
"""
|
| 78 |
-
with open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8') as in_file:
|
| 79 |
-
return json5.loads(in_file.read())
|
| 80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
-
Args:
|
| 88 |
-
is_refinement: Whether this is the initial or refinement prompt.
|
| 89 |
|
| 90 |
-
|
| 91 |
-
|
|
|
|
| 92 |
"""
|
| 93 |
-
if is_refinement:
|
| 94 |
-
with open(GlobalConfig.REFINEMENT_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
|
| 95 |
-
template = in_file.read()
|
| 96 |
-
else:
|
| 97 |
-
with open(GlobalConfig.INITIAL_PROMPT_TEMPLATE, 'r', encoding='utf-8') as in_file:
|
| 98 |
-
template = in_file.read()
|
| 99 |
|
| 100 |
-
|
| 101 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
Args:
|
| 116 |
-
user_prompt: The prompt.
|
| 117 |
-
provider: The LLM provider.
|
| 118 |
-
selected_model: Name of the model.
|
| 119 |
-
user_key: User-provided API key.
|
| 120 |
-
azure_deployment_url: Azure OpenAI deployment URL.
|
| 121 |
-
azure_endpoint_name: Azure OpenAI model endpoint.
|
| 122 |
-
azure_api_version: Azure OpenAI API version.
|
| 123 |
-
|
| 124 |
-
Returns:
|
| 125 |
-
`True` if all inputs "look" OK; `False` otherwise.
|
| 126 |
-
"""
|
| 127 |
-
if not text_helper.is_valid_prompt(user_prompt):
|
| 128 |
-
handle_error(
|
| 129 |
-
'Not enough information provided!'
|
| 130 |
-
' Please be a little more descriptive and type a few words'
|
| 131 |
-
' with a few characters :)',
|
| 132 |
-
False
|
| 133 |
-
)
|
| 134 |
-
return False
|
| 135 |
-
|
| 136 |
-
if not provider or not selected_model:
|
| 137 |
-
handle_error('No valid LLM provider and/or model name found!', False)
|
| 138 |
-
return False
|
| 139 |
-
|
| 140 |
-
if not llm_helper.is_valid_llm_provider_model(
|
| 141 |
-
provider, selected_model, user_key,
|
| 142 |
-
azure_endpoint_name, azure_deployment_url, azure_api_version
|
| 143 |
-
):
|
| 144 |
-
handle_error(
|
| 145 |
-
'The LLM settings do not look correct. Make sure that an API key/access token'
|
| 146 |
-
' is provided if the selected LLM requires it. An API key should be 6-200 characters'
|
| 147 |
-
' long, only containing alphanumeric characters, hyphens, and underscores.\n\n'
|
| 148 |
-
'If you are using Azure OpenAI, make sure that you have provided the additional and'
|
| 149 |
-
' correct configurations.',
|
| 150 |
-
False
|
| 151 |
)
|
| 152 |
-
return False
|
| 153 |
|
| 154 |
-
|
|
|
|
| 155 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
|
| 157 |
-
|
| 158 |
-
|
| 159 |
-
Display an error message in the app.
|
| 160 |
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
"""
|
| 165 |
-
if should_log:
|
| 166 |
-
logger.error(error_msg)
|
| 167 |
|
| 168 |
-
|
|
|
|
|
|
|
|
|
|
| 169 |
|
|
|
|
|
|
|
| 170 |
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
"""
|
| 175 |
-
st.session_state.api_key_input = ''
|
| 176 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 177 |
|
| 178 |
-
|
|
|
|
| 179 |
"""
|
| 180 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 181 |
"""
|
| 182 |
-
# Clear session state variables using pop with None default
|
| 183 |
-
st.session_state.pop(SLIDE_GENERATOR, None)
|
| 184 |
-
st.session_state.pop(CHAT_MESSAGES, None)
|
| 185 |
-
st.session_state.pop(IS_IT_REFINEMENT, None)
|
| 186 |
-
st.session_state.pop(ADDITIONAL_INFO, None)
|
| 187 |
-
st.session_state.pop(PDF_FILE_KEY, None)
|
| 188 |
-
|
| 189 |
-
# Remove previously generated temp PPTX file
|
| 190 |
-
temp_pptx_path = st.session_state.pop(DOWNLOAD_FILE_KEY, None)
|
| 191 |
-
if temp_pptx_path:
|
| 192 |
-
pptx_path = pathlib.Path(temp_pptx_path)
|
| 193 |
-
if pptx_path.exists() and pptx_path.is_file():
|
| 194 |
-
pptx_path.unlink()
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
APP_TEXT = _load_strings()
|
| 198 |
-
|
| 199 |
-
|
| 200 |
-
# -----= UI display begins here =-----
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
with st.sidebar:
|
| 204 |
-
# New Chat button at the top of sidebar
|
| 205 |
-
col1, col2, col3 = st.columns([.17, 0.8, .1])
|
| 206 |
-
with col2:
|
| 207 |
-
if st.button('New Chat 💬', help='Start a new conversation', key='new_chat_button'):
|
| 208 |
-
reset_chat_history() # Reset the chat history when the button is clicked
|
| 209 |
-
|
| 210 |
-
# The PPT templates
|
| 211 |
-
pptx_template = st.sidebar.radio(
|
| 212 |
-
'1: Select a presentation template:',
|
| 213 |
-
TEXTS,
|
| 214 |
-
captions=CAPTIONS,
|
| 215 |
-
horizontal=True
|
| 216 |
-
)
|
| 217 |
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
label='2: Enter Ollama model name to use (e.g., gemma3:1b):',
|
| 221 |
-
help=(
|
| 222 |
-
'Specify a correct, locally available LLM, found by running `ollama list`, for'
|
| 223 |
-
' example, `gemma3:1b`, `mistral:v0.2`, and `mistral-nemo:latest`. Having an'
|
| 224 |
-
' Ollama-compatible and supported GPU is strongly recommended.'
|
| 225 |
-
)
|
| 226 |
-
)
|
| 227 |
-
# If a SlideDeckAI instance already exists in session state, update its model
|
| 228 |
-
# to reflect the user change rather than reusing the old model
|
| 229 |
-
# No API key required for local models
|
| 230 |
-
if SLIDE_GENERATOR in st.session_state and llm_provider_to_use:
|
| 231 |
-
try:
|
| 232 |
-
st.session_state[SLIDE_GENERATOR].set_model(llm_provider_to_use)
|
| 233 |
-
except Exception as e:
|
| 234 |
-
logger.error('Failed to update model on existing SlideDeckAI: %s', e)
|
| 235 |
-
# If updating fails, drop the stored instance so a new one is created
|
| 236 |
-
st.session_state.pop(SLIDE_GENERATOR, None)
|
| 237 |
-
|
| 238 |
-
api_key_token: str = ''
|
| 239 |
-
azure_endpoint: str = ''
|
| 240 |
-
azure_deployment: str = ''
|
| 241 |
-
api_version: str = ''
|
| 242 |
-
else:
|
| 243 |
-
# The online LLMs
|
| 244 |
-
llm_provider_to_use = st.sidebar.selectbox(
|
| 245 |
-
label='2: Select a suitable LLM to use:\n\n(Gemini and Mistral-Nemo are recommended)',
|
| 246 |
-
options=[f'{k} ({v["description"]})' for k, v in GlobalConfig.VALID_MODELS.items()],
|
| 247 |
-
index=GlobalConfig.DEFAULT_MODEL_INDEX,
|
| 248 |
-
help=GlobalConfig.LLM_PROVIDER_HELP,
|
| 249 |
-
on_change=reset_api_key
|
| 250 |
-
).split(' ')[0]
|
| 251 |
-
|
| 252 |
-
# --- Automatically fetch API key from .env if available ---
|
| 253 |
-
# Extract provider key using regex
|
| 254 |
-
provider_match = GlobalConfig.PROVIDER_REGEX.match(llm_provider_to_use)
|
| 255 |
-
if provider_match:
|
| 256 |
-
selected_provider = provider_match.group(1)
|
| 257 |
-
else:
|
| 258 |
-
# If regex doesn't match, try to extract provider from the beginning
|
| 259 |
-
selected_provider = (
|
| 260 |
-
llm_provider_to_use.split(' ')[0]
|
| 261 |
-
if ' ' in llm_provider_to_use else llm_provider_to_use
|
| 262 |
-
)
|
| 263 |
-
logger.warning(
|
| 264 |
-
'Provider regex did not match for: %s, using: %s',
|
| 265 |
-
llm_provider_to_use, selected_provider
|
| 266 |
-
)
|
| 267 |
-
|
| 268 |
-
# Validate that the selected provider is valid
|
| 269 |
-
if selected_provider not in GlobalConfig.VALID_PROVIDERS:
|
| 270 |
-
logger.error('Invalid provider: %s', selected_provider)
|
| 271 |
-
handle_error(f'Invalid provider selected: {selected_provider}', True)
|
| 272 |
-
st.stop()
|
| 273 |
-
|
| 274 |
-
env_key_name = GlobalConfig.PROVIDER_ENV_KEYS.get(selected_provider)
|
| 275 |
-
default_api_key = os.getenv(env_key_name, '') if env_key_name else ''
|
| 276 |
-
|
| 277 |
-
# Always sync session state to env value if needed (autofill on provider change)
|
| 278 |
-
if default_api_key and st.session_state.get(API_INPUT_KEY, None) != default_api_key:
|
| 279 |
-
st.session_state[API_INPUT_KEY] = default_api_key
|
| 280 |
-
|
| 281 |
-
api_key_token = st.text_input(
|
| 282 |
-
label=(
|
| 283 |
-
'3: Paste your API key/access token:\n\n'
|
| 284 |
-
'*Mandatory* for all providers.'
|
| 285 |
-
),
|
| 286 |
-
key=API_INPUT_KEY,
|
| 287 |
-
type='password',
|
| 288 |
-
disabled=bool(default_api_key),
|
| 289 |
-
)
|
| 290 |
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
-
|
| 299 |
-
|
| 300 |
-
|
| 301 |
-
|
| 302 |
-
|
| 303 |
-
|
| 304 |
-
'
|
| 305 |
-
'
|
|
|
|
| 306 |
)
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
'
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
)
|
| 314 |
-
api_version = st.text_input(
|
| 315 |
-
label=(
|
| 316 |
-
'6: API version:\n\n'
|
| 317 |
-
'*Mandatory* field. Change based on your deployment configurations.'
|
| 318 |
-
),
|
| 319 |
-
value='2024-05-01-preview',
|
| 320 |
-
)
|
| 321 |
-
|
| 322 |
-
# Make slider with initial values
|
| 323 |
-
page_range_slider = st.slider(
|
| 324 |
-
'Specify a page range for the uploaded PDF file (if any):',
|
| 325 |
-
1, GlobalConfig.MAX_ALLOWED_PAGES,
|
| 326 |
-
[1, GlobalConfig.MAX_ALLOWED_PAGES]
|
| 327 |
-
)
|
| 328 |
-
st.session_state['page_range_slider'] = page_range_slider
|
| 329 |
|
|
|
|
| 330 |
|
| 331 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 332 |
"""
|
| 333 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 334 |
"""
|
| 335 |
-
st.title(APP_TEXT['app_name'])
|
| 336 |
-
st.subheader(APP_TEXT['caption'])
|
| 337 |
-
st.markdown(
|
| 338 |
-
'' # noqa: E501
|
| 339 |
-
)
|
| 340 |
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
|
|
|
| 350 |
)
|
| 351 |
|
| 352 |
-
|
| 353 |
-
st.text(APP_TEXT['tos'] + '\n\n' + APP_TEXT['tos2'])
|
| 354 |
|
| 355 |
-
|
|
|
|
| 356 |
|
|
|
|
| 357 |
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
Prepare the chat interface and related functionality.
|
| 361 |
"""
|
| 362 |
-
|
| 363 |
-
st.session_state['start_page'] = st.session_state['page_range_slider'][0]
|
| 364 |
-
st.session_state['end_page'] = st.session_state['page_range_slider'][1]
|
| 365 |
|
| 366 |
-
|
| 367 |
-
|
|
|
|
|
|
|
|
|
|
| 368 |
|
| 369 |
-
|
| 370 |
-
|
| 371 |
|
| 372 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
for msg in history.messages:
|
| 377 |
-
st.chat_message(msg.type).code(msg.content, language='json')
|
| 378 |
|
| 379 |
-
|
| 380 |
-
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
|
| 384 |
-
|
| 385 |
)
|
|
|
|
| 386 |
|
| 387 |
-
|
| 388 |
-
|
| 389 |
-
if prompt['files']:
|
| 390 |
-
# Store uploaded pdf in session state
|
| 391 |
-
uploaded_pdf = prompt['files'][0]
|
| 392 |
-
st.session_state[PDF_FILE_KEY] = uploaded_pdf
|
| 393 |
-
# Apparently, Streamlit stores uploaded files in memory and clears on browser close
|
| 394 |
-
# https://docs.streamlit.io/knowledge-base/using-streamlit/where-file-uploader-store-when-deleted
|
| 395 |
-
|
| 396 |
-
# Check if pdf file is uploaded
|
| 397 |
-
# (we can use the same file if the user doesn't upload a new one)
|
| 398 |
-
if PDF_FILE_KEY in st.session_state:
|
| 399 |
-
# Get validated page range
|
| 400 |
-
(
|
| 401 |
-
st.session_state['start_page'],
|
| 402 |
-
st.session_state['end_page']
|
| 403 |
-
) = filem.validate_page_range(
|
| 404 |
-
st.session_state[PDF_FILE_KEY],
|
| 405 |
-
st.session_state['start_page'],
|
| 406 |
-
st.session_state['end_page']
|
| 407 |
-
)
|
| 408 |
-
# Show sidebar text for page selection and file name
|
| 409 |
-
with st.sidebar:
|
| 410 |
-
if st.session_state['end_page'] is None: # If the PDF has only one page
|
| 411 |
-
st.text(
|
| 412 |
-
f'Extracting page {st.session_state["start_page"]} in'
|
| 413 |
-
f' {st.session_state["pdf_file"].name}'
|
| 414 |
-
)
|
| 415 |
-
else:
|
| 416 |
-
st.text(
|
| 417 |
-
f'Extracting pages {st.session_state["start_page"]} to'
|
| 418 |
-
f' {st.session_state["end_page"]} in {st.session_state["pdf_file"].name}'
|
| 419 |
-
)
|
| 420 |
-
|
| 421 |
-
st.chat_message('user').write(prompt_text)
|
| 422 |
-
|
| 423 |
-
if SLIDE_GENERATOR in st.session_state:
|
| 424 |
-
slide_generator = st.session_state[SLIDE_GENERATOR]
|
| 425 |
-
else:
|
| 426 |
-
slide_generator = SlideDeckAI(
|
| 427 |
-
model=llm_provider_to_use,
|
| 428 |
-
topic=prompt_text,
|
| 429 |
-
api_key=api_key_token.strip(),
|
| 430 |
-
template_idx=list(GlobalConfig.PPTX_TEMPLATE_FILES.keys()).index(pptx_template),
|
| 431 |
-
pdf_path_or_stream=st.session_state.get(PDF_FILE_KEY),
|
| 432 |
-
pdf_page_range=(
|
| 433 |
-
st.session_state.get('start_page'), st.session_state.get('end_page')
|
| 434 |
-
),
|
| 435 |
-
)
|
| 436 |
-
st.session_state[SLIDE_GENERATOR] = slide_generator
|
| 437 |
-
|
| 438 |
-
progress_bar = st.progress(0, 'Preparing to call LLM...')
|
| 439 |
-
|
| 440 |
-
def progress_callback(current_progress):
|
| 441 |
-
progress_bar.progress(
|
| 442 |
-
min(current_progress / gcfg.get_max_output_tokens(llm_provider_to_use), 0.95),
|
| 443 |
-
text='Streaming content...this might take a while...'
|
| 444 |
-
)
|
| 445 |
|
| 446 |
-
|
| 447 |
-
if _is_it_refinement():
|
| 448 |
-
path = slide_generator.revise(
|
| 449 |
-
instructions=prompt_text, progress_callback=progress_callback
|
| 450 |
-
)
|
| 451 |
-
else:
|
| 452 |
-
path = slide_generator.generate(progress_callback=progress_callback)
|
| 453 |
|
| 454 |
-
progress_bar.progress(1.0, text='Done!')
|
| 455 |
-
|
| 456 |
-
if path:
|
| 457 |
-
st.session_state[DOWNLOAD_FILE_KEY] = str(path)
|
| 458 |
-
history.add_user_message(prompt_text)
|
| 459 |
-
history.add_ai_message(slide_generator.last_response)
|
| 460 |
-
st.chat_message('ai').code(slide_generator.last_response, language='json')
|
| 461 |
-
_display_download_button(path)
|
| 462 |
-
else:
|
| 463 |
-
handle_error('Failed to generate slide deck.', True)
|
| 464 |
-
|
| 465 |
-
except (httpx.ConnectError, requests.exceptions.ConnectionError):
|
| 466 |
-
handle_error(
|
| 467 |
-
'A connection error occurred while streaming content from the LLM endpoint.'
|
| 468 |
-
' Unfortunately, the slide deck cannot be generated. Please try again later.'
|
| 469 |
-
' Alternatively, try selecting a different LLM from the dropdown list. If you are'
|
| 470 |
-
' using Ollama, make sure that Ollama is already running on your system.',
|
| 471 |
-
True
|
| 472 |
-
)
|
| 473 |
-
except ollama.ResponseError:
|
| 474 |
-
handle_error(
|
| 475 |
-
'The model is unavailable with Ollama on your system.'
|
| 476 |
-
' Make sure that you have provided the correct LLM name or pull it.'
|
| 477 |
-
' View LLMs available locally by running `ollama list`.',
|
| 478 |
-
True
|
| 479 |
-
)
|
| 480 |
-
except Exception as ex:
|
| 481 |
-
if 'litellm.AuthenticationError' in str(ex):
|
| 482 |
-
handle_error(
|
| 483 |
-
f'LLM API authentication failed: {ex}'
|
| 484 |
-
'\nMake sure that you have provided a valid, correct API key.'
|
| 485 |
-
' Read **[how to get free LLM API keys](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#unmatched-flexibility-choose-your-ai-brain)**.',
|
| 486 |
-
True
|
| 487 |
-
)
|
| 488 |
-
else:
|
| 489 |
-
handle_error('An unexpected error occurred: ' + str(ex), True)
|
| 490 |
|
| 491 |
-
|
| 492 |
-
def _is_it_refinement() -> bool:
|
| 493 |
"""
|
| 494 |
-
|
| 495 |
|
| 496 |
-
|
| 497 |
-
True if it is the initial prompt; False otherwise.
|
| 498 |
"""
|
| 499 |
-
if IS_IT_REFINEMENT in st.session_state:
|
| 500 |
-
return True
|
| 501 |
|
| 502 |
-
|
| 503 |
-
|
| 504 |
-
|
| 505 |
-
|
|
|
|
| 506 |
|
| 507 |
-
|
|
|
|
| 508 |
|
|
|
|
|
|
|
| 509 |
|
| 510 |
-
|
| 511 |
-
"""
|
| 512 |
-
Get a list of user messages submitted until now from the session state.
|
| 513 |
|
| 514 |
-
|
| 515 |
-
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
|
| 519 |
-
|
| 520 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 521 |
|
| 522 |
|
| 523 |
-
def
|
| 524 |
"""
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
Args:
|
| 528 |
-
file_path: The path of the .pptx file.
|
| 529 |
"""
|
| 530 |
-
|
| 531 |
-
|
| 532 |
-
'Download PPTX file ⬇️',
|
| 533 |
-
data=download_file,
|
| 534 |
-
file_name='Presentation.pptx',
|
| 535 |
-
key=datetime.datetime.now()
|
| 536 |
-
)
|
| 537 |
|
| 538 |
|
| 539 |
if __name__ == '__main__':
|
| 540 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import pathlib
|
| 2 |
+
import logging
|
| 3 |
+
import tempfile
|
| 4 |
+
from typing import List, Tuple
|
| 5 |
|
|
|
|
| 6 |
import json5
|
| 7 |
+
import metaphor_python as metaphor
|
|
|
|
| 8 |
import streamlit as st
|
|
|
|
| 9 |
|
| 10 |
+
import llm_helper
|
| 11 |
+
import pptx_helper
|
| 12 |
+
from global_config import GlobalConfig
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
|
| 15 |
+
APP_TEXT = json5.loads(open(GlobalConfig.APP_STRINGS_FILE, 'r', encoding='utf-8').read())
|
| 16 |
+
GB_CONVERTER = 2 ** 30
|
| 17 |
|
| 18 |
|
| 19 |
+
logging.basicConfig(
|
| 20 |
+
level=GlobalConfig.LOG_LEVEL,
|
| 21 |
+
format='%(asctime)s - %(message)s',
|
| 22 |
+
)
|
| 23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
|
| 25 |
+
@st.cache_data
def get_contents_wrapper(text: str) -> str:
    """
    Fetch and cache the slide deck contents on a topic by calling an external API.

    The ``st.cache_data`` decorator ensures the LLM is contacted only on a cache miss.

    :param text: The presentation topic
    :return: The slide deck contents or outline in JSON format
    """
    logging.info('LLM call because of cache miss...')
    generated_content = llm_helper.generate_slides_content(text)
    return generated_content.strip()
|
| 36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
+
@st.cache_resource
def get_metaphor_client_wrapper() -> metaphor.Metaphor:
    """
    Create a Metaphor client for semantic Web search.

    Cached as a resource so a single client instance is shared across reruns.

    :return: Metaphor instance
    """
    client = metaphor.Metaphor(api_key=GlobalConfig.METAPHOR_API_KEY)
    return client
|
|
|
|
|
|
|
| 47 |
|
| 48 |
|
| 49 |
@st.cache_data
def get_web_search_results_wrapper(text: str) -> List[Tuple[str, str]]:
    """
    Fetch and cache the Web search results on a given topic.

    :param text: The topic
    :return: A list of (title, link) tuples
    """
    search_results = get_metaphor_client_wrapper().search(
        text,
        use_autoprompt=True,
        num_results=5
    )

    # Build the (title, URL) pairs directly with a comprehension rather than a
    # manual append loop (pylint/perflint: consider-using-list-comprehension)
    return [(a_result.title, a_result.url) for a_result in search_results.results]
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
# def get_disk_used_percentage() -> float:
|
| 72 |
+
# """
|
| 73 |
+
# Compute the disk usage.
|
| 74 |
+
#
|
| 75 |
+
# :return: Percentage of the disk space currently used
|
| 76 |
+
# """
|
| 77 |
+
#
|
| 78 |
+
# total, used, free = shutil.disk_usage(__file__)
|
| 79 |
+
# total = total // GB_CONVERTER
|
| 80 |
+
# used = used // GB_CONVERTER
|
| 81 |
+
# free = free // GB_CONVERTER
|
| 82 |
+
# used_perc = 100.0 * used / total
|
| 83 |
+
#
|
| 84 |
+
# logging.debug(f'Total: {total} GB\n'
|
| 85 |
+
# f'Used: {used} GB\n'
|
| 86 |
+
# f'Free: {free} GB')
|
| 87 |
+
#
|
| 88 |
+
# logging.debug('\n'.join(os.listdir()))
|
| 89 |
+
#
|
| 90 |
+
# return used_perc
|
| 91 |
|
|
|
|
|
|
|
| 92 |
|
| 93 |
+
def build_ui():
    """
    Display the input elements for content generation. Only covers the first step.
    """

    # get_disk_used_percentage()

    st.title(APP_TEXT['app_name'])
    st.subheader(APP_TEXT['caption'])
    st.markdown(
        'Powered by'
        ' [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2).'
    )
    st.markdown(
        '*If the JSON is generated or parsed incorrectly, try again later by making minor changes'
        ' to the input text.*'
    )

    with st.form('my_form'):
        # Topic input
        # Pre-fill the topic from an optional preload file; fall back to empty
        # defaults when the file is absent or unreadable
        try:
            with open(GlobalConfig.PRELOAD_DATA_FILE, 'r', encoding='utf-8') as in_file:
                preload_data = json5.loads(in_file.read())
        except (FileExistsError, FileNotFoundError):
            preload_data = {'topic': '', 'audience': ''}

        topic = st.text_area(
            APP_TEXT['input_labels'][0],
            value=preload_data['topic']
        )

        texts = list(GlobalConfig.PPTX_TEMPLATE_FILES.keys())
        captions = [GlobalConfig.PPTX_TEMPLATE_FILES[x]['caption'] for x in texts]

        pptx_template = st.radio(
            'Select a presentation template:',
            texts,
            captions=captions,
            horizontal=True
        )

        st.divider()
        submit = st.form_submit_button('Generate slide deck')

        if submit:
            # st.write(f'Clicked {time.time()}')
            # Persist the click across Streamlit reruns via session state
            st.session_state.submitted = True

    # https://github.com/streamlit/streamlit/issues/3832#issuecomment-1138994421
    # Generation runs outside the form so it can survive subsequent reruns
    if 'submitted' in st.session_state:
        progress_text = 'Generating the slides...give it a moment'
        progress_bar = st.progress(0, text=progress_text)

        topic_txt = topic.strip()
        generate_presentation(topic_txt, pptx_template, progress_bar)

    st.divider()
    st.text(APP_TEXT['tos'])
    st.text(APP_TEXT['tos2'])

    st.markdown(
        '![Visitors]'
        '(https://api.visitorbadge.io/api/visitors?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fbarunsaha%2Fslide-deck-ai&countColor=%23263759)'
    )
|
| 157 |
|
| 158 |
+
|
| 159 |
+
def generate_presentation(topic: str, pptx_template: str, progress_bar):
    """
    Process the inputs to generate the slides.

    :param topic: The presentation topic based on which contents are to be generated
    :param pptx_template: The PowerPoint template name to be used
    :param progress_bar: Progress bar from the page
    :return:
    """
    topic_length = len(topic)
    logging.debug('Input length:: topic: %s', topic_length)

    if topic_length >= 10:
        logging.debug('Topic: %s', topic)
        # Clip overly long topics to the LLM's maximum input length
        target_length = min(topic_length, GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH)

        try:
            # Step 1: Generate the contents in JSON format using an LLM
            json_str = process_slides_contents(topic[:target_length], progress_bar)
            logging.debug('Truncated topic: %s', topic[:target_length])
            logging.debug('Length of JSON: %d', len(json_str))

            # Step 2: Generate the slide deck based on the template specified
            if json_str:
                st.info(
                    'Tip: The generated content doesn\'t look so great?'
                    ' Need alternatives? Just change your description text and try again.',
                    icon="💡️"
                )
            else:
                st.error(
                    'Unfortunately, JSON generation failed, so the next steps would lead'
                    ' to nowhere. Try again or come back later.'
                )
                return

            all_headers = generate_slide_deck(json_str, pptx_template, progress_bar)

            # Step 3: Bonus stuff: Web references and AI art
            show_bonus_stuff(all_headers)

        except ValueError as ve:
            # Only the first segment interpolates; the second is a plain string
            # (pylint: f-string-without-interpolation)
            st.error(
                f'Unfortunately, an error occurred: {ve}! '
                'Please change the text, try again later, or report it, sharing your inputs.'
            )

    else:
        st.error('Not enough information provided! Please be little more descriptive :)')
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def process_slides_contents(text: str, progress_bar: st.progress) -> str:
    """
    Convert given text into structured data and display. Update the UI.

    :param text: The topic description for the presentation
    :param progress_bar: Progress bar for this step
    :return: The contents as a JSON-formatted string (empty string on failure)
    """
    json_str = ''

    try:
        logging.info('Calling LLM for content generation on the topic: %s', text)
        json_str = get_contents_wrapper(text)
    except Exception as ex:  # pylint: disable=broad-except
        # Broad catch is deliberate: this is a UI boundary, and any failure is
        # surfaced to the user instead of crashing the app. Log it for diagnosis.
        logging.exception('Content generation failed')
        # f-string only on the segment that interpolates
        # (pylint: f-string-without-interpolation)
        st.error(
            'An exception occurred while trying to convert to JSON. It could be because of heavy'
            ' traffic or something else. Try doing it again or try again later.'
            f'\nError message: {ex}'
        )

    progress_bar.progress(50, text='Contents generated')

    with st.expander('The generated contents (in JSON format)'):
        st.code(json_str, language='json')

    return json_str
|
| 236 |
|
| 237 |
+
|
| 238 |
+
def generate_slide_deck(json_str: str, pptx_template: str, progress_bar) -> List:
    """
    Create a slide deck.

    :param json_str: The contents in JSON format
    :param pptx_template: The PPTX template name
    :param progress_bar: Progress bar
    :return: A list of all slide headers and the title
    """
    progress_text = 'Creating the slide deck...give it a moment'
    progress_bar.progress(75, text=progress_text)

    # # Get a unique name for the file to save -- use the session ID
    # ctx = st_sr.get_script_run_ctx()
    # session_id = ctx.session_id
    # timestamp = time.time()
    # output_file_name = f'{session_id}_{timestamp}.pptx'

    # delete=False so the file persists while pptx_helper writes it and the
    # download button reads it; close the handle right away to avoid leaking it
    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.pptx')
    path = pathlib.Path(temp.name)
    temp.close()

    logging.info('Creating PPTX file...')
    all_headers = pptx_helper.generate_powerpoint_presentation(
        json_str,
        as_yaml=False,
        slides_template=pptx_template,
        output_file_path=path
    )
    progress_bar.progress(100, text='Done!')

    with open(path, 'rb') as f:
        st.download_button('Download PPTX file', f, file_name='Presentation.pptx')

    return all_headers
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 273 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 274 |
|
| 275 |
+
def show_bonus_stuff(ppt_headers: List[str]):
    """
    Show bonus stuff for the presentation.

    :param ppt_headers: A list of the slide headings.
    """
    # Use the presentation title and the slide headers to find relevant info online
    logging.info('Calling Metaphor search...')
    ppt_text = ' '.join(ppt_headers)
    search_results = get_web_search_results_wrapper(ppt_text)

    # Comprehension instead of a manual append loop (perflint PERF401)
    md_text_items = [f'[{title}]({link})' for (title, link) in search_results]

    with st.expander('Related Web references'):
        st.markdown('\n\n'.join(md_text_items))

    logging.info('Done!')

    # # Avoid image generation. It costs time and an API call, so just limit to the text generation.
    # with st.expander('AI-generated image on the presentation topic'):
    #     logging.info('Calling SDXL for image generation...')
    #     # img_empty.write('')
    #     # img_text.write(APP_TEXT['image_info'])
    #     image = get_ai_image_wrapper(ppt_text)
    #
    #     if len(image) > 0:
    #         image = base64.b64decode(image)
    #         st.image(image, caption=ppt_text)
    #         st.info('Tip: Right-click on the image to save it.', icon="💡️")
    #         logging.info('Image added')
|
| 308 |
|
| 309 |
|
| 310 |
+
def main():
    """Entry point: render the Streamlit user interface."""
    build_ui()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 316 |
|
| 317 |
|
| 318 |
# Run the app only when executed as a script (not when imported)
if __name__ == '__main__':
    main()
|
clarifai_grpc_helper.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
from clarifai_grpc.grpc.api.status import status_code_pb2

from global_config import GlobalConfig


# Shared gRPC channel and service stub for the Clarifai API
CHANNEL = ClarifaiChannel.get_grpc_channel()
STUB = service_pb2_grpc.V2Stub(CHANNEL)

# Per-request auth metadata; the PAT comes from the app configuration
METADATA = (
    ('authorization', 'Key ' + GlobalConfig.CLARIFAI_PAT),
)

# Identifies the Clarifai user/app that owns the model being called
USER_DATA_OBJECT = resources_pb2.UserAppIDSet(
    user_id=GlobalConfig.CLARIFAI_USER_ID,
    app_id=GlobalConfig.CLARIFAI_APP_ID
)

# Sample prompt used for manual testing (see the __main__ block below)
RAW_TEXT = '''You are a helpful, intelligent chatbot. Create the slides for a presentation on the given topic. Include main headings for each slide, detailed bullet points for each slide. Add relevant content to each slide. Do not output any blank line.

Topic:
Talk about AI, covering what it is and how it works. Add its pros, cons, and future prospects. Also, cover its job prospects.
'''
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_text_from_llm(prompt: str) -> str:
    """
    Send a text prompt to the configured Clarifai-hosted LLM and return its completion.

    :param prompt: The prompt text to send to the model
    :return: The raw text generated by the model
    :raises RuntimeError: If the Clarifai API call does not return a success status
    """
    post_model_outputs_response = STUB.PostModelOutputs(
        service_pb2.PostModelOutputsRequest(
            # The user_app_id is required when authenticating with a PAT
            user_app_id=USER_DATA_OBJECT,
            model_id=GlobalConfig.CLARIFAI_MODEL_ID,
            # version_id=MODEL_VERSION_ID,  # This is optional. Defaults to the latest model version
            inputs=[
                resources_pb2.Input(
                    data=resources_pb2.Data(
                        text=resources_pb2.Text(
                            raw=prompt
                        )
                    )
                )
            ]
        ),
        metadata=METADATA
    )

    if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
        print(post_model_outputs_response.status)
        # RuntimeError instead of bare Exception (pylint: broad-exception-raised);
        # still caught by any caller handling Exception
        raise RuntimeError(
            f'Post model outputs failed, status: '
            f'{post_model_outputs_response.status.description}'
        )

    # Since we have one input, one output will exist here
    output = post_model_outputs_response.outputs[0]

    # print("Completion:\n")
    # print(output.data.text.raw)

    return output.data.text.raw
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Manual smoke test: generate slides content for a sample topic
if __name__ == '__main__':
    topic = ('Talk about AI, covering what it is and how it works.'
             ' Add its pros, cons, and future prospects.'
             ' Also, cover its job prospects.'
             )
    print(topic)

    # Explicit encoding avoids platform-dependent defaults (pylint: unspecified-encoding)
    with open(GlobalConfig.SLIDES_TEMPLATE_FILE, 'r', encoding='utf-8') as in_file:
        prompt_txt = in_file.read()

    prompt_txt = prompt_txt.replace('{topic}', topic)
    response_txt = get_text_from_llm(prompt_txt)

    print('Output:\n', response_txt)
|
docs/_templates/module.rst
DELETED
|
@@ -1,25 +0,0 @@
|
|
| 1 |
-
{{ fullname | escape | underline }}
|
| 2 |
-
===================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: {{ module }}
|
| 5 |
-
|
| 6 |
-
.. automodule:: {{ fullname }}
|
| 7 |
-
:noindex:
|
| 8 |
-
|
| 9 |
-
.. autosummary::
|
| 10 |
-
:toctree:
|
| 11 |
-
:nosignatures:
|
| 12 |
-
|
| 13 |
-
{% for item in functions %}
|
| 14 |
-
{{ item }}
|
| 15 |
-
{% endfor %}
|
| 16 |
-
|
| 17 |
-
{% for item in classes %}
|
| 18 |
-
{{ item }}
|
| 19 |
-
{% endfor %}
|
| 20 |
-
|
| 21 |
-
.. automodule:: {{ fullname }}
|
| 22 |
-
:members:
|
| 23 |
-
:undoc-members:
|
| 24 |
-
:show-inheritance:
|
| 25 |
-
:member-order: alphabetical
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/api.rst
DELETED
|
@@ -1,18 +0,0 @@
|
|
| 1 |
-
API Reference
|
| 2 |
-
=============
|
| 3 |
-
|
| 4 |
-
.. autosummary::
|
| 5 |
-
:toctree: generated/
|
| 6 |
-
:template: module.rst
|
| 7 |
-
:nosignatures:
|
| 8 |
-
:caption: Core Modules and Classes
|
| 9 |
-
|
| 10 |
-
slidedeckai.cli
|
| 11 |
-
slidedeckai.core
|
| 12 |
-
slidedeckai.helpers.chat_helper
|
| 13 |
-
slidedeckai.helpers.file_manager
|
| 14 |
-
slidedeckai.helpers.icons_embeddings
|
| 15 |
-
slidedeckai.helpers.image_search
|
| 16 |
-
slidedeckai.helpers.llm_helper
|
| 17 |
-
slidedeckai.helpers.pptx_helper
|
| 18 |
-
slidedeckai.helpers.text_helper
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/conf.py
DELETED
|
@@ -1,50 +0,0 @@
|
|
| 1 |
-
"""
|
| 2 |
-
Sphinx configuration file for the SlideDeck AI documentation.
|
| 3 |
-
This file sets up Sphinx to generate documentation from the source code
|
| 4 |
-
located in the 'src' directory, and includes support for Markdown files
|
| 5 |
-
using the MyST parser.
|
| 6 |
-
"""
|
| 7 |
-
import os
|
| 8 |
-
import sys
|
| 9 |
-
|
| 10 |
-
# --- Path setup ---
|
| 11 |
-
# Crucial: This tells Sphinx to look in 'src' to find the 'slidedeckai' package.
|
| 12 |
-
sys.path.insert(0, os.path.abspath('../src'))
|
| 13 |
-
|
| 14 |
-
# --- Project information ---
|
| 15 |
-
project = 'SlideDeck AI'
|
| 16 |
-
copyright = '2025, Barun Saha'
|
| 17 |
-
author = 'Barun Saha'
|
| 18 |
-
|
| 19 |
-
# --- General configuration ---
|
| 20 |
-
extensions = [
|
| 21 |
-
'sphinx.ext.autodoc',
|
| 22 |
-
'sphinx.ext.autosummary',
|
| 23 |
-
'sphinx.ext.napoleon', # Converts Google/NumPy style docstrings
|
| 24 |
-
'sphinx.ext.viewcode',
|
| 25 |
-
'myst_parser', # Enables Markdown support (.md files)
|
| 26 |
-
]
|
| 27 |
-
autosummary_generate = True
|
| 28 |
-
|
| 29 |
-
# --- Autodoc configuration for sorting ---
|
| 30 |
-
autodoc_member_order = 'alphabetical'
|
| 31 |
-
|
| 32 |
-
# Tell Sphinx to look for custom templates
|
| 33 |
-
templates_path = ['_templates']
|
| 34 |
-
|
| 35 |
-
# Configure MyST to allow cross-referencing and nested structure
|
| 36 |
-
myst_enable_extensions = [
|
| 37 |
-
'deflist',
|
| 38 |
-
'html_image',
|
| 39 |
-
'linkify',
|
| 40 |
-
'replacements',
|
| 41 |
-
'html_admonition'
|
| 42 |
-
]
|
| 43 |
-
source_suffix = {
|
| 44 |
-
'.rst': 'restructuredtext',
|
| 45 |
-
'.md': 'markdown',
|
| 46 |
-
}
|
| 47 |
-
|
| 48 |
-
html_theme = 'pydata_sphinx_theme'
|
| 49 |
-
master_doc = 'index'
|
| 50 |
-
html_show_sourcelink = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.CustomArgumentParser.rst
DELETED
|
@@ -1,40 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.CustomArgumentParser
|
| 2 |
-
====================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autoclass:: CustomArgumentParser
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~CustomArgumentParser.__init__
|
| 17 |
-
~CustomArgumentParser.add_argument
|
| 18 |
-
~CustomArgumentParser.add_argument_group
|
| 19 |
-
~CustomArgumentParser.add_mutually_exclusive_group
|
| 20 |
-
~CustomArgumentParser.add_subparsers
|
| 21 |
-
~CustomArgumentParser.convert_arg_line_to_args
|
| 22 |
-
~CustomArgumentParser.error
|
| 23 |
-
~CustomArgumentParser.exit
|
| 24 |
-
~CustomArgumentParser.format_help
|
| 25 |
-
~CustomArgumentParser.format_usage
|
| 26 |
-
~CustomArgumentParser.get_default
|
| 27 |
-
~CustomArgumentParser.parse_args
|
| 28 |
-
~CustomArgumentParser.parse_intermixed_args
|
| 29 |
-
~CustomArgumentParser.parse_known_args
|
| 30 |
-
~CustomArgumentParser.parse_known_intermixed_args
|
| 31 |
-
~CustomArgumentParser.print_help
|
| 32 |
-
~CustomArgumentParser.print_usage
|
| 33 |
-
~CustomArgumentParser.register
|
| 34 |
-
~CustomArgumentParser.set_defaults
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.CustomHelpFormatter.rst
DELETED
|
@@ -1,29 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.CustomHelpFormatter
|
| 2 |
-
===================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autoclass:: CustomHelpFormatter
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~CustomHelpFormatter.__init__
|
| 17 |
-
~CustomHelpFormatter.add_argument
|
| 18 |
-
~CustomHelpFormatter.add_arguments
|
| 19 |
-
~CustomHelpFormatter.add_text
|
| 20 |
-
~CustomHelpFormatter.add_usage
|
| 21 |
-
~CustomHelpFormatter.end_section
|
| 22 |
-
~CustomHelpFormatter.format_help
|
| 23 |
-
~CustomHelpFormatter.start_section
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.format_model_help.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.format\_model\_help
|
| 2 |
-
===================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autofunction:: format_model_help
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.format_models_as_bullets.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.format\_models\_as\_bullets
|
| 2 |
-
===========================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autofunction:: format_models_as_bullets
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.format_models_list.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.format\_models\_list
|
| 2 |
-
====================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autofunction:: format_models_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.group_models_by_provider.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.group\_models\_by\_provider
|
| 2 |
-
===========================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autofunction:: group_models_by_provider
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.main.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli.main
|
| 2 |
-
====================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.cli
|
| 5 |
-
|
| 6 |
-
.. autofunction:: main
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.cli.rst
DELETED
|
@@ -1,36 +0,0 @@
|
|
| 1 |
-
slidedeckai.cli
|
| 2 |
-
===============
|
| 3 |
-
===================================
|
| 4 |
-
|
| 5 |
-
.. currentmodule:: slidedeckai
|
| 6 |
-
|
| 7 |
-
.. automodule:: slidedeckai.cli
|
| 8 |
-
:noindex:
|
| 9 |
-
|
| 10 |
-
.. autosummary::
|
| 11 |
-
:toctree:
|
| 12 |
-
:nosignatures:
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
format_model_help
|
| 16 |
-
|
| 17 |
-
format_models_as_bullets
|
| 18 |
-
|
| 19 |
-
format_models_list
|
| 20 |
-
|
| 21 |
-
group_models_by_provider
|
| 22 |
-
|
| 23 |
-
main
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
CustomArgumentParser
|
| 28 |
-
|
| 29 |
-
CustomHelpFormatter
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
.. automodule:: slidedeckai.cli
|
| 33 |
-
:members:
|
| 34 |
-
:undoc-members:
|
| 35 |
-
:show-inheritance:
|
| 36 |
-
:member-order: alphabetical
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.core.SlideDeckAI.rst
DELETED
|
@@ -1,27 +0,0 @@
|
|
| 1 |
-
slidedeckai.core.SlideDeckAI
|
| 2 |
-
============================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.core
|
| 5 |
-
|
| 6 |
-
.. autoclass:: SlideDeckAI
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~SlideDeckAI.__init__
|
| 17 |
-
~SlideDeckAI.generate
|
| 18 |
-
~SlideDeckAI.reset
|
| 19 |
-
~SlideDeckAI.revise
|
| 20 |
-
~SlideDeckAI.set_model
|
| 21 |
-
~SlideDeckAI.set_template
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.core.rst
DELETED
|
@@ -1,24 +0,0 @@
|
|
| 1 |
-
slidedeckai.core
|
| 2 |
-
================
|
| 3 |
-
===================================
|
| 4 |
-
|
| 5 |
-
.. currentmodule:: slidedeckai
|
| 6 |
-
|
| 7 |
-
.. automodule:: slidedeckai.core
|
| 8 |
-
:noindex:
|
| 9 |
-
|
| 10 |
-
.. autosummary::
|
| 11 |
-
:toctree:
|
| 12 |
-
:nosignatures:
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
SlideDeckAI
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
.. automodule:: slidedeckai.core
|
| 21 |
-
:members:
|
| 22 |
-
:undoc-members:
|
| 23 |
-
:show-inheritance:
|
| 24 |
-
:member-order: alphabetical
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.chat_helper.AIMessage.rst
DELETED
|
@@ -1,22 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.chat\_helper.AIMessage
|
| 2 |
-
==========================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.chat_helper
|
| 5 |
-
|
| 6 |
-
.. autoclass:: AIMessage
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~AIMessage.__init__
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.chat_helper.ChatMessage.rst
DELETED
|
@@ -1,22 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.chat\_helper.ChatMessage
|
| 2 |
-
============================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.chat_helper
|
| 5 |
-
|
| 6 |
-
.. autoclass:: ChatMessage
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~ChatMessage.__init__
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.chat_helper.ChatMessageHistory.rst
DELETED
|
@@ -1,24 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.chat\_helper.ChatMessageHistory
|
| 2 |
-
===================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.chat_helper
|
| 5 |
-
|
| 6 |
-
.. autoclass:: ChatMessageHistory
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~ChatMessageHistory.__init__
|
| 17 |
-
~ChatMessageHistory.add_ai_message
|
| 18 |
-
~ChatMessageHistory.add_user_message
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.chat_helper.ChatPromptTemplate.rst
DELETED
|
@@ -1,24 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.chat\_helper.ChatPromptTemplate
|
| 2 |
-
===================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.chat_helper
|
| 5 |
-
|
| 6 |
-
.. autoclass:: ChatPromptTemplate
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~ChatPromptTemplate.__init__
|
| 17 |
-
~ChatPromptTemplate.format
|
| 18 |
-
~ChatPromptTemplate.from_template
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.chat_helper.HumanMessage.rst
DELETED
|
@@ -1,22 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.chat\_helper.HumanMessage
|
| 2 |
-
=============================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.chat_helper
|
| 5 |
-
|
| 6 |
-
.. autoclass:: HumanMessage
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
.. automethod:: __init__
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
.. rubric:: Methods
|
| 13 |
-
|
| 14 |
-
.. autosummary::
|
| 15 |
-
|
| 16 |
-
~HumanMessage.__init__
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.chat_helper.rst
DELETED
|
@@ -1,32 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.chat\_helper
|
| 2 |
-
================================
|
| 3 |
-
===================================
|
| 4 |
-
|
| 5 |
-
.. currentmodule:: slidedeckai.helpers
|
| 6 |
-
|
| 7 |
-
.. automodule:: slidedeckai.helpers.chat_helper
|
| 8 |
-
:noindex:
|
| 9 |
-
|
| 10 |
-
.. autosummary::
|
| 11 |
-
:toctree:
|
| 12 |
-
:nosignatures:
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
AIMessage
|
| 18 |
-
|
| 19 |
-
ChatMessage
|
| 20 |
-
|
| 21 |
-
ChatMessageHistory
|
| 22 |
-
|
| 23 |
-
ChatPromptTemplate
|
| 24 |
-
|
| 25 |
-
HumanMessage
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
.. automodule:: slidedeckai.helpers.chat_helper
|
| 29 |
-
:members:
|
| 30 |
-
:undoc-members:
|
| 31 |
-
:show-inheritance:
|
| 32 |
-
:member-order: alphabetical
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.file_manager.get_pdf_contents.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.file\_manager.get\_pdf\_contents
|
| 2 |
-
====================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.file_manager
|
| 5 |
-
|
| 6 |
-
.. autofunction:: get_pdf_contents
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.file_manager.rst
DELETED
|
@@ -1,26 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.file\_manager
|
| 2 |
-
=================================
|
| 3 |
-
===================================
|
| 4 |
-
|
| 5 |
-
.. currentmodule:: slidedeckai.helpers
|
| 6 |
-
|
| 7 |
-
.. automodule:: slidedeckai.helpers.file_manager
|
| 8 |
-
:noindex:
|
| 9 |
-
|
| 10 |
-
.. autosummary::
|
| 11 |
-
:toctree:
|
| 12 |
-
:nosignatures:
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
get_pdf_contents
|
| 16 |
-
|
| 17 |
-
validate_page_range
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
.. automodule:: slidedeckai.helpers.file_manager
|
| 23 |
-
:members:
|
| 24 |
-
:undoc-members:
|
| 25 |
-
:show-inheritance:
|
| 26 |
-
:member-order: alphabetical
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.file_manager.validate_page_range.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.file\_manager.validate\_page\_range
|
| 2 |
-
=======================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.file_manager
|
| 5 |
-
|
| 6 |
-
.. autofunction:: validate_page_range
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.icons_embeddings.find_icons.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.icons\_embeddings.find\_icons
|
| 2 |
-
=================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.icons_embeddings
|
| 5 |
-
|
| 6 |
-
.. autofunction:: find_icons
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.icons_embeddings.get_embeddings.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.icons\_embeddings.get\_embeddings
|
| 2 |
-
=====================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.icons_embeddings
|
| 5 |
-
|
| 6 |
-
.. autofunction:: get_embeddings
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.icons_embeddings.get_icons_list.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.icons\_embeddings.get\_icons\_list
|
| 2 |
-
======================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.icons_embeddings
|
| 5 |
-
|
| 6 |
-
.. autofunction:: get_icons_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.icons_embeddings.load_saved_embeddings.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.icons\_embeddings.load\_saved\_embeddings
|
| 2 |
-
=============================================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.icons_embeddings
|
| 5 |
-
|
| 6 |
-
.. autofunction:: load_saved_embeddings
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
docs/generated/slidedeckai.helpers.icons_embeddings.main.rst
DELETED
|
@@ -1,6 +0,0 @@
|
|
| 1 |
-
slidedeckai.helpers.icons\_embeddings.main
|
| 2 |
-
==========================================
|
| 3 |
-
|
| 4 |
-
.. currentmodule:: slidedeckai.helpers.icons_embeddings
|
| 5 |
-
|
| 6 |
-
.. autofunction:: main
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|