barunsaha committed
Commit fd9232d · unverified · 2 Parent(s): 0966967 ab1a7c1

Merge pull request #91 from barun-saha/visual
Files changed (4):

1. README.md (+2 -4)
2. app.py (+4 -4)
3. global_config.py (+0 -2)
4. helpers/llm_helper.py (+2 -8)
README.md CHANGED
@@ -48,8 +48,8 @@ The supported LLMs offer different styles of content generation. Use one of the
 
 | LLM | Provider (code) | Requires API key | Characteristics |
 |:---------------------------------| :------- |:-------------------------------------------------------------------------------------------------------------------------|:-------------------------|
-| Mistral 7B Instruct v0.2 | Hugging Face (`hf`) | Optional but strongly encouraged; [get here](https://huggingface.co/settings/tokens) | Faster, shorter content |
-| Mistral NeMo Instruct 2407 | Hugging Face (`hf`) | Optional but strongly encouraged; [get here](https://huggingface.co/settings/tokens) | Slower, longer content |
+| Mistral 7B Instruct v0.2 | Hugging Face (`hf`) | Mandatory; [get here](https://huggingface.co/settings/tokens) | Faster, shorter content |
+| Mistral NeMo Instruct 2407 | Hugging Face (`hf`) | Mandatory; [get here](https://huggingface.co/settings/tokens) | Slower, longer content |
 | Gemini 2.0 Flash | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Faster, longer content |
 | Gemini 2.0 Flash Lite | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Fastest, longer content |
 | GPT | Azure OpenAI (`az`) | Mandatory; [get here](https://ai.azure.com/resource/playground) NOTE: You need to have your subscription/billing set up | Faster, longer content |
@@ -57,8 +57,6 @@ The supported LLMs offer different styles of content generation. Use one of the
 | Llama 3.3 70B Instruct Turbo | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Detailed, slower |
 | Llama 3.1 8B Instruct Turbo 128K | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Shorter |
 
-The Mistral models (via Hugging Face) do not mandatorily require an access token. In other words, you are always free to use these two LLMs, subject to Hugging Face's usage constrains. However, you are strongly encouraged to get and use your own Hugging Face access token.
-
 **IMPORTANT**: SlideDeck AI does **NOT** store your API keys/tokens or transmit them elsewhere. If you provide your API key, it is only used to invoke the relevant LLM to generate contents. That's it! This is an
 Open-Source project, so feel free to audit the code and convince yourself.
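
The provider codes in the table (`hf`, `gg`, `az`, `to`, and so on) pair with a model name in the selection string that `helpers/llm_helper.py` parses via `LLM_PROVIDER_MODEL_REGEX` (shown in the diff below). A minimal sketch of that split, using illustrative model identifiers rather than the app's exact defaults:

```python
# Sketch only: how a '[provider]model' selection string splits into a provider
# code from the table above and a model identifier. The model IDs below are
# illustrative examples, not necessarily the app's configured defaults.
import re

LLM_PROVIDER_MODEL_REGEX = re.compile(r'\[(.*?)\](.*)')

for selection in ('[gg]gemini-2.0-flash', '[hf]mistralai/Mistral-Nemo-Instruct-2407'):
    match = LLM_PROVIDER_MODEL_REGEX.match(selection)
    if match:
        provider, model = match.group(1), match.group(2)
        print(provider, model)  # e.g., 'gg' 'gemini-2.0-flash'
```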
app.py CHANGED
@@ -184,8 +184,7 @@ with st.sidebar:
     api_key_token = st.text_input(
         label=(
             '3: Paste your API key/access token:\n\n'
-            '*Mandatory* for Azure OpenAI, Cohere, Google Gemini, and Together AI providers.'
-            ' *Optional* for HF Mistral LLMs but still encouraged.\n\n'
+            '*Mandatory* for all providers.'
         ),
         type='password',
         key='api_key_input'
@@ -376,8 +375,9 @@ def set_up_chat_ui():
             ' the input field on the sidebar to the left.'
             '\n\nDon\'t have a token? Get your free'
             ' [HF access token](https://huggingface.co/settings/tokens) now'
-            ' and start creating your slide deck! Alternatively, choose a different LLM'
-            ' and provider from the list.',
+            ' and start creating your slide deck! For gated models, you may need to'
+            ' visit the model\'s page and accept the terms of service.'
+            '\n\nAlternatively, choose a different LLM and provider from the list.',
             should_log=True
         )
     else:
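
To make the behavior change concrete, here is a minimal sketch (not the app's exact code) of how the now-mandatory key from the sidebar can gate generation; the call to `is_valid_llm_provider_model` mirrors the helper updated in `helpers/llm_helper.py` below, though its exact signature and import path are assumed here:

```python
# Minimal sketch of the sidebar flow after this change: the access token is
# mandatory for every provider, so an empty or invalid key blocks generation.
import streamlit as st

from helpers import llm_helper  # assumed import path

with st.sidebar:
    api_key_token = st.text_input(
        label='3: Paste your API key/access token:\n\n*Mandatory* for all providers.',
        type='password',
        key='api_key_input',
    )

# Hypothetical stand-ins for the user's provider/model selection.
provider, model = 'hf', 'mistralai/Mistral-Nemo-Instruct-2407'

if not llm_helper.is_valid_llm_provider_model(provider, model, api_key_token):
    st.error('Please provide a valid API key/access token for the selected provider.')
    st.stop()
```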
global_config.py CHANGED
@@ -87,8 +87,6 @@ class GlobalConfig:
     LLM_MODEL_MIN_OUTPUT_LENGTH = 100
     LLM_MODEL_MAX_INPUT_LENGTH = 400  # characters
 
-    HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN', '')
-
     LOG_LEVEL = 'DEBUG'
     COUNT_TOKENS = False
     APP_STRINGS_FILE = 'strings.json'
helpers/llm_helper.py CHANGED
@@ -22,7 +22,6 @@ LLM_PROVIDER_MODEL_REGEX = re.compile(r'\[(.*?)\](.*)')
 OLLAMA_MODEL_REGEX = re.compile(r'[a-zA-Z0-9._:-]+$')
 # 94 characters long, only containing alphanumeric characters, hyphens, and underscores
 API_KEY_REGEX = re.compile(r'^[a-zA-Z0-9_-]{6,94}$')
-HF_API_HEADERS = {'Authorization': f'Bearer {GlobalConfig.HUGGINGFACEHUB_API_TOKEN}'}
 REQUEST_TIMEOUT = 35
 
 
@@ -95,12 +94,7 @@ def is_valid_llm_provider_model(
     if not provider or not model or provider not in GlobalConfig.VALID_PROVIDERS:
         return False
 
-    if provider in [
-        GlobalConfig.PROVIDER_GOOGLE_GEMINI,
-        GlobalConfig.PROVIDER_COHERE,
-        GlobalConfig.PROVIDER_TOGETHER_AI,
-        GlobalConfig.PROVIDER_AZURE_OPENAI,
-    ] and not api_key:
+    if not api_key:
         return False
 
     if api_key and API_KEY_REGEX.match(api_key) is None:
@@ -150,7 +144,7 @@ def get_langchain_llm(
         temperature=GlobalConfig.LLM_MODEL_TEMPERATURE,
         repetition_penalty=1.03,
         streaming=True,
-        huggingfacehub_api_token=api_key or GlobalConfig.HUGGINGFACEHUB_API_TOKEN,
+        huggingfacehub_api_token=api_key,
         return_full_text=False,
         stop_sequences=['</s>'],
     )
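
For reference, a condensed sketch of what the Hugging Face path of `get_langchain_llm` amounts to after this change: the endpoint receives only the user-supplied key, with no fallback to a `HUGGINGFACEHUB_API_TOKEN` environment variable. The import path, `repo_id`, and `max_new_tokens` default are assumptions for illustration; the remaining keyword arguments come from the diff above.

```python
# Sketch, not the project's exact code: the user-supplied key is now the only
# source of the Hugging Face token (the env-var fallback was removed).
from langchain_huggingface import HuggingFaceEndpoint  # assumed package/import path


def build_hf_llm(repo_id: str, api_key: str, max_new_tokens: int = 512) -> HuggingFaceEndpoint:
    """Return a streaming HF endpoint authenticated with the caller's key only."""
    return HuggingFaceEndpoint(
        repo_id=repo_id,                     # e.g., a Mistral instruct model
        max_new_tokens=max_new_tokens,       # assumed default; the app uses its own config
        temperature=0.2,                     # stands in for GlobalConfig.LLM_MODEL_TEMPERATURE
        repetition_penalty=1.03,
        streaming=True,
        huggingfacehub_api_token=api_key,    # previously: api_key or GlobalConfig.HUGGINGFACEHUB_API_TOKEN
        return_full_text=False,
        stop_sequences=['</s>'],
    )
```

Dropping the environment-variable fallback keeps credential handling explicit: the only token the app ever forwards is the one the user pastes into the sidebar, which also matches the stricter `is_valid_llm_provider_model` check above.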