robinroy03 committed
Commit 207726a · 1 Parent(s): f23adce

fixed urls, added demo curl example at the bottom

Files changed (2):
  1. app.py +8 -0
  2. utils.py +2 -2
app.py CHANGED
@@ -71,6 +71,14 @@ def ollama_completion():
     }
 
     """
+    curl -X POST http://localhost:8000/api/groq/generate -H "Content-Type: application/json" -d '{
+        "query": "How do I create a sphere in FURY?",
+        "llm": "llama3-70b-8192",
+        "knn": "3",
+        "stream": false
+    }'
+
+
     curl -X POST http://localhost:8000/api/ollama/generate -H "Content-Type: application/json" -d '{
         "query": "How do I create a sphere in FURY?",
         "llm": "phi3",
utils.py CHANGED
@@ -71,8 +71,8 @@ def db_output(embedding: list, knn: int) -> dict:
 
 
 def ollama_llm_output(question: str, db_knn: dict, llm: str, stream: bool) -> tuple[str, str]:
-    # URL_LLM = 'https://robinroy03-ollama-server-backend.hf.space'
-    URL_LLM = "http://localhost:11434"
+    URL_LLM = 'https://robinroy03-ollama-server-backend.hf.space'
+    # URL_LLM = "http://localhost:11434"
     prompt, context, references = prompt_generator(question, db_knn)
     obj = {
         "model": llm,