king-eagle committed
Commit 8319bea · 1 Parent(s): 1b9d2d8

test-1: loading ollama llm through langchain

Files changed (2)
  1. .gitignore +3 -0
  2. main.py +23 -3
.gitignore ADDED
@@ -0,0 +1,3 @@
+.env
+__pycache__/
+*.pyc
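Ignoring .env keeps the model configuration out of version control; the other two patterns cover Python bytecode caches. For reference, a minimal .env this commit implies — the variable name matches the os.getenv('OLLAMA_MODEL_1', '') call added to main.py below, and the value shown is an assumed example reusing the model tag from the code:

# .env — loaded by load_dotenv() in main.py; value is a placeholder example
OLLAMA_MODEL_1=hf.co/bartowski/Llama-3.2-3B-Instruct-uncensored-GGUF:Q5_K_S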
main.py CHANGED
@@ -1,10 +1,30 @@
+import os
+from dotenv import load_dotenv
 import ollama
+from langchain_community.llms import Ollama
+from langchain_ollama import OllamaLLM
 
+load_dotenv()
+"""
 res = ollama.generate(
     model="hf.co/bartowski/Llama-3.2-3B-Instruct-uncensored-GGUF:Q5_K_S",
     # model="qwen2.5:3b",
-    prompt="i am a human",
-    system="Provide answers with funny and creative answers",
+    prompt="what was our previous conversation?",
+    system="only funny responses",
+    # options={'num_predict': 50}
 )
 
-print(res['response'])
+print("\n",res['response'],"\n")
+"""
+
+model = OllamaLLM(
+    model=os.getenv('OLLAMA_MODEL_1', ''),
+    # metadata={"num_predict": 50},
+    num_predict=50,
+    # num_ctx=50
+)
+
+prompt_input = "Why did the tomato cry?"
+response = model.invoke(prompt_input)
+
+print("\n",response,"\n")
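A quick way to exercise the new path, assuming a local Ollama server and the .env sketched above (python main.py is the assumed entry point):

$ ollama pull hf.co/bartowski/Llama-3.2-3B-Instruct-uncensored-GGUF:Q5_K_S  # fetch the GGUF model if not already present
$ ollama serve                                                              # start the local server if it is not running
$ python main.py

OllamaLLM wraps the local Ollama server as a LangChain LLM: num_predict=50 caps the response at 50 generated tokens, and model.invoke(prompt_input) returns the completion as a plain string. Note that the Ollama import from langchain_community.llms is left unused here — langchain_ollama.OllamaLLM is its current replacement.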