Added Ollama server files
- Dockerfile +57 -0
- entrypoint.sh +31 -0
- requirements.txt +32 -0
- src/run1.py +24 -0
Dockerfile
ADDED
@@ -0,0 +1,57 @@
FROM python:3.10-slim-buster

# Set the working directory
WORKDIR /app

# Copy requirements file
COPY requirements.txt requirements.txt

# Update package list and install necessary packages in a single step
RUN apt-get update && apt-get install -y \
    curl \
    build-essential \
    libffi-dev \
    cmake \
    libcurl4-openssl-dev \
    tini && \
    apt-get clean

# Upgrade pip and install dependencies
RUN python -m venv venv && \
    . /app/venv/bin/activate && \
    pip install --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Install Ollama
RUN curl https://ollama.ai/install.sh | sh

# Create the directory and give appropriate permissions
RUN mkdir -p /.ollama && chmod 777 /.ollama

# Ensure Ollama binary is in the PATH
ENV PATH="/app/venv/bin:/root/.ollama/bin:$PATH"

# Expose the server ports
EXPOSE 7860
EXPOSE 11434
EXPOSE 1338
# Copy the entry point script
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Set the model as an environment variable (this can be overridden)
ENV model="default_model"

# Copy the entire application
COPY . .

# Set proper permissions for the translations directory
# RUN chmod -R 777 translations

# Copy the startup script and make it executable
#COPY start.sh .
#RUN chmod +x start.sh

# Define the command to run the application
# Set the entry point script as the default command
ENTRYPOINT ["/entrypoint.sh"]
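For local testing, a minimal build-and-run sketch might look like the following. The image tag is an arbitrary example, and the port mapping assumes the uvicorn port 8000 that src/run1.py binds; on Spaces the platform builds and runs the image itself. Note that the entrypoint reads the upper-case MODEL variable, so that is the name to override, not the lower-case model set above.

# Build the image locally; "ollama-space" is only an example tag
docker build -t ollama-space .

# Run it, overriding which models the entrypoint pulls on startup.
# 8000 is the port uvicorn binds in src/run1.py; 11434 is the Ollama API.
docker run --rm \
  -e MODEL="llava,llama3.2-vision" \
  -p 8000:8000 \
  -p 11434:11434 \
  ollama-space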
entrypoint.sh
ADDED
@@ -0,0 +1,31 @@
#!/bin/bash

# Source the virtual environment
source /app/venv/bin/activate

# Starting server
echo "Starting Ollama server"
ollama serve &
sleep 1

# Try to get the model environment variable
if [ -n "${MODEL}" ]; then
    # Split the MODEL variable into an array
    IFS=',' read -ra MODELS <<< "${MODEL}"
else
    # Use the default list of models
    MODELS=(llava llama3.2-vision)
fi


# Splitting the models by comma and pulling each
#IFS=',' read -ra MODELS <<< "$model"
for m in "${MODELS[@]}"; do
    echo "Pulling $m"
    ollama pull "$m"
    sleep 5
done


# Run the Python application
exec python ./src/run1.py
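As a quick reference, this is how the comma-splitting of MODEL behaves in isolation (a small sketch; the value assigned below is only an example list of model names):

# Example value; any comma-separated list of Ollama model names
MODEL="llava,llama3.2-vision"

# Same splitting technique the entrypoint uses
IFS=',' read -ra MODELS <<< "${MODEL}"

for m in "${MODELS[@]}"; do
    echo "would pull: $m"
done
# Prints:
#   would pull: llava
#   would pull: llama3.2-vision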
requirements.txt
ADDED
@@ -0,0 +1,32 @@
websocket-client
python-dotenv
requests
tls-client
pypasser
names
colorama
curl_cffi
aiohttp
flask
flask_cors
flask-babel
streamlit
selenium
fake-useragent
twocaptcha
pydantic
pymailtm
Levenshtein
retrying
numpy>=1.22.2  # pinned to avoid a vulnerability
tornado>=6.3.2  # pinned to avoid a vulnerability
PyExecJS
openai==0.28.1
ipykernel
notebook
jupyter
g4f
langchain-community
ollama
fastapi
uvicorn
src/run1.py
ADDED
@@ -0,0 +1,24 @@
from fastapi import FastAPI
import requests

URL = "http://localhost:11434/api/chat"
headers = {"Content-Type": "application/json"}

app = FastAPI()

@app.get("/")
def read_root():
    return {"message": "hello world"}

@app.post("/api/chat")
def get_chat_response(body: dict):
    print(f"Received body: {body}")
    try:
        response = requests.post(url=URL, headers=headers, json=body, timeout=600)
        return response.json()
    except ConnectionRefusedError as error:
        return {"error": f"Connection refused from backend with error: {error}"}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
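With the container running, the proxy can be exercised with a request such as the one below. This is a sketch: the host and port assume the uvicorn settings above (0.0.0.0:8000), and the body is forwarded unchanged to Ollama's /api/chat, so it follows Ollama's chat format; the model name must be one the entrypoint actually pulled.

# Send a single non-streaming chat turn through the FastAPI proxy
curl -s http://localhost:8000/api/chat \
  -H "Content-Type: application/json" \
  -d '{
        "model": "llava",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": false
      }'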