	add 4 new models
    	
README.md CHANGED
```diff
@@ -8,28 +8,32 @@ sdk_version: 1.44.1
 app_file: app.py
 pinned: false
 license: apache-2.0
-short_description: Run GGUF models (Qwen2.5, Gemma-3, Phi-4) with llama.cpp
+short_description: Run GGUF models (Qwen2.5, Gemma-3, Phi-4, Meta-Llama-3.1, DeepSeek-R1-Distill-Llama, Mistral-7B, Qwen2.5-Coder) with llama.cpp
 ---
 
-This Streamlit app …
+This Streamlit app enables **chat-based inference** on various GGUF models using `llama.cpp` and `llama-cpp-python`.
 
 ### 🔄 Supported Models:
 - `Qwen/Qwen2.5-7B-Instruct-GGUF` → `qwen2.5-7b-instruct-q2_k.gguf`
-- `unsloth/gemma-3-4b-it-GGUF` → `gemma-3-4b-it-…
-- `unsloth/Phi-4-mini-instruct-GGUF` → `Phi-4-mini-instruct-…
+- `unsloth/gemma-3-4b-it-GGUF` → `gemma-3-4b-it-Q4_K_M.gguf`
+- `unsloth/Phi-4-mini-instruct-GGUF` → `Phi-4-mini-instruct-Q4_K_M.gguf`
+- `MaziyarPanahi/Meta-Llama-3.1-8B-Instruct-GGUF` → `Meta-Llama-3.1-8B-Instruct.Q2_K.gguf`
+- `unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF` → `DeepSeek-R1-Distill-Llama-8B-Q2_K.gguf`
+- `MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF` → `Mistral-7B-Instruct-v0.3.IQ3_XS.gguf`
+- `Qwen/Qwen2.5-Coder-7B-Instruct-GGUF` → `qwen2.5-coder-7b-instruct-q2_k.gguf`
 
 ### ⚙️ Features:
-- Model selection in sidebar
-- …
+- Model selection in the sidebar
+- Customizable system prompt and generation parameters
 - Chat-style UI with streaming responses
 
 ### 🧠 Memory-Safe Design (for HuggingFace Spaces):
-- …
-- …
-- …
-- Automatically downloads models …
-- …
+- Loads only **one model at a time** to prevent memory bloat
+- Utilizes **manual unloading and `gc.collect()`** to free memory when switching models
+- Adjusts `n_ctx` context length to operate within a 16 GB RAM limit
+- Automatically downloads models as needed
+- Limits history to the **last 8 user-assistant turns** to prevent context overflow
 
-…
+Ideal for deploying multiple GGUF chat models on **free-tier HuggingFace Spaces**!
 
-…
+Refer to the configuration guide at https://huggingface.co/docs/hub/spaces-config-reference
```
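Note: the "last 8 user-assistant turns" limit in the README maps to a plain slice over the flat chat history, since each turn contributes one user and one assistant message. A minimal sketch of that scheme, reusing the `MAX_TURNS` arithmetic from the pre-change chat loop in `app.py` (the helper name `build_messages` is illustrative, not from the app):

```python
MAX_TURNS = 8  # user+assistant pairs to retain

def build_messages(system_prompt: str, chat_history: list) -> list:
    # Each turn is two messages (user, assistant), hence the * 2 in the slice.
    trimmed = chat_history[-MAX_TURNS * 2:]
    return [{"role": "system", "content": system_prompt}] + trimmed
```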
    	
app.py CHANGED
```diff
@@ -3,8 +3,6 @@ from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 import os
 import gc
-import shutil
-import subprocess
 
 # Available models
 MODELS = {
```
```diff
@@ -23,8 +21,29 @@ MODELS = {
         "filename": "Phi-4-mini-instruct-Q4_K_M.gguf",
         "description": "Phi-4 Mini Instruct (Q4_K_M)"
     },
+    "Meta-Llama-3.1-8B-Instruct (Q2_K)": {
+        "repo_id": "MaziyarPanahi/Meta-Llama-3.1-8B-Instruct-GGUF",
+        "filename": "Meta-Llama-3.1-8B-Instruct.Q2_K.gguf",
+        "description": "Meta Llama 3.1 8B Instruct (Q2_K)"
+    },
+    "DeepSeek-R1-Distill-Llama-8B (Q2_K)": {
+        "repo_id": "unsloth/DeepSeek-R1-Distill-Llama-8B-GGUF",
+        "filename": "DeepSeek-R1-Distill-Llama-8B-Q2_K.gguf",
+        "description": "DeepSeek R1 Distill Llama 8B (Q2_K)"
+    },
+    "Mistral-7B-Instruct-v0.3 (IQ3_XS)": {
+        "repo_id": "MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF",
+        "filename": "Mistral-7B-Instruct-v0.3.IQ3_XS.gguf",
+        "description": "Mistral 7B Instruct v0.3 (IQ3_XS)"
+    },
+    "Qwen2.5-Coder-7B-Instruct (Q2_K)": {
+        "repo_id": "Qwen/Qwen2.5-Coder-7B-Instruct-GGUF",
+        "filename": "qwen2.5-coder-7b-instruct-q2_k.gguf",
+        "description": "Qwen2.5 Coder 7B Instruct (Q2_K)"
+    },
 }
 
+# Sidebar for model selection and settings
 with st.sidebar:
     st.header("⚙️ Settings")
     selected_model_name = st.selectbox("Select Model", list(MODELS.keys()))
```
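Each registry entry carries exactly what the download path needs: a Hub `repo_id` plus the `filename` of one quantized GGUF artifact, with `description` used only for display. A minimal sketch of that mapping, assuming the same `models/` directory layout as `app.py` (`fetch_model` is an illustrative name; the app's own wrapper is `download_model()` in a later hunk):

```python
import os
from huggingface_hub import hf_hub_download

def fetch_model(entry: dict, local_dir: str = "models") -> str:
    """Download one MODELS entry and return its local path."""
    os.makedirs(local_dir, exist_ok=True)
    hf_hub_download(
        repo_id=entry["repo_id"],
        filename=entry["filename"],
        local_dir=local_dir,
        local_dir_use_symlinks=False,  # materialize a real file rather than a cache symlink
    )
    return os.path.join(local_dir, entry["filename"])
```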
```diff
@@ -35,37 +54,14 @@ with st.sidebar:
     top_p = st.slider("Top-P", 0.1, 1.0, 0.95)
     repeat_penalty = st.slider("Repetition Penalty", 1.0, 2.0, 1.1)
 
-    if st.button("🧹 Clear All Cached Models"):
-        try:
-            for f in os.listdir("models"):
-                if f.endswith(".gguf"):
-                    os.remove(os.path.join("models", f))
-            st.success("Model cache cleared.")
-        except Exception as e:
-            st.error(f"Failed to clear models: {e}")
-
-    if st.button("📦 Show Disk Usage"):
-        try:
-            usage = shutil.disk_usage(".")
-            used = usage.used / (1024**3)
-            free = usage.free / (1024**3)
-            st.info(f"Disk Used: {used:.2f} GB | Free: {free:.2f} GB")
-        except Exception as e:
-            st.error(f"Disk usage error: {e}")
-
 # Model info
 selected_model = MODELS[selected_model_name]
 model_path = os.path.join("models", selected_model["filename"])
 
-# Init state
-if "model_name" not in st.session_state:
-    st.session_state.model_name = None
-if "llm" not in st.session_state:
-    st.session_state.llm = None
-
 # Ensure model directory exists
 os.makedirs("models", exist_ok=True)
 
+# Function to clean up old models
 def cleanup_old_models():
     for f in os.listdir("models"):
         if f.endswith(".gguf") and f != selected_model["filename"]:
```
         | 
| @@ -74,6 +70,7 @@ def cleanup_old_models(): | |
| 74 | 
             
                        except Exception as e:
         | 
| 75 | 
             
                            st.warning(f"Couldn't delete old model {f}: {e}")
         | 
| 76 |  | 
|  | |
| 77 | 
             
            def download_model():
         | 
| 78 | 
             
                with st.spinner(f"Downloading {selected_model['filename']}..."):
         | 
| 79 | 
             
                    hf_hub_download(
         | 
```diff
@@ -83,86 +80,26 @@ def download_model():
             local_dir_use_symlinks=False,
         )
 
-…
-    try:
-        return Llama(model_path=path, n_ctx=1024, n_threads=2, n_threads_batch=2, n_batch=4, n_gpu_layers=0, use_mlock=False, use_mmap=True, verbose=False)
-    except Exception as e:
-        return str(e)
-
+# Function to validate or download the model
 def validate_or_download_model():
     if not os.path.exists(model_path):
         cleanup_old_models()
         download_model()
-
-
-
-    …
-        st.warning(f"…
+    try:
+        # Attempt to load the model with minimal resources to validate
+        _ = Llama(model_path=model_path, n_ctx=16, n_threads=1)
+    except Exception as e:
+        st.warning(f"Model file was invalid or corrupt: {e}\nRedownloading...")
         try:
             os.remove(model_path)
         except:
             pass
         cleanup_old_models()
         download_model()
-        result = try_load_model(model_path)
-        if isinstance(result, str):
-            st.error(f"Model still failed after re-download: {result}")
-            st.stop()
-        return result
-    return result
-
-# Load model if changed
-if st.session_state.model_name != selected_model_name:
-    if st.session_state.llm is not None:
-        del st.session_state.llm
-        gc.collect()
-    st.session_state.llm = validate_or_download_model()
-    st.session_state.model_name = selected_model_name
-
-llm = st.session_state.llm
-
-# Chat history state
-if "chat_history" not in st.session_state:
-    st.session_state.chat_history = []
-
-st.title(f"🧠 {selected_model['description']} (Streamlit + GGUF)")
-st.caption(f"Powered by `llama.cpp` | Model: {selected_model['filename']}")
-
-user_input = st.chat_input("Ask something...")
 
-
-…
-    if len(st.session_state.chat_history) % 2 == 1:
-        st.warning("Please wait for the assistant to respond before sending another message.")
-    else:
-        st.session_state.chat_history.append({"role": "user", "content": user_input})
+# Validate or download the selected model
+validate_or_download_model()
 
-
-…
-…
-        # Trim conversation history to max 8 turns (user+assistant)
-        MAX_TURNS = 8
-        trimmed_history = st.session_state.chat_history[-MAX_TURNS * 2:]
-        messages = [{"role": "system", "content": system_prompt}] + trimmed_history
-
-        with st.chat_message("assistant"):
-            full_response = ""
-            response_area = st.empty()
-
-            stream = llm.create_chat_completion(
-                messages=messages,
-                max_tokens=max_tokens,
-                temperature=temperature,
-                top_k=top_k,
-                top_p=top_p,
-                repeat_penalty=repeat_penalty,
-                stream=True,
-            )
-
-            for chunk in stream:
-                if "choices" in chunk:
-                    delta = chunk["choices"][0]["delta"].get("content", "")
-                    full_response += delta
-                    response_area.markdown(full_response)
-
-            st.session_state.chat_history.append({"role": "assistant", "content": full_response})
+# Load model if changed
+if "model_name" not in st.session_state or st.session_state.model_name != selected_model_name:
+    if "llm" in st.session_state and st.session_state.llm …
```
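The new-file view is cut off immediately after the reload guard, so the tail of the rewritten `app.py` is not visible in this diff. The removed code above shows the swap pattern the README still advertises: delete the only reference to the old `Llama` instance and force a collection before loading the next model. A sketch of that swap, assuming the truncated guard keeps the old shape (it runs in `app.py`'s context, where `st`, `Llama`, `selected_model_name`, and `model_path` are already defined; the `Llama(...)` parameters are copied from the removed `try_load_model` body):

```python
import gc

# Load the model on change, releasing the previous one first (mirrors the removed code).
if "model_name" not in st.session_state or st.session_state.model_name != selected_model_name:
    if "llm" in st.session_state and st.session_state.llm is not None:
        del st.session_state.llm  # drop the only reference to the old model
        gc.collect()              # free llama.cpp buffers before the next load
    st.session_state.llm = Llama(
        model_path=model_path, n_ctx=1024, n_threads=2, n_threads_batch=2,
        n_batch=4, n_gpu_layers=0, use_mlock=False, use_mmap=True, verbose=False,
    )
    st.session_state.model_name = selected_model_name
```

One detail worth noting in the new `validate_or_download_model()`: the probe instance bound to `_` is a function local, so it is released when the function returns, and the model is then loaded a second time for actual use; the small `n_ctx=16` keeps that throwaway load cheap.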
