Spaces:
				
			
			
	
			
			
		Sleeping
		
	
	
	
			
			
	
	
	
	
		
		
		Sleeping
		
	
		Tuchuanhuhuhu
		
	committed on
		
		
					Commit 
							
							·
						
						ccade22
	
1
								Parent(s):
							
							731a989
								
支持通过环境变量禁用本地模型,支持在config中设置默认model
Browse files
- config_example.json +2 -1
 - modules/config.py +8 -0
 - modules/presets.py +12 -4
 
    	
        config_example.json
    CHANGED
    
    | 
         @@ -1,12 +1,13 @@ 
     | 
|
| 1 | 
         
             
            {
         
     | 
| 2 | 
         
             
                // 你的OpenAI API Key,一般必填,
         
     | 
| 3 | 
         
             
                // 若缺省填为 "openai_api_key": "" 则必须再在图形界面中填入API Key
         
     | 
| 4 | 
         
            -
                "openai_api_key": " 
     | 
| 5 | 
         
             
                // 如果使用代理,请取消注释下面的两行,并替换代理URL
         
     | 
| 6 | 
         
             
                // "https_proxy": "http://127.0.0.1:1079",
         
     | 
| 7 | 
         
             
                // "http_proxy": "http://127.0.0.1:1079",
         
     | 
| 8 | 
         
             
                "users": [], // 用户列表,[[用户名1, 密码1], [用户名2, 密码2], ...]
         
     | 
| 9 | 
         
             
                "local_embedding": false, //是否在本地编制索引
         
     | 
| 
         | 
|
| 10 | 
         
             
                "advance_docs": {
         
     | 
| 11 | 
         
             
                    "pdf": {
         
     | 
| 12 | 
         
             
                        // 是否认为PDF是双栏的
         
     | 
| 
         | 
|
| 1 | 
         
             
            {
         
     | 
| 2 | 
         
             
                // 你的OpenAI API Key,一般必填,
         
     | 
| 3 | 
         
             
                // 若缺省填为 "openai_api_key": "" 则必须再在图形界面中填入API Key
         
     | 
| 4 | 
         
            +
                "openai_api_key": "",
         
     | 
| 5 | 
         
             
                // 如果使用代理,请取消注释下面的两行,并替换代理URL
         
     | 
| 6 | 
         
             
                // "https_proxy": "http://127.0.0.1:1079",
         
     | 
| 7 | 
         
             
                // "http_proxy": "http://127.0.0.1:1079",
         
     | 
| 8 | 
         
             
                "users": [], // 用户列表,[[用户名1, 密码1], [用户名2, 密码2], ...]
         
     | 
| 9 | 
         
             
                "local_embedding": false, //是否在本地编制索引
         
     | 
| 10 | 
         
            +
                "default_model": "gpt-3.5-turbo", // 默认模型
         
     | 
| 11 | 
         
             
                "advance_docs": {
         
     | 
| 12 | 
         
             
                    "pdf": {
         
     | 
| 13 | 
         
             
                        // 是否认为PDF是双栏的
         
     | 
    	
        modules/config.py
    CHANGED
    
    | 
         @@ -6,6 +6,7 @@ import sys 
     | 
|
| 6 | 
         
             
            import commentjson as json
         
     | 
| 7 | 
         | 
| 8 | 
         
             
            from . import shared
         
     | 
| 
         | 
|
| 9 | 
         | 
| 10 | 
         | 
| 11 | 
         
             
            __all__ = [
         
     | 
| 
         @@ -155,4 +156,11 @@ if server_port is None: 
     | 
|
| 155 | 
         | 
| 156 | 
         
             
            assert server_port is None or type(server_port) == int, "要求port设置为int类型"
         
     | 
| 157 | 
         | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 158 | 
         
             
            share = config.get("share", False)
         
     | 
| 
         | 
|
| 6 | 
         
             
            import commentjson as json
         
     | 
| 7 | 
         | 
| 8 | 
         
             
            from . import shared
         
     | 
| 9 | 
         
            +
            from . import presets
         
     | 
| 10 | 
         | 
| 11 | 
         | 
| 12 | 
         
             
            __all__ = [
         
     | 
| 
         | 
|
| 156 | 
         | 
| 157 | 
         
             
            assert server_port is None or type(server_port) == int, "要求port设置为int类型"
         
     | 
| 158 | 
         | 
| 159 | 
         
            +
            # 设置默认model
         
     | 
| 160 | 
         
            +
            default_model = config.get("default_model", "")
         
     | 
| 161 | 
         
            +
            try:
         
     | 
| 162 | 
         
            +
                presets.DEFAULT_MODEL = presets.MODELS.index(default_model)
         
     | 
| 163 | 
         
            +
            except ValueError:
         
     | 
| 164 | 
         
            +
                pass
         
     | 
| 165 | 
         
            +
             
     | 
| 166 | 
         
             
            share = config.get("share", False)
         
     | 
    	
        modules/presets.py
    CHANGED
    
    | 
         @@ -65,7 +65,7 @@ APPEARANCE_SWITCHER = """ 
     | 
|
| 65 | 
         | 
| 66 | 
         
             
            SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?"  # 总结对话时的 prompt
         
     | 
| 67 | 
         | 
| 68 | 
         
            -
             
     | 
| 69 | 
         
             
                "gpt-3.5-turbo",
         
     | 
| 70 | 
         
             
                "gpt-3.5-turbo-0301",
         
     | 
| 71 | 
         
             
                "gpt-4",
         
     | 
| 
         @@ -73,6 +73,9 @@ MODELS = [ 
     | 
|
| 73 | 
         
             
                "gpt-4-32k",
         
     | 
| 74 | 
         
             
                "gpt-4-32k-0314",
         
     | 
| 75 | 
         
             
                "xmbot",
         
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 76 | 
         
             
                "chatglm-6b",
         
     | 
| 77 | 
         
             
                "chatglm-6b-int4",
         
     | 
| 78 | 
         
             
                "chatglm-6b-int4-qe",
         
     | 
| 
         @@ -83,10 +86,15 @@ MODELS = [ 
     | 
|
| 83 | 
         
             
                "llama-13b-hf-int4",
         
     | 
| 84 | 
         
             
                "llama-30b-hf",
         
     | 
| 85 | 
         
             
                "llama-30b-hf-int4",
         
     | 
| 86 | 
         
            -
                "llama-65b-hf" 
     | 
| 87 | 
         
            -
            ] 
     | 
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 
         | 
|
| 88 | 
         | 
| 89 | 
         
            -
            DEFAULT_MODEL = 0 
     | 
| 90 | 
         | 
| 91 | 
         
             
            os.makedirs("models", exist_ok=True)
         
     | 
| 92 | 
         
             
            os.makedirs("lora", exist_ok=True)
         
     | 
| 
         | 
|
| 65 | 
         | 
| 66 | 
         
             
            SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?"  # 总结对话时的 prompt
         
     | 
| 67 | 
         | 
| 68 | 
         
            +
            ONLINE_MODELS = [
         
     | 
| 69 | 
         
             
                "gpt-3.5-turbo",
         
     | 
| 70 | 
         
             
                "gpt-3.5-turbo-0301",
         
     | 
| 71 | 
         
             
                "gpt-4",
         
     | 
| 
         | 
|
| 73 | 
         
             
                "gpt-4-32k",
         
     | 
| 74 | 
         
             
                "gpt-4-32k-0314",
         
     | 
| 75 | 
         
             
                "xmbot",
         
     | 
| 76 | 
         
            +
            ]
         
     | 
| 77 | 
         
            +
             
     | 
| 78 | 
         
            +
            LOCAL_MODELS = [
         
     | 
| 79 | 
         
             
                "chatglm-6b",
         
     | 
| 80 | 
         
             
                "chatglm-6b-int4",
         
     | 
| 81 | 
         
             
                "chatglm-6b-int4-qe",
         
     | 
| 
         | 
|
| 86 | 
         
             
                "llama-13b-hf-int4",
         
     | 
| 87 | 
         
             
                "llama-30b-hf",
         
     | 
| 88 | 
         
             
                "llama-30b-hf-int4",
         
     | 
| 89 | 
         
            +
                "llama-65b-hf"
         
     | 
| 90 | 
         
            +
            ]
         
     | 
| 91 | 
         
            +
             
     | 
| 92 | 
         
            +
            if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
         
     | 
| 93 | 
         
            +
                MODELS = ONLINE_MODELS
         
     | 
| 94 | 
         
            +
            else:
         
     | 
| 95 | 
         
            +
                MODELS = ONLINE_MODELS + LOCAL_MODELS
         
     | 
| 96 | 
         | 
| 97 | 
         
            +
            DEFAULT_MODEL = 0
         
     | 
| 98 | 
         | 
| 99 | 
         
             
            os.makedirs("models", exist_ok=True)
         
     | 
| 100 | 
         
             
            os.makedirs("lora", exist_ok=True)
         
     |