ganchengguang committed on
Commit 2d380eb · verified · 1 Parent(s): e2009a9

Update app.py

Files changed (1)
  1. app.py +1 -10
app.py CHANGED
@@ -1,19 +1,10 @@
  import gradio as gr
  from transformers import AutoModelForCausalLM, AutoTokenizer
- import bitsandbytes as bnb

  # Load the local model and tokenizer
  model_name = "ganchengguang/OIELLM-8B-Instruction"  # replace with your model name
  tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(
-     model_name,
-     device_map="auto",
-     load_in_8bit=True,
-     quantization_config=bnb.configs.BitsAndBytesConfig(
-         load_in_8bit=True,
-         load_in_8bit_fp32_cpu_offload=True
-     )
- )

  # Define the mapping of languages and options
  options = {
 
  import gradio as gr
  from transformers import AutoModelForCausalLM, AutoTokenizer

  # Load the local model and tokenizer
  model_name = "ganchengguang/OIELLM-8B-Instruction"  # replace with your model name
  tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, load_in_8bit=True, device_map='auto')

  # Define the mapping of languages and options
  options = {
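
Note on the change: the removed code tried to build its quantization config via bnb.configs.BitsAndBytesConfig, but BitsAndBytesConfig is exposed by transformers itself, not by the bitsandbytes package, and load_in_8bit_fp32_cpu_offload is not one of its parameters. The new one-liner works on transformers versions that still accept load_in_8bit=True directly; newer releases prefer an explicit quantization_config. Below is a minimal sketch of an equivalent load using the transformers config object, assuming bitsandbytes and a CUDA GPU are available and using llm_int8_enable_fp32_cpu_offload as the CPU-offload flag; it is an illustration of the API, not part of this commit.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "ganchengguang/OIELLM-8B-Instruction"

# 8-bit quantization config; with device_map="auto", layers that do not fit
# on the GPU can be offloaded to the CPU in fp32.
quant_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_enable_fp32_cpu_offload=True,
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=quant_config,
    device_map="auto",
)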