sugiv committed
Commit 9fd8a2f · verified · 1 Parent(s): ed06025

Update README.md

Files changed (1):
  README.md (+19, -6)
README.md CHANGED
@@ -14,11 +14,26 @@ This model is a instruction fine-tuned version of FLAN-T5 Large for text transfo
 ## Usage
 
 ```python
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 # Load model and tokenizer
-model = AutoModelForSeq2SeqLM.from_pretrained("sugiv/bluey-poor-flant5")
-tokenizer = AutoTokenizer.from_pretrained("sugiv/bluey-poor-flant5")
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+from peft import PeftModel, PeftConfig
+
+# Load the PEFT configuration
+peft_model_id = "sugiv/bluey-poor-flant5"
+peft_config = PeftConfig.from_pretrained(peft_model_id)
+
+# Load the base model
+base_model = AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path)
+
+# Load the PEFT model
+model = PeftModel.from_pretrained(base_model, peft_model_id)
+
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
+
+# Set the model to evaluation mode
+model.eval()
 
 def generate_transform_prompt(input_text, filter_combination):
     return f'''You are an advanced text transformation AI. Your task is to {filter_combination['Task']} the given input text according to the specified parameters. {filter_combination['Task'].capitalize()}ing means expressing the same meaning using different words, while maintaining the original intent. Always correct spelling and grammatical errors implicitly.
@@ -60,6 +75,4 @@ prompt = generate_transform_prompt(input_text, filter_combination)
 inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
 outputs = model.generate(**inputs, max_length=150, num_return_sequences=1)
 transformed_text = tokenizer.decode(outputs, skip_special_tokens=True)
-print(transformed_text)
-
-This model is fine-tuned on a synthetic dataset for text transformation tasks. It can perform various transformations based on specified parameters such as tone, target audience, complexity, purpose, style, and verbosity.
+print(transformed_text)
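
For reference, a minimal end-to-end sketch of how the updated snippet is meant to be used. It assumes the `generate_transform_prompt` helper defined in the README is in scope; the `filter_combination` keys other than `'Task'` are hypothetical placeholders based on the parameters the model card mentions (tone, target audience, complexity, purpose, style, verbosity) and may not match the actual training schema, and `outputs[0]` is passed to `tokenizer.decode` because `generate` returns a batched tensor.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from peft import PeftModel, PeftConfig

# Load adapter config, base model, PEFT adapter, and tokenizer (as in the README)
peft_model_id = "sugiv/bluey-poor-flant5"
peft_config = PeftConfig.from_pretrained(peft_model_id)
base_model = AutoModelForSeq2SeqLM.from_pretrained(peft_config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
model.eval()

# Hypothetical filter_combination: only the 'Task' key appears in the diff;
# the remaining keys mirror the parameters the card lists and are assumptions.
filter_combination = {
    "Task": "paraphrase",
    "Tone": "formal",
    "Target Audience": "general",
    "Complexity": "simple",
    "Purpose": "inform",
    "Style": "concise",
    "Verbosity": "low",
}

# Example input with a spelling error, since the prompt asks the model to
# correct spelling and grammar implicitly
input_text = "the meeting was moved to thursday becuase of a scheduling conflict"
prompt = generate_transform_prompt(input_text, filter_combination)  # helper from the README

inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
outputs = model.generate(**inputs, max_length=150, num_return_sequences=1)

# generate() returns a tensor of shape (1, seq_len); decode the first sequence
transformed_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(transformed_text)
```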