File size: 2,311 Bytes
4ed95aa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import ast
import openai
from transformers import GPT2Tokenizer

# Module-level GPT-2 tokenizer, used by limit_tokens() to truncate input
# text to a token budget before sending it to the chat model.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Prompt engineering
def get_prompt(text):
    """Build the full prompt sent to the chat model.

    Wraps *text* (the article content) with an instruction asking for
    exactly 3 discussion questions answered in a fixed markdown layout.

    Args:
        text: Article content, already truncated to the token budget.

    Returns:
        The assembled prompt string.
    """
    prompt_prefix = """Generate exactly 3 different and thought provoking discussion questions about given article below, and return the answers of these questions with the evidence. 
    
    Desired output should be a markdown format like this: 

    ## Q1: <question>

   <answer>

    ## Q2: <question>

   <answer>   

    ## Q3: <question>

   <answer> 

    """     
    prompt_postfix ="""
    Given article content: \"""{}.\"""
    """
    # *text* is a format argument (not the template), so braces in the
    # article cannot break str.format here.
    prompt = prompt_prefix + prompt_postfix.format(text)
    return prompt

def limit_tokens(text, n=3000):
    """Truncate *text* to its first *n* GPT-2 tokens.

    Args:
        text: Raw input text.
        n: Maximum number of tokens to keep (default 3000, leaving room
           for the prompt wrapper and the model's reply).

    Returns:
        The decoded text of the first *n* tokens.
    """
    # Encode to a plain Python list and slice it directly; the original
    # round-tripped through a PyTorch tensor (return_tensors="pt") just
    # to take a prefix, pulling in torch for no benefit.
    input_ids = tokenizer.encode(text)
    first_n_tokens = input_ids[:n]
    return tokenizer.decode(first_n_tokens, skip_special_tokens=True)


# Chat completion
def get_openai_chatcompletion(text):
    """Get OpenAI Chat Completion result for *text*.

    The text is truncated to the token budget, wrapped in the discussion
    -question prompt, and sent as a single user message.

    Args:
        text: Raw article content.

    Returns:
        The raw ChatCompletion response object.

    Raises:
        openai.error.OpenAIError: propagated unchanged from the API call.
    """
    processed_text = limit_tokens(text)
    augmented_prompt = get_prompt(processed_text)
    messages = [{"role": "user", "content": augmented_prompt}]
    # The original wrapped this call in `try: ... except: raise`, which is
    # a no-op — exceptions propagate identically without it.
    result = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.7,
    )
    return result


def get_analyze(result):
    """Extract the assistant's message text from a ChatCompletion response.

    Args:
        result: A ChatCompletion response (or equivalent mapping) shaped
            like ``{"choices": [{"message": {"content": ...}}]}``.

    Returns:
        The content string of the first choice's message.

    Raises:
        KeyError / IndexError: if the response does not have the expected
            shape; propagated unchanged (the original's bare
            ``except: raise`` was a no-op and has been removed, along with
            dead commented-out code from the legacy completions API).
    """
    return result["choices"][0]["message"]["content"]


def get_analyze_result(text):
    """Run the full pipeline: request a chat completion for *text* and
    return the extracted answer content."""
    return get_analyze(get_openai_chatcompletion(text))