tushar310 committed on
Commit 30199bd · verified · 1 Parent(s): bed21f6

Update app.py

Files changed (1): app.py (+6 -6)
app.py CHANGED
@@ -4,8 +4,8 @@ from groq import Groq
 import markdown
 
 # Retrieve system prompts from environment variables
-GENERATE_PROMPT = os.environ.get("GENERATE_PROMPT_2")
-HUMANIZE_PROMPT = os.environ.get("HUMANIZE_CONTENT_2")
+GENERATE_PROMPT = os.environ.get("GENERATE_PROMPT")
+HUMANIZE_PROMPT = os.environ.get("HUMANIZE_PROMPT")
 
 def generate_mba_content(topic, api_key):
     if not GENERATE_PROMPT:
@@ -19,12 +19,12 @@ def generate_mba_content(topic, api_key):
     prompt = GENERATE_PROMPT.replace("[TOPIC]", topic)
     try:
         response = client.chat.completions.create(
-            model="llama3-70b-8192",
+            model="llama-3.3-70b-versatile",
             messages=[
                 {"role": "system", "content": prompt},
                 {"role": "user", "content": f"Generate content for the topic: {topic}"}
             ],
-            temperature=0.7,
+            temperature=0.5,
             max_tokens=4000
         )
         content = response.choices[0].message.content
@@ -51,12 +51,12 @@ def humanize_text(text, api_key):
     prompt = HUMANIZE_PROMPT.replace("[TEXT]", text)
     try:
         response = client.chat.completions.create(
-            model="llama3-70b-8192",
+            model="llama-3.3-70b-versatile",
             messages=[
                 {"role": "system", "content": prompt},
                 {"role": "user", "content": f"Rewrite the following text: {text}"}
             ],
-            temperature=0.5,
+            temperature=0.5,
             max_tokens=4000
         )
         content = response.choices[0].message.content
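
For context, below is a minimal sketch of how generate_mba_content plausibly reads after this commit, assembled from the hunk context above. The Groq client construction, the exact error messages, and the markdown.markdown() rendering step are assumptions not shown in the diff; only the environment-variable names, model identifier, temperature, and max_tokens come from the change itself.

import os

import markdown
from groq import Groq

# New environment-variable names introduced by this commit
GENERATE_PROMPT = os.environ.get("GENERATE_PROMPT")
HUMANIZE_PROMPT = os.environ.get("HUMANIZE_PROMPT")


def generate_mba_content(topic, api_key):
    # The guard on a missing prompt is visible in the hunk context;
    # the exact error message here is an assumption.
    if not GENERATE_PROMPT:
        return "Error: GENERATE_PROMPT environment variable is not set."

    client = Groq(api_key=api_key)  # client construction assumed from the Groq SDK
    prompt = GENERATE_PROMPT.replace("[TOPIC]", topic)
    try:
        response = client.chat.completions.create(
            model="llama-3.3-70b-versatile",   # updated model identifier
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": f"Generate content for the topic: {topic}"},
            ],
            temperature=0.5,                   # lowered from 0.7
            max_tokens=4000,
        )
        content = response.choices[0].message.content
        return markdown.markdown(content)      # rendering step assumed from the markdown import
    except Exception as exc:                   # error handling assumed
        return f"Error: {exc}"

In short, the commit swaps both functions to the llama-3.3-70b-versatile model, reads the system prompts from the renamed GENERATE_PROMPT and HUMANIZE_PROMPT variables, and lowers the temperature from 0.7 to 0.5, which makes the generated text more deterministic.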