kantundpeterpan commited on
Commit
25d6c5f
·
1 Parent(s): 1c1c1db
Files changed (2) hide show
  1. app.py +0 -2
  2. tasks/text.py +20 -22
app.py CHANGED
@@ -2,8 +2,6 @@ from fastapi import FastAPI
2
  from dotenv import load_dotenv
3
  from tasks import text, image, audio
4
 
5
- from skops.hub_utils import download
6
-
7
  # Load environment variables
8
  load_dotenv()
9
 
 
2
  from dotenv import load_dotenv
3
  from tasks import text, image, audio
4
 
 
 
5
  # Load environment variables
6
  load_dotenv()
7
 
tasks/text.py CHANGED
@@ -13,7 +13,11 @@ from huggingface_hub import hf_hub_download
13
  import joblib
14
 
15
  REPO_ID = "kantundpeterpan/frugal-ai-toy"
16
- FILENAME = "tfidf_rf.skops"
 
 
 
 
17
 
18
  #add model directory to python path to be able to load tools.py
19
  import sys
@@ -21,18 +25,15 @@ import os
21
  sys.path.append(os.path.abspath('tasks/model'))
22
 
23
 
24
- print("### App Dir")
25
- print(os.listdir("./"))
26
- print()
27
- print("### Task Dir")
28
- print(os.listdir("./tasks"))
29
 
30
- if not os.path.exists("tasks/model"):
31
- #download model for text task
32
- download(repo_id = "kantundpeterpan/frugal-ai-toy", dst = "tasks/model")
33
 
34
- print("### Model Dir")
35
- print(os.listdir("./tasks/model"))
36
 
37
  import random
38
 
@@ -79,7 +80,12 @@ async def evaluate_text(request: TextEvaluationRequest):
79
  train_test = dataset["train"].train_test_split(test_size=request.test_size, seed=request.test_seed)
80
  test_dataset = train_test["test"]
81
  test_df = pd.DataFrame(test_dataset)
82
- print(test_df.head())
 
 
 
 
 
83
 
84
  # Start tracking emissions
85
  tracker.start()
@@ -87,18 +93,10 @@ async def evaluate_text(request: TextEvaluationRequest):
87
 
88
  #--------------------------------------------------------------------------------------------
89
  # YOUR MODEL INFERENCE CODE HERE
90
- # Update the code below to replace the random baseline by your model inference within the inference pass where the energy consumption and emissions are tracked.
 
91
  #--------------------------------------------------------------------------------------------
92
 
93
- #get unknown types
94
- unknown = skops.io.get_untrusted_types(file = "tasks/model/tfidf_rf.skops")
95
-
96
- print("### Task Dir")
97
- print(os.listdir("./tasks"))
98
-
99
- #load model
100
- model = model = load("tasks/model/tfidf_rf.skops", trusted = unknown)
101
-
102
  # Make predictions
103
  true_labels = test_dataset["label"]
104
  predictions = [
 
13
  import joblib
14
 
15
  REPO_ID = "kantundpeterpan/frugal-ai-toy"
16
+ MODEL = "tfidf_rf.skops"
17
+
18
+ if not os.path.exists("tasks/model"):
19
+ #download model for text task
20
+ download(repo_id = REPO_ID, dst = "tasks/model")
21
 
22
  #add model directory to python path to be able to load tools.py
23
  import sys
 
25
  sys.path.append(os.path.abspath('tasks/model'))
26
 
27
 
28
+ # print("### App Dir")
29
+ # print(os.listdir("./"))
30
+ # print()
31
+ # print("### Task Dir")
32
+ # print(os.listdir("./tasks"))
33
 
 
 
 
34
 
35
+ # print("### Model Dir")
36
+ # print(os.listdir("./tasks/model"))
37
 
38
  import random
39
 
 
80
  train_test = dataset["train"].train_test_split(test_size=request.test_size, seed=request.test_seed)
81
  test_dataset = train_test["test"]
82
  test_df = pd.DataFrame(test_dataset)
83
+ # print(test_df.head())
84
+
85
+ #get unknown types
86
+ unknown = skops.io.get_untrusted_types(file = "tasks/model/" + MODEL)
87
+ #load model
88
+ model = model = load("tasks/model/" + MODEL, trusted = unknown)
89
 
90
  # Start tracking emissions
91
  tracker.start()
 
93
 
94
  #--------------------------------------------------------------------------------------------
95
  # YOUR MODEL INFERENCE CODE HERE
96
+ # Update the code below to replace the random baseline by your model inference within
97
+ # the inference pass where the energy consumption and emissions are tracked.
98
  #--------------------------------------------------------------------------------------------
99
 
 
 
 
 
 
 
 
 
 
100
  # Make predictions
101
  true_labels = test_dataset["label"]
102
  predictions = [