Tri4 committed on
Commit
06ad402
·
verified ·
1 Parent(s): fb7b6d3

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +6 -18
main.py CHANGED
@@ -1,6 +1,5 @@
1
  from flask import Flask, request, jsonify
2
  from huggingface_hub import InferenceClient
3
- import time
4
 
5
  # Initialize Flask app
6
  app = Flask(__name__)
@@ -48,22 +47,11 @@ def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, r
48
  )
49
 
50
  output = ""
51
- complete = False
52
-
53
- # Use a loop to ensure response is fully received
54
- while not complete:
55
- for token in response:
56
- if hasattr(token, 'token') and hasattr(token.token, 'text'):
57
- output += token.token.text
58
- else:
59
- print(f"Unexpected token structure: {token}", flush=True)
60
-
61
- # Check if the response seems complete
62
- if token.token.text.endswith('</s>'):
63
- complete = True
64
- break
65
- # Introduce a delay to handle streaming responses more smoothly
66
- time.sleep(0.1)
67
 
68
  # Print AI response
69
  print(f"\nSema AI: {output}\n", flush=True)
@@ -95,4 +83,4 @@ def generate_text():
95
  return jsonify({"error": str(e)}), 500
96
 
97
  if __name__ == "__main__":
98
- app.run(debug=True, port=5000)
 
1
  from flask import Flask, request, jsonify
2
  from huggingface_hub import InferenceClient
 
3
 
4
  # Initialize Flask app
5
  app = Flask(__name__)
 
47
  )
48
 
49
  output = ""
50
+ for token in response:
51
+ if hasattr(token, 'token') and hasattr(token.token, 'text'):
52
+ output += token.token.text
53
+ else:
54
+ print(f"Unexpected token structure: {token}", flush=True)
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  # Print AI response
57
  print(f"\nSema AI: {output}\n", flush=True)
 
83
  return jsonify({"error": str(e)}), 500
84
 
85
  if __name__ == "__main__":
86
+ app.run(debug=True, port=5000)