JulianPhillips committed
Commit 6326d5f · verified · 1 Parent(s): 69a4674

Update app.py

Files changed (1)
  1. app.py +36 -12
app.py CHANGED
@@ -1,5 +1,8 @@
 from flask import Flask, request, jsonify
 import torch
+from PIL import Image
+from io import BytesIO
+import torchvision.transforms as transforms
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 
 # Load Meta Sapiens Pose model
@@ -10,25 +13,46 @@ sapiens_model.eval()
 motionbert_model = AutoModelForSequenceClassification.from_pretrained('/models/motionbert')
 motionbert_tokenizer = AutoTokenizer.from_pretrained('/models/motionbert')
 
+# Flask app
 app = Flask(__name__)
 
+# Define a transformation for input images
+transform = transforms.Compose([
+    transforms.Resize((256, 256)),  # Resize image to the required size
+    transforms.ToTensor(),          # Convert image to PyTorch tensor
+])
+
 @app.route('/pose_estimation', methods=['POST'])
 def pose_estimation():
-    # Accept an image file as input for pose estimation
-    image = request.files['image'].read()
-    # Perform pose estimation
-    with torch.no_grad():
-        pose_result = sapiens_model(torch.tensor(image))
-    return jsonify({"pose_result": pose_result.tolist()})
+    try:
+        # Accept an image file as input for pose estimation
+        image = request.files['image']
+        img = Image.open(BytesIO(image.read()))
+
+        # Preprocess the image
+        img_tensor = transform(img).unsqueeze(0)  # Add batch dimension
+
+        # Perform pose estimation
+        with torch.no_grad():
+            pose_result = sapiens_model(img_tensor)
+
+        return jsonify({"pose_result": pose_result.tolist()})
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
 
 @app.route('/sequence_analysis', methods=['POST'])
 def sequence_analysis():
-    # Accept keypoint data as input for sequence analysis
-    keypoints = request.json['keypoints']
-    inputs = motionbert_tokenizer(keypoints, return_tensors="pt")
-    with torch.no_grad():
-        sequence_output = motionbert_model(**inputs)
-    return jsonify({"sequence_analysis": sequence_output.logits.tolist()})
+    try:
+        # Accept keypoint data as input for sequence analysis
+        keypoints = request.json['keypoints']
+        inputs = motionbert_tokenizer(keypoints, return_tensors="pt")
+
+        with torch.no_grad():
+            sequence_output = motionbert_model(**inputs)
+
+        return jsonify({"sequence_analysis": sequence_output.logits.tolist()})
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
 
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=7860)
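
For reference, a minimal client sketch against the two endpoints defined in app.py. This is not part of the commit: the base URL uses the port from app.run, "example.jpg" is a placeholder file name, and the whitespace-separated keypoint string is only an assumption about what the MotionBERT tokenizer accepts.

import requests

BASE_URL = "http://localhost:7860"  # port taken from app.run(...) in app.py

# Pose estimation: upload an image as multipart form data under the "image"
# field, matching request.files['image'] on the server.
with open("example.jpg", "rb") as f:  # placeholder image path
    resp = requests.post(f"{BASE_URL}/pose_estimation", files={"image": f})
print(resp.status_code, resp.json())

# Sequence analysis: send keypoint data as JSON under the "keypoints" key,
# matching request.json['keypoints'] on the server. The exact keypoint format
# expected by the tokenizer is assumed here.
payload = {"keypoints": "0.12 0.34 0.56 0.78"}
resp = requests.post(f"{BASE_URL}/sequence_analysis", json=payload)
print(resp.status_code, resp.json())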