Kroy committed on
Commit
4bda9c3
·
1 Parent(s): f6d2d6e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -49
app.py CHANGED
@@ -3,40 +3,22 @@ warnings.filterwarnings("ignore")
3
  ## import necessary packages
4
 
5
  import os
6
- import io
7
- import sys
8
- import base64
9
- import random
10
- import argparse
11
- import math
12
- import numpy as np
13
-
14
  from typing import Any, Union,Dict, List
15
  import numpy as np
 
 
16
  import requests
 
 
17
  from PIL import Image
18
- from imageio import imread
19
- from keras import backend as K
20
-
21
- import coco
22
- import utils
23
- import model as modellib
24
- import visualize
25
- from classes import class_names
26
  from fastapi import FastAPI
27
 
28
  # Create a new FastAPI app instance
29
  app = FastAPI()
30
 
31
- # Root directory of the project
32
- ROOT_DIR = os.getcwd()
33
-
34
- # Directory to save logs and trained model
35
- MODEL_DIR = os.path.join(ROOT_DIR, "logs")
36
-
37
- # Local path to trained weights file
38
- COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
39
- os.system("pip install pycocotools==2.0.0")
40
 
41
 
42
 
@@ -45,42 +27,39 @@ os.system("pip install pycocotools==2.0.0")
45
  # string parameter called text. The function generates text based on the # input using the pipeline() object, and returns a JSON response
46
  # containing the generated text under the key "output"
47
  @app.get("/generate")
48
- def generate(path: str):
49
  """
50
  Using the text summarization pipeline from `transformers`, summarize text
51
  from the given input text. The model used is `philschmid/bart-large-cnn-samsum`, which
52
  can be found [here](<https://huggingface.co/philschmid/bart-large-cnn-samsum>).
53
  """
54
  # Use the pipeline to generate text from the given input text
55
- K.clear_session()
56
-
57
- if not os.path.exists(COCO_MODEL_PATH):
58
- utils.download_trained_weights(COCO_MODEL_PATH)
59
-
60
- class InferenceConfig(coco.CocoConfig):
61
- GPU_COUNT = 1
62
- IMAGES_PER_GPU = 1
63
- config = InferenceConfig()
64
 
65
- model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
66
- model.load_weights(COCO_MODEL_PATH, by_name=True)
67
- r = requests.get(path, stream=True)
68
- img = Image.open(io.BytesIO(r.content)).convert('RGB')
69
- open_cv_image = np.array(img)
70
- image = open_cv_image
71
 
72
- results = model.detect([image], verbose=1)
 
 
73
 
74
- # Get results and save them
75
- r = results[0]
76
- output_image = visualize.display_instances_and_save(image,
77
- r['rois'], r['masks'], r['class_ids'], class_names, r['scores'])
78
 
 
 
 
 
79
 
80
- image = Image.fromarray(output_image)
 
81
  im_file = io.BytesIO()
82
- image.save(im_file, format="JPEG")
83
- im_bytes = im_file.getvalue() # im_bytes: image in binary for
84
  # Return the generated text in a JSON response
85
  return {"output": im_bytes}
86
 
 
## import necessary packages

import os
import io
import base64
from typing import Any, Union, Dict, List

import numpy as np
import requests
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
from fastapi import FastAPI

# Local helpers (provides load_image).
# NOTE(review): wildcard import — prefer importing the needed names explicitly.
from helper import *

# Create a new FastAPI app instance
app = FastAPI()

# Load the TF Hub arbitrary-image-stylization model once at import time so
# every request reuses the already-loaded module instead of reloading it.
hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)
 
# Endpoint that takes a JSON mapping `features` containing the content image
# "url" (and an optional "style_url"), stylizes the content image with the
# TF Hub model, and returns a JSON response containing the result as a
# base64-encoded PNG under the key "output".
@app.get("/generate")
def generate(features: Dict[str, str]):
    """
    Apply arbitrary neural style transfer to the image at ``features['url']``.

    Uses the Magenta arbitrary-image-stylization model loaded from TF Hub
    (https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2).
    An optional ``features['style_url']`` selects the style image; when it is
    absent, Munch's "The Scream" is used as the default style.

    Returns a dict with the stylized image as a base64-encoded PNG string
    under the key "output".
    """
    content_image_url = features['url']
    # Fall back to a default style image when the caller supplies none.
    style_image_url = features.get(
        'style_url',
        'https://upload.wikimedia.org/wikipedia/commons/c/c5/Edvard_Munch%2C_1893%2C_The_Scream%2C_oil%2C_tempera_and_pastel_on_cardboard%2C_91_x_73_cm%2C_National_Gallery_of_Norway.jpg',
    )

    # Load images; the content image is kept at a higher resolution than
    # the style image.
    content_img_size = (500, 500)
    style_img_size = (300, 300)

    style_image = load_image(style_image_url, style_img_size)
    content_image = load_image(content_image_url, content_img_size)
    # Lightly smooth the style image before stylization.
    style_image = tf.nn.avg_pool(
        style_image, ksize=[3, 3], strides=[1, 1], padding='SAME')

    # Stylize content image with given style image.
    outputs = hub_module(tf.constant(content_image),
                         tf.constant(style_image))
    stylized_image = outputs[0]

    # Convert the float tensor (presumably in [0, 1] — values scaled by 255
    # here) to a PIL image, then base64-encode it as PNG bytes.
    img = Image.fromarray(np.uint8(stylized_image.numpy()[0] * 255))
    im_file = io.BytesIO()
    img.save(im_file, format="PNG")
    im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")
    # Return the encoded image in a JSON response
    return {"output": im_bytes}
65