hasibzunair committed
Commit ad0d661 · 1 Parent(s): a5f83aa

update intro

Files changed (2)
  1. app.py +20 -8
  2. description.html +21 -0
app.py CHANGED
@@ -1,16 +1,20 @@
  import os
- os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+ import codecs
  import tensorflow as tf
  import keras.backend.tensorflow_backend as tb
- tb._SYMBOLIC_SCOPE.value = True
  import numpy as np
  import gradio as gr
  import cv2
  from PIL import Image
  from tensorflow.keras.models import load_model

+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+ tb._SYMBOLIC_SCOPE.value = True
+
  # Get model weights
- os.system("wget https://github.com/hasibzunair/adversarial-lesions/releases/latest/download/MelaNet.h5")
+ os.system(
+     "wget https://github.com/hasibzunair/adversarial-lesions/releases/latest/download/MelaNet.h5"
+ )

  # Load model
  model = None
@@ -24,7 +28,7 @@ labels = ["Benign", "Malignant"]
  # Helpers
  def preprocess_image(img_array):
      # Normalize to [0,1]
-     img_array = img_array.astype('float32')
+     img_array = img_array.astype("float32")
      img_array /= 255
      # Check that images are 2D arrays
      if len(img_array.shape) > 2:
@@ -46,16 +50,24 @@ def inference(img):
      labels_probs = {labels[i]: float(preds[i]) for i, _ in enumerate(labels)}
      return labels_probs

- title = "Melanoma Detection Demo"
+
+ title = "Melanoma Detection using Adversarial Training and Deep Transfer Learning"
  description = "This model predicts whether the given skin lesion image is benign or malignant. To use it, simply upload a skin lesion image, or click one of the examples to load them. Read more at the links below."
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2004.06824' target='_blank'>Melanoma Detection using Adversarial Training and Deep Transfer Learning</a> | <a href='https://github.com/hasibzunair/adversarial-lesions' target='_blank'>Github</a></p>"

- gr.Interface(
+
+ demo = gr.Interface(
      fn=inference,
      title=title,
-     description = description,
+     description=description,
      article=article,
      inputs="image",
      outputs="label",
      examples=examples,
- ).launch(debug=True, enable_queue=True)
+ )
+
+
+ demo.launch(
+     # debug=True,
+     # enable_queue=True
+ )
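
For context, a minimal sketch of the same inference path outside the Gradio UI, built only from the pieces visible in this diff (the downloaded MelaNet.h5 weights, the [0, 1] normalization, and the Benign/Malignant labels). The 224x224 input size and the example file name are assumptions, not taken from the repository:

import numpy as np
import cv2
from tensorflow.keras.models import load_model

labels = ["Benign", "Malignant"]

# Weights fetched by the wget call in the diff above
model = load_model("MelaNet.h5", compile=False)

img = cv2.imread("lesion.jpg")              # hypothetical input path
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (224, 224)).astype("float32") / 255.0  # assumed input size; [0, 1] scaling as in preprocess_image
preds = model.predict(np.expand_dims(img, axis=0))[0]
print({labels[i]: float(p) for i, p in enumerate(preds)})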
description.html ADDED
@@ -0,0 +1,21 @@
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <title>Title</title>
+ </head>
+ <body>
+ This is a demo of <a href="https://arxiv.org/abs/2004.06824">Melanoma Detection using Adversarial Training and Deep Transfer Learning</a> (Physics in Medicine and Biology, 2020).<br>
+
+ We introduce an over-sampling method for learning the inter-class mapping between under-represented
+ class samples and over-represented samples, in a bid to generate under-represented class samples
+ using unpaired image-to-image translation. These synthetic images are then used as additional
+ training data in the task of detecting abnormalities in binary classification use-cases.
+ Code is publicly available on <a href='https://github.com/hasibzunair/adversarial-lesions' target='_blank'>GitHub</a>.<br>
+
+ This method was also effective for COVID-19 detection from chest radiography images, which led to the
+ <a href="https://github.com/hasibzunair/synthetic-covid-cxr-dataset">Synthetic COVID-19 Chest X-ray Dataset for Computer-Aided Diagnosis</a>.
+ The synthetic images not only improved the performance of various deep learning architectures when used as additional training data
+ under heavy imbalance conditions, but also helped detect the target class (e.g. COVID-19) with high confidence.
+ </body>
+ </html>
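
To make the over-sampling idea in the description concrete, here is a hedged sketch (not code from this repository) of how synthetic under-represented-class images could be appended to the real training data. The generator argument stands in for a separately trained unpaired image-to-image translation model (e.g. a CycleGAN-style benign-to-malignant generator) and is purely illustrative:

import numpy as np

def oversample_with_synthetic(x_majority, x_minority, generator):
    # generator: hypothetical Keras-style model that translates majority-class
    # images into synthetic minority-class images (unpaired image-to-image translation).
    n_missing = len(x_majority) - len(x_minority)
    x_synth = generator.predict(x_majority[:n_missing])   # synthetic minority samples
    x_train = np.concatenate([x_majority, x_minority, x_synth], axis=0)
    y_train = np.concatenate([
        np.zeros(len(x_majority)),                        # 0 = Benign (over-represented)
        np.ones(len(x_minority) + len(x_synth)),          # 1 = Malignant (real + synthetic)
    ])
    return x_train, y_train

The balanced (x_train, y_train) pair can then be fed to a binary classifier, which is how the paper uses the translated images as additional training data.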