Next, prepare an instance of a CocoDetection class that can be used with cocoevaluator.
import torchvision


class CocoDetection(torchvision.datasets.CocoDetection):
    def __init__(self, img_folder, image_processor, ann_file):
        super().__init__(img_folder, ann_file)
        self.image_processor = image_processor

    def __getitem__(self, idx):
        # read in PIL image and target in COCO format
        img, target = super(CocoDetection, self).__getitem__(idx)

        # preprocess image and target: converting target to DETR format,
        # resizing + normalization of both image and target
        image_id = self.ids[idx]
        target = {"image_id": image_id, "annotations": target}
        encoding = self.image_processor(images=img, annotations=target, return_tensors="pt")
        pixel_values = encoding["pixel_values"].squeeze()  # remove batch dimension
        target = encoding["labels"][0]  # remove batch dimension

        return {"pixel_values": pixel_values, "labels": target}
im_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
path_output_cppe5, path_anno = save_cppe5_annotation_file_images(cppe5["test"])
test_ds_coco_format = CocoDetection(path_output_cppe5, im_processor, path_anno)
Finally, load the metrics and run the evaluation.
import evaluate
from tqdm import tqdm
model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
module = evaluate.load("ybelkada/cocoevaluate", coco=test_ds_coco_format.coco)
val_dataloader = torch.utils.data.DataLoader(
    test_ds_coco_format, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn
)

with torch.no_grad():
    for idx, batch in enumerate(tqdm(val_dataloader)):
        pixel_values = batch["pixel_values"]
        pixel_mask = batch["pixel_mask"]

        labels = [
            {k: v for k, v in t.items()} for t in batch["labels"]
        ]  # these are in DETR format, resized + normalized

        # forward pass
        outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask)

        orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0)
        results = im_processor.post_process(outputs, orig_target_sizes)  # convert model outputs to Pascal VOC format (xmin, ymin, xmax, ymax)

        module.add(prediction=results, reference=labels)
        del batch
results = module.compute()
print(results)
Accumulating evaluation results
DONE (t=0.08s).
IoU metric: bbox
Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.352
Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.681
Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.292
Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.168
Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.208
Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.429
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.274
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.484
Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.191
Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.323
Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.590
These results can be further improved by adjusting the hyperparameters in [~transformers.TrainingArguments]. Give it a go!
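For example, you could re-run training with a smaller learning rate or more epochs. The values below are hypothetical starting points rather than tuned settings, and assume the same training setup used earlier in this guide:
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="detr-resnet-50_finetuned_cppe5",
    learning_rate=1e-5,  # hypothetical: smaller than before
    per_device_train_batch_size=8,
    num_train_epochs=30,  # hypothetical: train longer
    weight_decay=1e-4,
    save_total_limit=2,
    push_to_hub=True,
)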
Inference
Now that you have finetuned a DETR model, evaluated it, and uploaded it to the Hugging Face Hub, you can use it for inference.
The simplest way to try out your finetuned model for inference is to use it in a [pipeline]. Instantiate a pipeline
for object detection with your model, and pass an image to it:
from transformers import pipeline
from PIL import Image
import requests

url = "https://i.imgur.com/2lnWoly.jpg"
image = Image.open(requests.get(url, stream=True).raw)
obj_detector = pipeline("object-detection", model="devonho/detr-resnet-50_finetuned_cppe5")
obj_detector(image)
You can also manually replicate the results of the pipeline if you'd like:
image_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
with torch.no_grad():
    inputs = image_processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    target_sizes = torch.tensor([image.size[::-1]])
    results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    box = [round(i, 2) for i in box.tolist()]
    print(
        f"Detected {model.config.id2label[label.item()]} with confidence "
        f"{round(score.item(), 3)} at location {box}"
    )
Detected Coverall with confidence 0.566 at location [1215.32, 147.38, 4401.81, 3227.08]
Detected Mask with confidence 0.584 at location [2449.06, 823.19, 3256.43, 1413.9]
Let's plot the result:
from PIL import ImageDraw

draw = ImageDraw.Draw(image)
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    box = [round(i, 2) for i in box.tolist()]
    x, y, x2, y2 = tuple(box)
    draw.rectangle((x, y, x2, y2), outline="red", width=1)
    draw.text((x, y), model.config.id2label[label.item()], fill="white")
image
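If you are running this in a script rather than a notebook, you can save the annotated image to disk instead of displaying it (the filename is just an example):
image.save("cppe5_detections.png")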
Before you begin, make sure you have all the necessary libraries installed:
pip install transformers datasets evaluate seqeval
We encourage you to login to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to login:
from huggingface_hub import notebook_login
notebook_login()
Load WNUT 17 dataset
Start by loading the WNUT 17 dataset from the 🤗 Datasets library:
from datasets import load_dataset
wnut = load_dataset("wnut_17")
Then take a look at an example:
wnut["train"][0]
{'id': '0',
'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.']
}
Each number in ner_tags represents an entity. Convert the numbers to their label names to find out what the entities are:
label_list = wnut["train"].features[f"ner_tags"].feature.names
label_list
[
"O",
"B-corporation",
"I-corporation",
"B-creative-work",
"I-creative-work",
"B-group",
"I-group",
"B-location",
"I-location",
"B-person",
"I-person",
"B-product",
"I-product",
]
The letter that prefixes each ner_tag indicates the token position of the entity:
B- indicates the beginning of an entity.
I- indicates a token is contained inside the same entity (for example, the State token is a part of an entity like
Empire State Building).
O indicates the token doesn't correspond to any entity.
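To see these tags in context, you can map the ner_tags of the example above back to their label names; tags 7 and 8, for instance, mark the "Empire State Building" span as B-location/I-location. This is just a quick check using the label_list defined earlier:
example = wnut["train"][0]
print([label_list[tag] for tag in example["ner_tags"]])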
Preprocess
The next step is to load a DistilBERT tokenizer to preprocess the tokens field:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
As you saw in the example tokens field above, it looks like the input has already been tokenized. But the input actually hasn't been tokenized yet and you'll need to set is_split_into_words=True to tokenize the words into subwords. For example:
example = wnut["train"][0]
tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
tokens
['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]']
However, this adds some special tokens [CLS] and [SEP] and the subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may now be split into two subwords. You'll need to realign the tokens and labels by:
Mapping all tokens to their corresponding word with the word_ids method.
Assigning the label -100 to the special tokens [CLS] and [SEP] so they're ignored by the PyTorch loss function (see CrossEntropyLoss).
Only labeling the first token of a given word. Assign -100 to other subtokens from the same word.
Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT's maximum input length:
def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)

    labels = []
    for i, label in enumerate(examples["ner_tags"]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)  # Map tokens to their respective word.
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:  # Set the special tokens to -100.
            if word_idx is None:
                label_ids.append(-100)
            elif word_idx != previous_word_idx:  # Only label the first token of a given word.
                label_ids.append(label[word_idx])
            else:
                label_ids.append(-100)
            previous_word_idx = word_idx
        labels.append(label_ids)

    tokenized_inputs["labels"] = labels
    return tokenized_inputs
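Before mapping over the full dataset, you can sanity-check the function on a small slice (purely illustrative, not part of the original recipe):
sample = wnut["train"][:2]  # a batched dict with two examples
batch = tokenize_and_align_labels(sample)
print(batch["labels"][0][:10])  # -100 marks [CLS] and sub-word continuations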
To apply the preprocessing function over the entire dataset, use 🤗 Datasets [~datasets.Dataset.map] function. You can speed up the map function by setting batched=True to process multiple elements of the dataset at once:
tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)
Now create a batch of examples using [DataCollatorForTokenClassification]. It's more efficient to dynamically pad the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length.
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
For TensorFlow, create the collator with return_tensors="tf":
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")
Evaluate
Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 Evaluate library. For this task, load the seqeval framework (see the 🤗 Evaluate quick tour to learn more about how to load and compute a metric). Seqeval actually produces several scores: precision, recall, F1, and accuracy.
import evaluate
seqeval = evaluate.load("seqeval")
Get the NER labels first, and then create a function that passes your true predictions and true labels to [~evaluate.EvaluationModule.compute] to calculate the scores:
import numpy as np

labels = [label_list[i] for i in example["ner_tags"]]


def compute_metrics(p):
    predictions, labels = p
    predictions = np.argmax(predictions, axis=2)

    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]

    results = seqeval.compute(predictions=true_predictions, references=true_labels)
    return {
        "precision": results["overall_precision"],
        "recall": results["overall_recall"],
        "f1": results["overall_f1"],
        "accuracy": results["overall_accuracy"],
    }
Your compute_metrics function is ready to go now, and you'll return to it when you set up your training.
Train
Before you start training your model, create a map of the expected ids to their labels with id2label and label2id:
id2label = {
0: "O",
1: "B-corporation",
2: "I-corporation",
3: "B-creative-work",
4: "I-creative-work",
5: "B-group",
6: "I-group",
7: "B-location",
8: "I-location",
9: "B-person",
10: "I-person",
11: "B-product",
12: "I-product",
}
label2id = {
"O": 0,
"B-corporation": 1,
"I-corporation": 2,
"B-creative-work": 3,
"I-creative-work": 4,
"B-group": 5,
"I-group": 6,
"B-location": 7,
"I-location": 8,
"B-person": 9,
"I-person": 10,
"B-product": 11,
"I-product": 12,
}
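These dictionaries can also be built programmatically from the label_list you created earlier, which is equivalent to writing them out by hand:
id2label = {i: label for i, label in enumerate(label_list)}
label2id = {label: i for i, label in enumerate(label_list)}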
If you aren't familiar with finetuning a model with the [Trainer], take a look at the basic tutorial here!
You're ready to start training your model now! Load DistilBERT with [AutoModelForTokenClassification] along with the number of expected labels, and the label mappings:
from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
model = AutoModelForTokenClassification.from_pretrained(
"distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
)
At this point, only three steps remain:
Define your training hyperparameters in [TrainingArguments]. The only required parameter is output_dir which specifies where to save your model. You'll push this model to the Hub by setting push_to_hub=True (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [Trainer] will evaluate the seqeval scores and save the training checkpoint.
Pass the training arguments to [Trainer] along with the model, dataset, tokenizer, data collator, and compute_metrics function.
Call [~Trainer.train] to finetune your model.
training_args = TrainingArguments(
output_dir="my_awesome_wnut_model",
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
num_train_epochs=2,
weight_decay=0.01,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
push_to_hub=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_wnut["train"],
eval_dataset=tokenized_wnut["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
trainer.train()
Once training is completed, share your model to the Hub with the [~transformers.Trainer.push_to_hub] method so everyone can use your model:
trainer.push_to_hub()
If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial here!
To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters:
from transformers import create_optimizer
batch_size = 16
num_train_epochs = 3
num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs
optimizer, lr_schedule = create_optimizer(
init_lr=2e-5,
num_train_steps=num_train_steps,
weight_decay_rate=0.01,
num_warmup_steps=0,
)
Then you can load DistilBERT with [TFAutoModelForTokenClassification] along with the number of expected labels, and the label mappings:
from transformers import TFAutoModelForTokenClassification
model = TFAutoModelForTokenClassification.from_pretrained(
"distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
)
Convert your datasets to the tf.data.Dataset format with [~transformers.TFPreTrainedModel.prepare_tf_dataset]:
tf_train_set = model.prepare_tf_dataset(
tokenized_wnut["train"],
shuffle=True,
batch_size=16,
collate_fn=data_collator,
)
tf_validation_set = model.prepare_tf_dataset(
tokenized_wnut["validation"],
shuffle=False,
batch_size=16,
collate_fn=data_collator,
)
Configure the model for training with compile. Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:
import tensorflow as tf
model.compile(optimizer=optimizer)  # No loss argument!
The last two things to set up before you start training are to compute the seqeval scores from the predictions, and to provide a way to push your model to the Hub. Both are done by using Keras callbacks.
Pass your compute_metrics function to [~transformers.KerasMetricCallback]:
from transformers.keras_callbacks import KerasMetricCallback
metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
Specify where to push your model and tokenizer in the [~transformers.PushToHubCallback]:
from transformers.keras_callbacks import PushToHubCallback
push_to_hub_callback = PushToHubCallback(
output_dir="my_awesome_wnut_model",
tokenizer=tokenizer,
)
Then bundle your callbacks together:
callbacks = [metric_callback, push_to_hub_callback]
Finally, you're ready to start training your model! Call fit with your training and validation datasets, the number of epochs, and your callbacks to finetune the model:
model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
Once training is completed, your model is automatically uploaded to the Hub so everyone can use it!
For a more in-depth example of how to finetune a model for token classification, take a look at the corresponding
PyTorch notebook
or TensorFlow notebook.
Inference
Great, now that you've finetuned a model, you can use it for inference!
Grab some text you'd like to run inference on:
text = "The Golden State Warriors are an American professional basketball team based in San Francisco."
The simplest way to try out your finetuned model for inference is to use it in a [pipeline]. Instantiate a pipeline for NER with your model, and pass your text to it:
from transformers import pipeline
classifier = pipeline("ner", model="stevhliu/my_awesome_wnut_model")
classifier(text)
[{'entity': 'B-location',
'score': 0.42658573,
'index': 2,
'word': 'golden',
'start': 4,
'end': 10},
{'entity': 'I-location',
'score': 0.35856336,
'index': 3,
'word': 'state',
'start': 11,
'end': 16},
{'entity': 'B-group',
'score': 0.3064001,
'index': 4,
'word': 'warriors',
'start': 17,
'end': 25},
{'entity': 'B-location',
'score': 0.65523505,
'index': 13,
'word': 'san',
'start': 80,
'end': 83},
{'entity': 'B-location',
'score': 0.4668663,
'index': 14,
'word': 'francisco',
'start': 84,
'end': 93}]
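If you prefer grouped entities rather than individual sub-word tokens, the pipeline also accepts an aggregation_strategy argument; the grouped spans and scores will differ from the token-level output above:
classifier = pipeline("ner", model="stevhliu/my_awesome_wnut_model", aggregation_strategy="simple")
classifier(text)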
You can also manually replicate the results of the pipeline if you'd like:
Tokenize the text and return PyTorch tensors:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
inputs = tokenizer(text, return_tensors="pt")
Pass your inputs to the model and return the logits:
from transformers import AutoModelForTokenClassification
model = AutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
import torch

with torch.no_grad():
    logits = model(**inputs).logits
Get the class with the highest probability, and use the model's id2label mapping to convert it to a text label:
predictions = torch.argmax(logits, dim=2)
predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]]
predicted_token_class
['O',
'O',
'B-location',
'I-location',
'B-group',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'B-location',
'B-location',
'O',
'O']
Tokenize the text and return TensorFlow tensors:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
inputs = tokenizer(text, return_tensors="tf")
Pass your inputs to the model and return the logits:
from transformers import TFAutoModelForTokenClassification
model = TFAutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
logits = model(**inputs).logits
Get the class with the highest probability, and use the model's id2label mapping to convert it to a text label:
predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
predicted_token_class = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
predicted_token_class
['O',
'O',
'B-location',
'I-location',
'B-group',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'B-location',
'B-location',
'O',
'O']
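To pair each predicted label with its corresponding token, you can zip the decoded tokens with the predictions (an optional check, not part of the original recipe):
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].numpy().tolist())
for token, label in zip(tokens, predicted_token_class):
    print(token, label)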
Before you begin, make sure you have all the necessary libraries installed:
pip install transformers datasets evaluate accelerate
We encourage you to login to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to login:
from huggingface_hub import notebook_login
notebook_login()
Load IMDb dataset
Start by loading the IMDb dataset from the 🤗 Datasets library:
from datasets import load_dataset
imdb = load_dataset("imdb")
Then take a look at an example:
imdb["test"][0]
{
"label": 0,
"text": "I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn't match the background, and painfully one-dimensional characters cannot be overcome with a 'sci-fi' setting. (I'm sure there are those of you out there who think Babylon 5 is good sci-fi TV. It's not. It's clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It's really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it's rubbish as they have to always say \"Gene Roddenberry's Earth\" otherwise people would not continue watching. Roddenberry's ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.",
}
There are two fields in this dataset:
text: the movie review text.
label: a value that is either 0 for a negative review or 1 for a positive review.
Preprocess
The next step is to load a DistilBERT tokenizer to preprocess the text field:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
Create a preprocessing function to tokenize text and truncate sequences to be no longer than DistilBERT's maximum input length:
def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True)
To apply the preprocessing function over the entire dataset, use 🤗 Datasets [~datasets.Dataset.map] function. You can speed up map by setting batched=True to process multiple elements of the dataset at once:
tokenized_imdb = imdb.map(preprocess_function, batched=True)
Now create a batch of examples using [DataCollatorWithPadding]. It's more efficient to dynamically pad the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length.
from transformers import DataCollatorWithPadding
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
For TensorFlow, create the collator with return_tensors="tf":
from transformers import DataCollatorWithPadding
data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
Evaluate
Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 Evaluate library. For this task, load the accuracy metric (see the 🤗 Evaluate quick tour to learn more about how to load and compute a metric):
import evaluate
accuracy = evaluate.load("accuracy")
Then create a function that passes your predictions and labels to [~evaluate.EvaluationModule.compute] to calculate the accuracy:
import numpy as np


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return accuracy.compute(predictions=predictions, references=labels)
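As a quick sanity check, you can call compute_metrics on a toy batch of logits and labels (made-up values, purely illustrative):
dummy_logits = np.array([[0.1, 0.9], [0.8, 0.2]])  # argmax picks classes 1 and 0
dummy_labels = np.array([1, 0])
compute_metrics((dummy_logits, dummy_labels))  # {'accuracy': 1.0}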
Your compute_metrics function is ready to go now, and you'll return to it when you set up your training.
Train
Before you start training your model, create a map of the expected ids to their labels with id2label and label2id:
id2label = {0: "NEGATIVE", 1: "POSITIVE"}
label2id = {"NEGATIVE": 0, "POSITIVE": 1}
If you aren't familiar with finetuning a model with the [Trainer], take a look at the basic tutorial here!
You're ready to start training your model now! Load DistilBERT with [AutoModelForSequenceClassification] along with the number of expected labels, and the label mappings:
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
model = AutoModelForSequenceClassification.from_pretrained(
"distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
)
At this point, only three steps remain:
Define your training hyperparameters in [TrainingArguments]. The only required parameter is output_dir which specifies where to save your model. You'll push this model to the Hub by setting push_to_hub=True (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [Trainer] will evaluate the accuracy and save the training checkpoint.
Pass the training arguments to [Trainer] along with the model, dataset, tokenizer, data collator, and compute_metrics function.
Call [~Trainer.train] to finetune your model.
training_args = TrainingArguments(
output_dir="my_awesome_model",
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
num_train_epochs=2,
weight_decay=0.01,
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
push_to_hub=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_imdb["train"],
eval_dataset=tokenized_imdb["test"],
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
trainer.train()
[Trainer] applies dynamic padding by default when you pass tokenizer to it. In this case, you don't need to specify a data collator explicitly.
Once training is completed, share your model to the Hub with the [~transformers.Trainer.push_to_hub] method so everyone can use your model:
trainer.push_to_hub()
If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial here!
To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters:
from transformers import create_optimizer
import tensorflow as tf
batch_size = 16
num_epochs = 5
batches_per_epoch = len(tokenized_imdb["train"]) // batch_size
total_train_steps = int(batches_per_epoch * num_epochs)
optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)
Then you can load DistilBERT with [TFAutoModelForSequenceClassification] along with the number of expected labels, and the label mappings:
from transformers import TFAutoModelForSequenceClassification
model = TFAutoModelForSequenceClassification.from_pretrained(
"distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
)
Convert your datasets to the tf.data.Dataset format with [~transformers.TFPreTrainedModel.prepare_tf_dataset]:
tf_train_set = model.prepare_tf_dataset(
tokenized_imdb["train"],
shuffle=True,
batch_size=16,
collate_fn=data_collator,
)
tf_validation_set = model.prepare_tf_dataset(
tokenized_imdb["test"],
shuffle=False,
batch_size=16,
collate_fn=data_collator,
)
Configure the model for training with compile. Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:
import tensorflow as tf
model.compile(optimizer=optimizer)  # No loss argument!
The last two things to set up before you start training are to compute the accuracy from the predictions, and to provide a way to push your model to the Hub. Both are done by using Keras callbacks.
Pass your compute_metrics function to [~transformers.KerasMetricCallback]:
from transformers.keras_callbacks import KerasMetricCallback
metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
Specify where to push your model and tokenizer in the [~transformers.PushToHubCallback]:
from transformers.keras_callbacks import PushToHubCallback
push_to_hub_callback = PushToHubCallback(
output_dir="my_awesome_model",
tokenizer=tokenizer,
)
Then bundle your callbacks together:
callbacks = [metric_callback, push_to_hub_callback]
Finally, you're ready to start training your model! Call fit with your training and validation datasets, the number of epochs, and your callbacks to finetune the model:
model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
Once training is completed, your model is automatically uploaded to the Hub so everyone can use it!
For a more in-depth example of how to finetune a model for text classification, take a look at the corresponding
PyTorch notebook
or TensorFlow notebook.
Inference
Great, now that you've finetuned a model, you can use it for inference!
Grab some text you'd like to run inference on:
text = "This was a masterpiece. Not completely faithful to the books, but enthralling from beginning to end. Might be my favorite of the three."
The simplest way to try out your finetuned model for inference is to use it in a [pipeline]. Instantiate a pipeline for sentiment analysis with your model, and pass your text to it:
from transformers import pipeline
classifier = pipeline("sentiment-analysis", model="stevhliu/my_awesome_model")
classifier(text)
[{'label': 'POSITIVE', 'score': 0.9994940757751465}]
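To see the scores for both labels instead of only the top one, you can pass top_k=None when building the pipeline (the exact scores depend on your trained model):
classifier = pipeline("sentiment-analysis", model="stevhliu/my_awesome_model", top_k=None)
classifier(text)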
You can also manually replicate the results of the pipeline if you'd like:
Tokenize the text and return PyTorch tensors:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
inputs = tokenizer(text, return_tensors="pt")
Pass your inputs to the model and return the logits:
from transformers import AutoModelForSequenceClassification
model = AutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
import torch

with torch.no_grad():
    logits = model(**inputs).logits
Get the class with the highest probability, and use the model's id2label mapping to convert it to a text label:
predicted_class_id = logits.argmax().item()
model.config.id2label[predicted_class_id]
'POSITIVE'
Tokenize the text and return TensorFlow tensors:
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
inputs = tokenizer(text, return_tensors="tf")
Pass your inputs to the model and return the logits:
from transformers import TFAutoModelForSequenceClassification
model = TFAutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
logits = model(**inputs).logits
Get the class with the highest probability, and use the model's id2label mapping to convert it to a text label:
predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
model.config.id2label[predicted_class_id]
'POSITIVE'
Knowledge Distillation for Computer Vision
[[open-in-colab]]
Knowledge distillation is a technique used to transfer knowledge from a larger, more complex model (teacher) to a smaller, simpler model (student). To distill knowledge from one model to another, we take a pre-trained teacher model trained on a certain task (image classification in this case) and randomly initialize a student model to be trained on image classification. Next, we train the student model to minimize the difference between its outputs and the teacher's outputs, thus making it mimic the behavior. It was first introduced in Distilling the Knowledge in a Neural Network by Hinton et al. In this guide, we will do task-specific knowledge distillation. We will use the beans dataset for this.
This guide demonstrates how you can distill a fine-tuned ViT model (teacher model) to a MobileNet (student model) using the Trainer API of 🤗 Transformers.
Let's install the libraries needed for distillation and evaluating the process.
pip install transformers datasets accelerate tensorboard evaluate --upgrade
In this example, we are using the merve/beans-vit-224 model as teacher model. It's an image classification model, based on google/vit-base-patch16-224-in21k fine-tuned on beans dataset. We will distill this model to a randomly initialized MobileNetV2.
We will now load the dataset.
from datasets import load_dataset
dataset = load_dataset("beans")
We can use an image processor from either of the models, as in this case they return the same output with the same resolution. We will use the map() method of dataset to apply the preprocessing to every split of the dataset.
from transformers import AutoImageProcessor

teacher_processor = AutoImageProcessor.from_pretrained("merve/beans-vit-224")


def process(examples):
    processed_inputs = teacher_processor(examples["image"])
    return processed_inputs


processed_datasets = dataset.map(process, batched=True)
Essentially, we want the student model (a randomly initialized MobileNet) to mimic the teacher model (fine-tuned vision transformer). To achieve this, we first get the logits output from the teacher and the student. Then, we divide each of them by the parameter temperature which controls the importance of each soft target. A parameter called lambda weighs the importance of the distillation loss. In this example, we will use temperature=5 and lambda=0.5. We will use the Kullback-Leibler Divergence loss to compute the divergence between the student and teacher. Given two distributions P and Q, KL divergence explains how much extra information we need to represent P using Q. If the two are identical, their KL divergence is zero, as there is no other information needed to explain P from Q. Thus, in the context of knowledge distillation, KL divergence is useful.
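To make the loss described above concrete before wrapping it in a Trainer subclass, here is a standalone sketch with random logits; the tensors are dummy values and only illustrate the arithmetic with temperature=5 and lambda=0.5:
import torch
import torch.nn.functional as F

temperature, lambda_param = 5.0, 0.5
student_logits = torch.randn(4, 3)  # dummy batch: 4 samples, 3 classes
teacher_logits = torch.randn(4, 3)
labels = torch.tensor([0, 2, 1, 0])  # dummy ground-truth labels

soft_teacher = F.softmax(teacher_logits / temperature, dim=-1)
soft_student = F.log_softmax(student_logits / temperature, dim=-1)
distillation_loss = F.kl_div(soft_student, soft_teacher, reduction="batchmean") * temperature**2
student_target_loss = F.cross_entropy(student_logits, labels)
loss = (1.0 - lambda_param) * student_target_loss + lambda_param * distillation_loss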
from transformers import TrainingArguments, Trainer
import torch
import torch.nn as nn
import torch.nn.functional as F


class ImageDistilTrainer(Trainer):
    def __init__(self, teacher_model=None, student_model=None, temperature=None, lambda_param=None, *args, **kwargs):
        super().__init__(model=student_model, *args, **kwargs)
        self.teacher = teacher_model
        self.student = student_model
        self.loss_function = nn.KLDivLoss(reduction="batchmean")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.teacher.to(device)
        self.teacher.eval()
        self.temperature = temperature
        self.lambda_param = lambda_param

    def compute_loss(self, student, inputs, return_outputs=False):
        student_output = self.student(**inputs)

        with torch.no_grad():
            teacher_output = self.teacher(**inputs)

        # Compute soft targets for teacher and student
        soft_teacher = F.softmax(teacher_output.logits / self.temperature, dim=-1)
        soft_student = F.log_softmax(student_output.logits / self.temperature, dim=-1)

        # Compute the distillation loss
        distillation_loss = self.loss_function(soft_student, soft_teacher) * (self.temperature ** 2)

        # Compute the true label loss
        student_target_loss = student_output.loss

        # Calculate final loss
        loss = (1. - self.lambda_param) * student_target_loss + self.lambda_param * distillation_loss
        return (loss, student_output) if return_outputs else loss
We will now log in to the Hugging Face Hub so we can push our model to it through the Trainer.
from huggingface_hub import notebook_login
notebook_login()
Let's set the TrainingArguments, the teacher model and the student model.
from transformers import AutoModelForImageClassification, MobileNetV2Config, MobileNetV2ForImageClassification
training_args = TrainingArguments(
output_dir="my-awesome-model",
num_train_epochs=30,
fp16=True,
logging_dir=f"{repo_name}/logs",
logging_strategy="epoch",
evaluation_strategy="epoch",
save_strategy="epoch",
load_best_model_at_end=True,
metric_for_best_model="accuracy",
report_to="tensorboard",
push_to_hub=True,
hub_strategy="every_save",
hub_model_id=repo_name,
)
num_labels = len(processed_datasets["train"].features["labels"].names)

# initialize models
teacher_model = AutoModelForImageClassification.from_pretrained(
    "merve/beans-vit-224",
    num_labels=num_labels,
    ignore_mismatched_sizes=True
)

# training MobileNetV2 from scratch
student_config = MobileNetV2Config()
student_config.num_labels = num_labels
student_model = MobileNetV2ForImageClassification(student_config)
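As a quick check that distillation makes sense here, you can compare the parameter counts of the two models; the ViT teacher is more than an order of magnitude larger than the MobileNetV2 student:
teacher_params = sum(p.numel() for p in teacher_model.parameters())
student_params = sum(p.numel() for p in student_model.parameters())
print(f"teacher: {teacher_params / 1e6:.1f}M parameters, student: {student_params / 1e6:.1f}M parameters")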
We can use the compute_metrics function to evaluate our model on the test set. This function will be used during the training process to compute the accuracy of our model.
import evaluate
import numpy as np

accuracy = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    acc = accuracy.compute(references=labels, predictions=np.argmax(predictions, axis=1))
    return {"accuracy": acc["accuracy"]}
Let's initialize the Trainer with the training arguments we defined. We will also initialize our data collator.
from transformers import DefaultDataCollator

data_collator = DefaultDataCollator()
trainer = ImageDistilTrainer(
    student_model=student_model,
    teacher_model=teacher_model,
    args=training_args,
    train_dataset=processed_datasets["train"],
    eval_dataset=processed_datasets["validation"],
    data_collator=data_collator,
    tokenizer=teacher_processor,
    compute_metrics=compute_metrics,
    temperature=5,
    lambda_param=0.5
)
We can now train our model.
trainer.train()
We can evaluate the model on the test set.
trainer.evaluate(processed_datasets["test"])
On the test set, our model reaches 72 percent accuracy. To have a sanity check over the efficiency of distillation, we also trained MobileNet on the beans dataset from scratch with the same hyperparameters and observed 63 percent accuracy on the test set. We invite the readers to try different pre-trained teacher models, student architectures, distillation parameters and report their findings. The training logs and checkpoints for the distilled model can be found in this repository, and MobileNetV2 trained from scratch can be found in this repository.
Zero-shot image classification
[[open-in-colab]]
Zero-shot image classification is a task that involves classifying images into different categories using a model that was
not explicitly trained on data containing labeled examples from those specific categories.
Traditionally, image classification requires training a model on a specific set of labeled images, and this model learns to
"map" certain image features to labels. When there's a need to use such model for a classification task that introduces a
new set of labels, fine-tuning is required to "recalibrate" the model.
In contrast, zero-shot or open vocabulary image classification models are typically multi-modal models that have been trained on a large
dataset of images and associated descriptions. These models learn aligned vision-language representations that can be used for many downstream tasks including zero-shot image classification.
This is a more flexible approach to image classification that allows models to generalize to new and unseen categories
without the need for additional training data and enables users to query images with free-form text descriptions of their target objects.
In this guide you'll learn how to:
create a zero-shot image classification pipeline
run zero-shot image classification inference by hand
Before you begin, make sure you have all the necessary libraries installed:
pip install -q transformers
Zero-shot image classification pipeline
The simplest way to try out inference with a model supporting zero-shot image classification is to use the corresponding [pipeline].
Instantiate a pipeline from a checkpoint on the Hugging Face Hub:
from transformers import pipeline
checkpoint = "openai/clip-vit-large-patch14"
detector = pipeline(model=checkpoint, task="zero-shot-image-classification")
Next, choose an image you'd like to classify.
from PIL import Image
import requests
url = "https://unsplash.com/photos/g8oS8-82DxI/download?ixid=MnwxMjA3fDB8MXx0b3BpY3x8SnBnNktpZGwtSGt8fHx8fDJ8fDE2NzgxMDYwODc&force=true&w=640"
image = Image.open(requests.get(url, stream=True).raw)
image
Pass the image and the candidate object labels to the pipeline. Here we pass the image directly; other suitable options
include a local path to an image or an image url.
The candidate labels can be simple words like in this example, or more descriptive.
predictions = detector(image, candidate_labels=["fox", "bear", "seagull", "owl"])
predictions
[{'score': 0.9996670484542847, 'label': 'owl'},
{'score': 0.000199399160919711, 'label': 'seagull'},
{'score': 7.392891711788252e-05, 'label': 'fox'},
{'score': 5.96074532950297e-05, 'label': 'bear'}]
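The candidate labels can also be phrased as short descriptions; for example, with hypothetical prompt-style labels (the scores will differ from the ones above):
predictions = detector(
    image,
    candidate_labels=["a photo of an owl", "a photo of a seagull", "a photo of a fox", "a photo of a bear"],
)
predictions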
Zero-shot image classification by hand
Now that you've seen how to use the zero-shot image classification pipeline, let's take a look how you can run zero-shot
image classification manually.
Start by loading the model and associated processor from a checkpoint on the Hugging Face Hub.
Here we'll use the same checkpoint as before:
from transformers import AutoProcessor, AutoModelForZeroShotImageClassification
model = AutoModelForZeroShotImageClassification.from_pretrained(checkpoint)
processor = AutoProcessor.from_pretrained(checkpoint)
Let's take a different image to switch things up.
from PIL import Image
import requests
url = "https://unsplash.com/photos/xBRQfR2bqNI/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjc4Mzg4ODEx&force=true&w=640"
image = Image.open(requests.get(url, stream=True).raw)
image
Use the processor to prepare the inputs for the model. The processor combines an image processor that prepares the
image for the model by resizing and normalizing it, and a tokenizer that takes care of the text inputs.
candidate_labels = ["tree", "car", "bike", "cat"]
inputs = processor(images=image, text=candidate_labels, return_tensors="pt", padding=True)
Pass the inputs through the model, and post-process the results:
import torch

with torch.no_grad():
    outputs = model(**inputs)

logits = outputs.logits_per_image[0]
probs = logits.softmax(dim=-1).numpy()
scores = probs.tolist()

result = [
    {"score": score, "label": candidate_label}
    for score, candidate_label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0])
]

result
[{'score': 0.998572, 'label': 'car'},
 {'score': 0.0010570387, 'label': 'bike'},
 {'score': 0.0003393686, 'label': 'tree'},
 {'score': 3.1572064e-05, 'label': 'cat'}]
Visual Question Answering
[[open-in-colab]]
Visual Question Answering (VQA) is the task of answering open-ended questions based on an image.
The input to models supporting this task is typically a combination of an image and a question, and the output is an
answer expressed in natural language.
Some noteworthy use case examples for VQA include:
* Accessibility applications for visually impaired individuals.
* Education: posing questions about visual materials presented in lectures or textbooks. VQA can also be utilized in interactive museum exhibits or historical sites.
* Customer service and e-commerce: VQA can enhance user experience by letting users ask questions about products.
* Image retrieval: VQA models can be used to retrieve images with specific characteristics. For example, the user can ask "Is there a dog?" to find all images with dogs from a set of images.
In this guide you'll learn how to: