diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..9155161c081d239a7203ddeff07605215f8616c8
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Jie Lei
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4fe2fd9d14a0ad2994f3696ea9b0fbf046cd364
--- /dev/null
+++ b/app.py
@@ -0,0 +1,38 @@
+import gradio as gr
+
+TITLE = """
✍️ Highlight Detection with MomentDETR
"""
+
+def submit_video(input_video, retrieval_text):
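+    # Placeholder callback: log the inputs and echo the input video back to the results player.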
+ print(input_video)
+ print(retrieval_text)
+ return input_video
+
+
+with gr.Blocks() as demo:
+ gr.HTML(TITLE)
+ with gr.Row():
+ with gr.Blocks():
+ with gr.Column():
+ gr.Markdown("### Input Video")
+ input_video = gr.PlayableVideo().style(height=500)
+ retrieval_text = gr.Textbox(
+ placeholder="What should be highlighted?",
+ visible=True
+ )
+                submit = gr.Button("Submit")
+ with gr.Blocks():
+ with gr.Column():
+ gr.Markdown("### Results")
+ with gr.Row():
+ output_video = gr.PlayableVideo().style(height=500)
+
+ submit.click(
+ fn=submit_video,
+ inputs=[input_video, retrieval_text],
+ outputs=[output_video]
+ )
+
+demo.launch()
\ No newline at end of file
diff --git a/data/LICENSE b/data/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..bfef380bf7d9cb74ec9ba533b37c3fbeef3bdc09
--- /dev/null
+++ b/data/LICENSE
@@ -0,0 +1,437 @@
+Attribution-NonCommercial-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International
+Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-NonCommercial-ShareAlike 4.0 International Public License
+("Public License"). To the extent this Public License may be
+interpreted as a contract, You are granted the Licensed Rights in
+consideration of Your acceptance of these terms and conditions, and the
+Licensor grants You such rights in consideration of benefits the
+Licensor receives from making the Licensed Material available under
+these terms and conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-NC-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution, NonCommercial, and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. NonCommercial means not primarily intended for or directed towards
+ commercial advantage or monetary compensation. For purposes of
+ this Public License, the exchange of the Licensed Material for
+ other material subject to Copyright and Similar Rights by digital
+ file-sharing or similar means is NonCommercial provided there is
+ no payment of monetary compensation in connection with the
+ exchange.
+
+ l. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ m. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ n. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part, for NonCommercial purposes only; and
+
+ b. produce, reproduce, and Share Adapted Material for
+ NonCommercial purposes only.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties, including when
+ the Licensed Material is used other than for NonCommercial
+ purposes.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-NC-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database for NonCommercial purposes
+ only;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+ including for purposes of Section 3(b); and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+=======================================================================
+
+Creative Commons is not a party to its public
+licenses. Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the “Licensor.” The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
\ No newline at end of file
diff --git a/data/README.md b/data/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6660f0ce3cc65be6f85b43996be6dedb7331bdc
--- /dev/null
+++ b/data/README.md
@@ -0,0 +1,26 @@
+## QVHighlights Dataset
+
+All raw video data can be downloaded from this [link](https://nlp.cs.unc.edu/data/jielei/qvh/qvhilights_videos.tar.gz).
+
+Our annotation files include 3 splits: `train`, `val` and `test`. Each file is in [JSON Lines](https://jsonlines.org/) format; each row can be loaded as a single Python `dict`. Below is an example of the annotation:
+
+```
+{
+ "qid": 8737,
+ "query": "A family is playing basketball together on a green court outside.",
+ "duration": 126,
+ "vid": "bP5KfdFJzC4_660.0_810.0",
+ "relevant_windows": [[0, 16]],
+ "relevant_clip_ids": [0, 1, 2, 3, 4, 5, 6, 7],
+ "saliency_scores": [[4, 1, 1], [4, 1, 1], [4, 2, 1], [4, 3, 2], [4, 3, 2], [4, 3, 3], [4, 3, 3], [4, 3, 2]]
+}
+```
+`qid` is a unique identifier of a `query`. This query corresponds to a video identified by its video id `vid`. The `vid` is formatted as `{youtube_id}_{start_time}_{end_time}`. Using this information, one can retrieve the YouTube video from the URL `https://www.youtube.com/embed/{youtube_id}?start={start_time}&end={end_time}&version=3`. For example, the video in this example is `https://www.youtube.com/embed/bP5KfdFJzC4?start=660&end=810&version=3`.
+`duration` is an integer indicating the duration of this video.
+`relevant_windows` is the list of windows that localize the moments; each window has two numbers, the start time and the end time of the moment. `relevant_clip_ids` is the list of ids of the segmented 2-second clips that fall into the moments specified by `relevant_windows`, starting from 0.
+`saliency_scores` contains the saliency score annotations; each sublist corresponds to a clip in `relevant_clip_ids`. Each sublist has 3 elements, the scores from three different annotators. A score of `4` means `Very Good`, while `0` means `Very Bad`. The snippet below sketches how these fields fit together, including how to rebuild the video URL from `vid`.
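+
+A minimal sketch of working with one annotation entry (the variable names and the 2-second `clip_len` here are ours, not part of the dataset tools; the entry is the example above):
+
+```
+example = {
+    "qid": 8737,
+    "vid": "bP5KfdFJzC4_660.0_810.0",
+    "duration": 126,
+    "relevant_windows": [[0, 16]],
+    "relevant_clip_ids": [0, 1, 2, 3, 4, 5, 6, 7],
+    "saliency_scores": [[4, 1, 1], [4, 1, 1], [4, 2, 1], [4, 3, 2],
+                        [4, 3, 2], [4, 3, 3], [4, 3, 3], [4, 3, 2]],
+}
+
+# rebuild the YouTube embed URL from the vid field "{youtube_id}_{start_time}_{end_time}"
+youtube_id, start, end = example["vid"].rsplit("_", 2)
+url = (f"https://www.youtube.com/embed/{youtube_id}"
+       f"?start={int(float(start))}&end={int(float(end))}&version=3")
+
+# derive the 2-second clip ids covered by the relevant windows
+clip_len = 2
+derived_clip_ids = [i for st, ed in example["relevant_windows"]
+                    for i in range(st // clip_len, ed // clip_len)]
+assert derived_clip_ids == example["relevant_clip_ids"]
+
+# average the three annotator scores for each relevant clip
+avg_saliency = {cid: sum(s) / len(s)
+                for cid, s in zip(example["relevant_clip_ids"], example["saliency_scores"])}
+```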
+
+Note that the three fields `relevant_clip_ids`, `relevant_windows` and `saliency_scores` are not included for the `test` split. Please refer to [../standalone_eval/README.md](../standalone_eval/README.md) for details on evaluating predictions on `test`.
+
+In addition to the annotation files, we also provide the subtitle file for our weakly supervised ASR pre-training: [subs_train.jsonl](./subs_train.jsonl). This file is formatted similarly to our annotation files, but without the `saliency_scores` entry. This file is not needed if you do not plan to pretrain models using it.
+
diff --git a/data/highlight_test_release.jsonl b/data/highlight_test_release.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..99be557e5355ea7d582aa740807ea8d8f4326752
--- /dev/null
+++ b/data/highlight_test_release.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0462f0cb582b54913671ba5ebfc325931ffa6d422c60cb72520749fc6d5d05a3
+size 204566
diff --git a/data/highlight_train_release.jsonl b/data/highlight_train_release.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..d2c9ce90bd2d92b7b8d00966b10aba7e6a8eca5a
--- /dev/null
+++ b/data/highlight_train_release.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd28404187468f57cf99242be2963127dc9b4aef26c7de3cb9469569801f2625
+size 3956580
diff --git a/data/highlight_val_release.jsonl b/data/highlight_val_release.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b7097e6b1c5ef4bd8abcc9a668513c73aeb939ab
--- /dev/null
+++ b/data/highlight_val_release.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f668a1eaea156ec5315e14718999cea043a8cf948d3cafbd8e8d655318c3cd02
+size 821101
diff --git a/data/subs_train.jsonl b/data/subs_train.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..a4672bc9d35550f37690eab9a20ff7b0ae8b28fd
--- /dev/null
+++ b/data/subs_train.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33086f20181724a477d7da5b2a063e7935d04d886548917b8ba9912f5a0c7dc5
+size 46786687
diff --git a/moment_detr/__init__.py b/moment_detr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/moment_detr/config.py b/moment_detr/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb12952b5a9533553f18a79582fefd011274efe1
--- /dev/null
+++ b/moment_detr/config.py
@@ -0,0 +1,226 @@
+import os
+import time
+import torch
+import argparse
+
+from utils.basic_utils import mkdirp, load_json, save_json, make_zipfile, dict_to_markdown
+
+
+class BaseOptions(object):
+ saved_option_filename = "opt.json"
+ ckpt_filename = "model.ckpt"
+ tensorboard_log_dir = "tensorboard_log"
+ train_log_filename = "train.log.txt"
+ eval_log_filename = "eval.log.txt"
+
+ def __init__(self):
+ self.parser = None
+ self.initialized = False
+ self.opt = None
+
+ def initialize(self):
+ self.initialized = True
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--dset_name", type=str, choices=["hl"])
+ parser.add_argument("--eval_split_name", type=str, default="val",
+                            help="should match keys in video_duration_idx_path, must be set for VCMR")
+ parser.add_argument("--debug", action="store_true",
+ help="debug (fast) mode, break all loops, do not load all data into memory.")
+ parser.add_argument("--data_ratio", type=float, default=1.0,
+                            help="how much of the training and eval data to use. 1.0: use all, 0.1: use 10%. "
+                                 "Use a small portion for debug purposes. Note this is different from --debug, "
+ "which works by breaking the loops, typically they are not used together.")
+ parser.add_argument("--results_root", type=str, default="results")
+ parser.add_argument("--exp_id", type=str, default=None, help="id of this run, required at training")
+ parser.add_argument("--seed", type=int, default=2018, help="random seed")
+ parser.add_argument("--device", type=int, default=0, help="0 cuda, -1 cpu")
+ parser.add_argument("--num_workers", type=int, default=4,
+ help="num subprocesses used to load the data, 0: use main process")
+ parser.add_argument("--no_pin_memory", action="store_true",
+ help="Don't use pin_memory=True for dataloader. "
+ "ref: https://discuss.pytorch.org/t/should-we-set-non-blocking-to-true/38234/4")
+
+ # training config
+ parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
+ parser.add_argument("--lr_drop", type=int, default=400, help="drop learning rate to 1/10 every lr_drop epochs")
+ parser.add_argument("--wd", type=float, default=1e-4, help="weight decay")
+ parser.add_argument("--n_epoch", type=int, default=200, help="number of epochs to run")
+ parser.add_argument("--max_es_cnt", type=int, default=200,
+ help="number of epochs to early stop, use -1 to disable early stop")
+ parser.add_argument("--bsz", type=int, default=32, help="mini-batch size")
+ parser.add_argument("--eval_bsz", type=int, default=100,
+ help="mini-batch size at inference, for query")
+ parser.add_argument("--grad_clip", type=float, default=0.1, help="perform gradient clip, -1: disable")
+ parser.add_argument("--eval_untrained", action="store_true", help="Evaluate on un-trained model")
+ parser.add_argument("--resume", type=str, default=None,
+                            help="checkpoint path to resume or evaluate, without --resume_all this only loads weights")
+ parser.add_argument("--resume_all", action="store_true",
+ help="if --resume_all, load optimizer/scheduler/epoch as well")
+ parser.add_argument("--start_epoch", type=int, default=None,
+ help="if None, will be set automatically when using --resume_all")
+
+ # Data config
+ parser.add_argument("--max_q_l", type=int, default=32)
+ parser.add_argument("--max_v_l", type=int, default=75)
+ parser.add_argument("--clip_length", type=int, default=2)
+ parser.add_argument("--max_windows", type=int, default=5)
+
+ parser.add_argument("--train_path", type=str, default=None)
+ parser.add_argument("--eval_path", type=str, default=None,
+                            help="Evaluate during training, for the dev set. If None, will only do training.")
+ parser.add_argument("--no_norm_vfeat", action="store_true", help="Do not do normalize video feat")
+ parser.add_argument("--no_norm_tfeat", action="store_true", help="Do not do normalize text feat")
+ parser.add_argument("--v_feat_dirs", type=str, nargs="+",
+ help="video feature dirs. If more than one, will concat their features. "
+ "Note that sub ctx features are also accepted here.")
+ parser.add_argument("--t_feat_dir", type=str, help="text/query feature dir")
+ parser.add_argument("--v_feat_dim", type=int, help="video feature dim")
+ parser.add_argument("--t_feat_dim", type=int, help="text/query feature dim")
+ parser.add_argument("--ctx_mode", type=str, default="video_tef")
+
+ # Model config
+ parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
+ help="Type of positional embedding to use on top of the image features")
+ # * Transformer
+ parser.add_argument('--enc_layers', default=2, type=int,
+ help="Number of encoding layers in the transformer")
+ parser.add_argument('--dec_layers', default=2, type=int,
+ help="Number of decoding layers in the transformer")
+ parser.add_argument('--dim_feedforward', default=1024, type=int,
+ help="Intermediate size of the feedforward layers in the transformer blocks")
+ parser.add_argument('--hidden_dim', default=256, type=int,
+ help="Size of the embeddings (dimension of the transformer)")
+ parser.add_argument('--input_dropout', default=0.5, type=float,
+ help="Dropout applied in input")
+ parser.add_argument('--dropout', default=0.1, type=float,
+ help="Dropout applied in the transformer")
+ parser.add_argument("--txt_drop_ratio", default=0, type=float,
+ help="drop txt_drop_ratio tokens from text input. 0.1=10%")
+ parser.add_argument("--use_txt_pos", action="store_true", help="use position_embedding for text as well.")
+ parser.add_argument('--nheads', default=8, type=int,
+ help="Number of attention heads inside the transformer's attentions")
+ parser.add_argument('--num_queries', default=10, type=int,
+ help="Number of query slots")
+ parser.add_argument('--pre_norm', action='store_true')
+ # other model configs
+ parser.add_argument("--n_input_proj", type=int, default=2, help="#layers to encoder input")
+ parser.add_argument("--contrastive_hdim", type=int, default=64, help="dim for contrastive embeddings")
+ parser.add_argument("--temperature", type=float, default=0.07, help="temperature nce contrastive_align_loss")
+ # Loss
+ parser.add_argument("--lw_saliency", type=float, default=1.,
+                            help="weight for saliency loss; set to 0 to ignore")
+ parser.add_argument("--saliency_margin", type=float, default=0.2)
+ parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
+ help="Disables auxiliary decoding losses (loss at each layer)")
+ parser.add_argument("--span_loss_type", default="l1", type=str, choices=['l1', 'ce'],
+ help="l1: (center-x, width) regression. ce: (st_idx, ed_idx) classification.")
+ parser.add_argument("--contrastive_align_loss", action="store_true",
+                            help="Enable contrastive_align_loss between matched query spans and the text.")
+ # * Matcher
+ parser.add_argument('--set_cost_span', default=10, type=float,
+ help="L1 span coefficient in the matching cost")
+ parser.add_argument('--set_cost_giou', default=1, type=float,
+ help="giou span coefficient in the matching cost")
+ parser.add_argument('--set_cost_class', default=4, type=float,
+ help="Class coefficient in the matching cost")
+
+ # * Loss coefficients
+ parser.add_argument('--span_loss_coef', default=10, type=float)
+ parser.add_argument('--giou_loss_coef', default=1, type=float)
+ parser.add_argument('--label_loss_coef', default=4, type=float)
+ parser.add_argument('--eos_coef', default=0.1, type=float,
+ help="Relative classification weight of the no-object class")
+ parser.add_argument("--contrastive_align_loss_coef", default=0.0, type=float)
+
+ parser.add_argument("--no_sort_results", action="store_true",
+ help="do not sort results, use this for moment query visualization")
+ parser.add_argument("--max_before_nms", type=int, default=10)
+ parser.add_argument("--max_after_nms", type=int, default=10)
+ parser.add_argument("--conf_thd", type=float, default=0.0, help="only keep windows with conf >= conf_thd")
+ parser.add_argument("--nms_thd", type=float, default=-1,
+ help="additionally use non-maximum suppression "
+                                 "(or non-minimum suppression for distance) "
+                                 "to post-process the predictions. "
+ "-1: do not use nms. [0, 1]")
+ self.parser = parser
+
+ def display_save(self, opt):
+ args = vars(opt)
+ # Display settings
+ print(dict_to_markdown(vars(opt), max_str_len=120))
+ # Save settings
+ if not isinstance(self, TestOptions):
+ option_file_path = os.path.join(opt.results_dir, self.saved_option_filename) # not yaml file indeed
+ save_json(args, option_file_path, save_pretty=True)
+
+ def parse(self):
+ if not self.initialized:
+ self.initialize()
+ opt = self.parser.parse_args()
+
+ if opt.debug:
+ opt.results_root = os.path.sep.join(opt.results_root.split(os.path.sep)[:-1] + ["debug_results", ])
+ opt.num_workers = 0
+
+ if isinstance(self, TestOptions):
+ # modify model_dir to absolute path
+ # opt.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "results", opt.model_dir)
+ opt.model_dir = os.path.dirname(opt.resume)
+ saved_options = load_json(os.path.join(opt.model_dir, self.saved_option_filename))
+ for arg in saved_options: # use saved options to overwrite all BaseOptions args.
+ if arg not in ["results_root", "num_workers", "nms_thd", "debug", # "max_before_nms", "max_after_nms"
+ "max_pred_l", "min_pred_l",
+ "resume", "resume_all", "no_sort_results"]:
+ setattr(opt, arg, saved_options[arg])
+ # opt.no_core_driver = True
+ if opt.eval_results_dir is not None:
+ opt.results_dir = opt.eval_results_dir
+ else:
+ if opt.exp_id is None:
+                raise ValueError("--exp_id is required for training!")
+
+ ctx_str = opt.ctx_mode + "_sub" if any(["sub_ctx" in p for p in opt.v_feat_dirs]) else opt.ctx_mode
+ opt.results_dir = os.path.join(opt.results_root,
+ "-".join([opt.dset_name, ctx_str, opt.exp_id,
+ time.strftime("%Y_%m_%d_%H_%M_%S")]))
+ mkdirp(opt.results_dir)
+ # save a copy of current code
+ code_dir = os.path.dirname(os.path.realpath(__file__))
+ code_zip_filename = os.path.join(opt.results_dir, "code.zip")
+ make_zipfile(code_dir, code_zip_filename,
+ enclosing_dir="code",
+ exclude_dirs_substring="results",
+ exclude_dirs=["results", "debug_results", "__pycache__"],
+ exclude_extensions=[".pyc", ".ipynb", ".swap"], )
+
+ self.display_save(opt)
+
+ opt.ckpt_filepath = os.path.join(opt.results_dir, self.ckpt_filename)
+ opt.train_log_filepath = os.path.join(opt.results_dir, self.train_log_filename)
+ opt.eval_log_filepath = os.path.join(opt.results_dir, self.eval_log_filename)
+ opt.tensorboard_log_dir = os.path.join(opt.results_dir, self.tensorboard_log_dir)
+ opt.device = torch.device("cuda" if opt.device >= 0 else "cpu")
+ opt.pin_memory = not opt.no_pin_memory
+
+ opt.use_tef = "tef" in opt.ctx_mode
+ opt.use_video = "video" in opt.ctx_mode
+ if not opt.use_video:
+ opt.v_feat_dim = 0
+ if opt.use_tef:
+ opt.v_feat_dim += 2
+
+ self.opt = opt
+ return opt
+
+
+class TestOptions(BaseOptions):
+ """add additional options for evaluating"""
+
+ def initialize(self):
+ BaseOptions.initialize(self)
+ # also need to specify --eval_split_name
+ self.parser.add_argument("--eval_id", type=str, help="evaluation id")
+ self.parser.add_argument("--eval_results_dir", type=str, default=None,
+ help="dir to save results, if not set, fall back to training results_dir")
+ self.parser.add_argument("--model_dir", type=str,
+ help="dir contains the model file, will be converted to absolute path afterwards")
diff --git a/moment_detr/inference.py b/moment_detr/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..87617a82183e4445fb12b60ea93d8cbfd7d45126
--- /dev/null
+++ b/moment_detr/inference.py
@@ -0,0 +1,259 @@
+import pprint
+from tqdm import tqdm, trange
+import numpy as np
+import os
+from collections import OrderedDict, defaultdict
+from utils.basic_utils import AverageMeter
+
+import torch
+import torch.nn.functional as F
+import torch.backends.cudnn as cudnn
+from torch.utils.data import DataLoader
+
+from moment_detr.config import TestOptions
+from moment_detr.model import build_model
+from moment_detr.span_utils import span_cxw_to_xx
+from moment_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs
+from moment_detr.postprocessing_moment_detr import PostProcessorDETR
+from standalone_eval.eval import eval_submission
+from utils.basic_utils import save_jsonl, save_json
+from utils.temporal_nms import temporal_nms
+
+import logging
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ level=logging.INFO)
+
+
+def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms):
+ mr_res_after_nms = []
+ for e in mr_res:
+ e["pred_relevant_windows"] = temporal_nms(
+ e["pred_relevant_windows"][:max_before_nms],
+ nms_thd=nms_thd,
+ max_after_nms=max_after_nms
+ )
+ mr_res_after_nms.append(e)
+ return mr_res_after_nms
+
+
+def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename):
+ # IOU_THDS = (0.5, 0.7)
+ logger.info("Saving/Evaluating before nms results")
+ submission_path = os.path.join(opt.results_dir, save_submission_filename)
+ save_jsonl(submission, submission_path)
+
+ if opt.eval_split_name in ["val", "test"]: # since test_public has no GT
+ metrics = eval_submission(
+ submission, gt_data,
+ verbose=opt.debug, match_number=not opt.debug
+ )
+ save_metrics_path = submission_path.replace(".jsonl", "_metrics.json")
+ save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False)
+ latest_file_paths = [submission_path, save_metrics_path]
+ else:
+ metrics = None
+ latest_file_paths = [submission_path, ]
+
+ if opt.nms_thd != -1:
+ logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd))
+ submission_after_nms = post_processing_mr_nms(
+ submission, nms_thd=opt.nms_thd,
+ max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms
+ )
+
+ logger.info("Saving/Evaluating nms results")
+ submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd))
+ save_jsonl(submission_after_nms, submission_nms_path)
+ if opt.eval_split_name == "val":
+ metrics_nms = eval_submission(
+ submission_after_nms, gt_data,
+ verbose=opt.debug, match_number=not opt.debug
+ )
+ save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json")
+ save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False)
+ latest_file_paths += [submission_nms_path, save_metrics_nms_path]
+ else:
+ metrics_nms = None
+ latest_file_paths = [submission_nms_path, ]
+ else:
+ metrics_nms = None
+ return metrics, metrics_nms, latest_file_paths
+
+
+@torch.no_grad()
+def compute_mr_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None):
+ model.eval()
+ if criterion:
+ assert eval_loader.dataset.load_labels
+ criterion.eval()
+
+ loss_meters = defaultdict(AverageMeter)
+ write_tb = tb_writer is not None and epoch_i is not None
+
+ mr_res = []
+ for batch in tqdm(eval_loader, desc="compute st ed scores"):
+ query_meta = batch[0]
+ model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory)
+ outputs = model(**model_inputs)
+ prob = F.softmax(outputs["pred_logits"], -1) # (batch_size, #queries, #classes=2)
+ if opt.span_loss_type == "l1":
+ scores = prob[..., 0] # * (batch_size, #queries) foreground label is 0, we directly take it
+ pred_spans = outputs["pred_spans"] # (bsz, #queries, 2)
+ _saliency_scores = outputs["saliency_scores"].half() # (bsz, L)
+ saliency_scores = []
+ valid_vid_lengths = model_inputs["src_vid_mask"].sum(1).cpu().tolist()
+ for j in range(len(valid_vid_lengths)):
+ saliency_scores.append(_saliency_scores[j, :int(valid_vid_lengths[j])].tolist())
+ else:
+ bsz, n_queries = outputs["pred_spans"].shape[:2] # # (bsz, #queries, max_v_l *2)
+ pred_spans_logits = outputs["pred_spans"].view(bsz, n_queries, 2, opt.max_v_l)
+ # TODO use more advanced decoding method with st_ed product
+ pred_span_scores, pred_spans = F.softmax(pred_spans_logits, dim=-1).max(-1) # 2 * (bsz, #queries, 2)
+ scores = torch.prod(pred_span_scores, 2) # (bsz, #queries)
+ pred_spans[:, 1] += 1
+ pred_spans *= opt.clip_length
+
+ # compose predictions
+ for idx, (meta, spans, score) in enumerate(zip(query_meta, pred_spans.cpu(), scores.cpu())):
+ if opt.span_loss_type == "l1":
+ spans = span_cxw_to_xx(spans) * meta["duration"]
+ # # (#queries, 3), [st(float), ed(float), score(float)]
+ cur_ranked_preds = torch.cat([spans, score[:, None]], dim=1).tolist()
+ if not opt.no_sort_results:
+ cur_ranked_preds = sorted(cur_ranked_preds, key=lambda x: x[2], reverse=True)
+ cur_ranked_preds = [[float(f"{e:.4f}") for e in row] for row in cur_ranked_preds]
+ cur_query_pred = dict(
+ qid=meta["qid"],
+ query=meta["query"],
+ vid=meta["vid"],
+ pred_relevant_windows=cur_ranked_preds,
+ pred_saliency_scores=saliency_scores[idx]
+ )
+ mr_res.append(cur_query_pred)
+
+ if criterion:
+ loss_dict = criterion(outputs, targets)
+ weight_dict = criterion.weight_dict
+ losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
+ loss_dict["loss_overall"] = float(losses) # for logging only
+ for k, v in loss_dict.items():
+ loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v))
+
+ if opt.debug:
+ break
+
+ if write_tb and criterion:
+ for k, v in loss_meters.items():
+ tb_writer.add_scalar("Eval/{}".format(k), v.avg, epoch_i + 1)
+
+ post_processor = PostProcessorDETR(
+ clip_length=2, min_ts_val=0, max_ts_val=150,
+ min_w_l=2, max_w_l=150, move_window_method="left",
+ process_func_names=("clip_ts", "round_multiple")
+ )
+ mr_res = post_processor(mr_res)
+ return mr_res, loss_meters
+
+
+def get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer):
+    """compute moment retrieval predictions and eval losses for the eval set"""
+ eval_res, eval_loss_meters = compute_mr_results(model, eval_loader, opt, epoch_i, criterion, tb_writer) # list(dict)
+ return eval_res, eval_loss_meters
+
+
+def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None):
+ logger.info("Generate submissions")
+ model.eval()
+ if criterion is not None and eval_dataset.load_labels:
+ criterion.eval()
+ else:
+ criterion = None
+
+ eval_loader = DataLoader(
+ eval_dataset,
+ collate_fn=start_end_collate,
+ batch_size=opt.eval_bsz,
+ num_workers=opt.num_workers,
+ shuffle=False,
+ pin_memory=opt.pin_memory
+ )
+
+ submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer)
+ if opt.no_sort_results:
+ save_submission_filename = save_submission_filename.replace(".jsonl", "_unsorted.jsonl")
+ metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing(
+ submission, opt, eval_dataset.data, save_submission_filename)
+ return metrics, metrics_nms, eval_loss_meters, latest_file_paths
+
+
+def setup_model(opt):
+ """setup model/optimizer/scheduler and load checkpoints when needed"""
+ logger.info("setup model/optimizer/scheduler")
+ model, criterion = build_model(opt)
+ if opt.device.type == "cuda":
+ logger.info("CUDA enabled.")
+ model.to(opt.device)
+ criterion.to(opt.device)
+
+ param_dicts = [{"params": [p for n, p in model.named_parameters() if p.requires_grad]}]
+ optimizer = torch.optim.AdamW(param_dicts, lr=opt.lr, weight_decay=opt.wd)
+ lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_drop)
+
+ if opt.resume is not None:
+ logger.info(f"Load checkpoint from {opt.resume}")
+ checkpoint = torch.load(opt.resume, map_location="cpu")
+ model.load_state_dict(checkpoint["model"])
+ if opt.resume_all:
+ optimizer.load_state_dict(checkpoint['optimizer'])
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
+ opt.start_epoch = checkpoint['epoch'] + 1
+ logger.info(f"Loaded model saved at epoch {checkpoint['epoch']} from checkpoint: {opt.resume}")
+ else:
+ logger.warning("If you intend to evaluate the model, please specify --resume with ckpt path")
+
+ return model, criterion, optimizer, lr_scheduler
+
+
+def start_inference():
+ logger.info("Setup config, data and model...")
+ opt = TestOptions().parse()
+ cudnn.benchmark = True
+ cudnn.deterministic = False
+
+ assert opt.eval_path is not None
+ eval_dataset = StartEndDataset(
+ dset_name=opt.dset_name,
+ data_path=opt.eval_path,
+ v_feat_dirs=opt.v_feat_dirs,
+ q_feat_dir=opt.t_feat_dir,
+ q_feat_type="last_hidden_state",
+ max_q_l=opt.max_q_l,
+ max_v_l=opt.max_v_l,
+ ctx_mode=opt.ctx_mode,
+ data_ratio=opt.data_ratio,
+ normalize_v=not opt.no_norm_vfeat,
+ normalize_t=not opt.no_norm_tfeat,
+ clip_len=opt.clip_length,
+ max_windows=opt.max_windows,
+ load_labels=True, # opt.eval_split_name == "val",
+ span_loss_type=opt.span_loss_type,
+ txt_drop_ratio=0
+ )
+
+ model, criterion, _, _ = setup_model(opt)
+ save_submission_filename = "inference_{}_{}_{}_preds.jsonl".format(
+ opt.dset_name, opt.eval_split_name, opt.eval_id)
+ logger.info("Starting inference...")
+ with torch.no_grad():
+ metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \
+ eval_epoch(model, eval_dataset, opt, save_submission_filename, criterion=criterion)
+ logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4)))
+ if metrics_nms is not None:
+ logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4)))
+
+
+if __name__ == '__main__':
+ start_inference()
diff --git a/moment_detr/matcher.py b/moment_detr/matcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d1eeebf94c25201fe366f8e924c3fbca8db5c87
--- /dev/null
+++ b/moment_detr/matcher.py
@@ -0,0 +1,107 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Modules to compute the matching cost and solve the corresponding LSAP.
+"""
+import torch
+from scipy.optimize import linear_sum_assignment
+from torch import nn
+import torch.nn.functional as F
+from moment_detr.span_utils import generalized_temporal_iou, span_cxw_to_xx
+
+
+class HungarianMatcher(nn.Module):
+ """This class computes an assignment between the targets and the predictions of the network
+
+ For efficiency reasons, the targets don't include the no_object. Because of this, in general,
+ there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
+ while the others are un-matched (and thus treated as non-objects).
+ """
+ def __init__(self, cost_class: float = 1, cost_span: float = 1, cost_giou: float = 1,
+ span_loss_type: str = "l1", max_v_l: int = 75):
+ """Creates the matcher
+
+ Params:
+ cost_span: This is the relative weight of the L1 error of the span coordinates in the matching cost
+ cost_giou: This is the relative weight of the giou loss of the spans in the matching cost
+ """
+ super().__init__()
+ self.cost_class = cost_class
+ self.cost_span = cost_span
+ self.cost_giou = cost_giou
+ self.span_loss_type = span_loss_type
+ self.max_v_l = max_v_l
+ self.foreground_label = 0
+        assert cost_class != 0 or cost_span != 0 or cost_giou != 0, "all costs can't be 0"
+
+ @torch.no_grad()
+ def forward(self, outputs, targets):
+ """ Performs the matching
+
+ Params:
+ outputs: This is a dict that contains at least these entries:
+ "pred_spans": Tensor of dim [batch_size, num_queries, 2] with the predicted span coordinates,
+ in normalized (cx, w) format
+                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
+
+ targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
+ "spans": Tensor of dim [num_target_spans, 2] containing the target span coordinates. The spans are
+ in normalized (cx, w) format
+
+ Returns:
+ A list of size batch_size, containing tuples of (index_i, index_j) where:
+ - index_i is the indices of the selected predictions (in order)
+ - index_j is the indices of the corresponding selected targets (in order)
+ For each batch element, it holds:
+ len(index_i) = len(index_j) = min(num_queries, num_target_spans)
+ """
+ bs, num_queries = outputs["pred_spans"].shape[:2]
+ targets = targets["span_labels"]
+
+ # Also concat the target labels and spans
+ out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
+ tgt_spans = torch.cat([v["spans"] for v in targets]) # [num_target_spans in batch, 2]
+ tgt_ids = torch.full([len(tgt_spans)], self.foreground_label) # [total #spans in the batch]
+
+ # Compute the classification cost. Contrary to the loss, we don't use the NLL,
+ # but approximate it in 1 - prob[target class].
+ # The 1 is a constant that doesn't change the matching, it can be omitted.
+ cost_class = -out_prob[:, tgt_ids] # [batch_size * num_queries, total #spans in the batch]
+
+ if self.span_loss_type == "l1":
+ # We flatten to compute the cost matrices in a batch
+ out_spans = outputs["pred_spans"].flatten(0, 1) # [batch_size * num_queries, 2]
+
+ # Compute the L1 cost between spans
+ cost_span = torch.cdist(out_spans, tgt_spans, p=1) # [batch_size * num_queries, total #spans in the batch]
+
+ # Compute the giou cost between spans
+ # [batch_size * num_queries, total #spans in the batch]
+ cost_giou = - generalized_temporal_iou(span_cxw_to_xx(out_spans), span_cxw_to_xx(tgt_spans))
+ else:
+ pred_spans = outputs["pred_spans"] # (bsz, #queries, max_v_l * 2)
+ pred_spans = pred_spans.view(bs * num_queries, 2, self.max_v_l).softmax(-1) # (bsz * #queries, 2, max_v_l)
+ cost_span = - pred_spans[:, 0][:, tgt_spans[:, 0]] - \
+ pred_spans[:, 1][:, tgt_spans[:, 1]] # (bsz * #queries, #spans)
+ # pred_spans = pred_spans.repeat(1, n_spans, 1, 1).flatten(0, 1) # (bsz * #queries * #spans, max_v_l, 2)
+ # tgt_spans = tgt_spans.view(1, n_spans, 2).repeat(bs * num_queries, 1, 1).flatten(0, 1) # (bsz * #queries * #spans, 2)
+ # cost_span = pred_spans[tgt_spans]
+ # cost_span = cost_span.view(bs * num_queries, n_spans)
+
+ # giou
+ cost_giou = 0
+
+ # Final cost matrix
+ # import ipdb; ipdb.set_trace()
+ C = self.cost_span * cost_span + self.cost_giou * cost_giou + self.cost_class * cost_class
+ C = C.view(bs, num_queries, -1).cpu()
+
+ sizes = [len(v["spans"]) for v in targets]
+ indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
+ return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
+
+
+def build_matcher(args):
+ return HungarianMatcher(
+ cost_span=args.set_cost_span, cost_giou=args.set_cost_giou,
+ cost_class=args.set_cost_class, span_loss_type=args.span_loss_type, max_v_l=args.max_v_l
+ )
diff --git a/moment_detr/misc.py b/moment_detr/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..09c4b2445ce614ff3ac371307aa2cc3494c3f862
--- /dev/null
+++ b/moment_detr/misc.py
@@ -0,0 +1,21 @@
+import torch
+
+
+@torch.no_grad()
+def accuracy(output, target, topk=(1,)):
+ """Computes the precision@k for the specified values of k
+ output: (#items, #classes)
+ target: int,
+ """
+ maxk = max(topk)
+ num_items = output.size(0)
+
+ _, pred = output.topk(maxk, 1, True, True)
+ pred = pred.t()
+ correct = pred.eq(target)
+
+ res = []
+ for k in topk:
+ correct_k = correct[:k].view(-1).float().sum(0)
+ res.append(correct_k.mul_(100.0 / num_items))
+ return res
diff --git a/moment_detr/model.py b/moment_detr/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..799d31c04732fb7f28e5fff35ef014db10fb8827
--- /dev/null
+++ b/moment_detr/model.py
@@ -0,0 +1,444 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+DETR model and criterion classes.
+"""
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+from moment_detr.span_utils import generalized_temporal_iou, span_cxw_to_xx
+
+from moment_detr.matcher import build_matcher
+from moment_detr.transformer import build_transformer
+from moment_detr.position_encoding import build_position_encoding
+from moment_detr.misc import accuracy
+
+
+class MomentDETR(nn.Module):
+ """ This is the Moment-DETR module that performs moment localization. """
+
+ def __init__(self, transformer, position_embed, txt_position_embed, txt_dim, vid_dim,
+ num_queries, input_dropout, aux_loss=False,
+ contrastive_align_loss=False, contrastive_hdim=64,
+ max_v_l=75, span_loss_type="l1", use_txt_pos=False, n_input_proj=2):
+ """ Initializes the model.
+ Parameters:
+ transformer: torch module of the transformer architecture. See transformer.py
+ position_embed: torch module of the position_embedding, See position_encoding.py
+ txt_position_embed: position_embedding for text
+ txt_dim: int, text query input dimension
+ vid_dim: int, video feature input dimension
+            num_queries: number of moment queries, i.e., detection slots. This is the maximal number of moments
+ Moment-DETR can detect in a single video.
+ aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
+ contrastive_align_loss: If true, perform span - tokens contrastive learning
+ contrastive_hdim: dimension used for projecting the embeddings before computing contrastive loss
+ max_v_l: int, maximum #clips in videos
+ span_loss_type: str, one of [l1, ce]
+ l1: (center-x, width) regression.
+ ce: (st_idx, ed_idx) classification.
+ # foreground_thd: float, intersection over prediction >= foreground_thd: labeled as foreground
+ # background_thd: float, intersection over prediction <= background_thd: labeled background
+ """
+ super().__init__()
+ self.num_queries = num_queries
+ self.transformer = transformer
+ self.position_embed = position_embed
+ self.txt_position_embed = txt_position_embed
+ hidden_dim = transformer.d_model
+ self.span_loss_type = span_loss_type
+ self.max_v_l = max_v_l
+ span_pred_dim = 2 if span_loss_type == "l1" else max_v_l * 2
+ self.span_embed = MLP(hidden_dim, hidden_dim, span_pred_dim, 3)
+ self.class_embed = nn.Linear(hidden_dim, 2) # 0: background, 1: foreground
+ self.use_txt_pos = use_txt_pos
+ self.n_input_proj = n_input_proj
+ # self.foreground_thd = foreground_thd
+ # self.background_thd = background_thd
+ self.query_embed = nn.Embedding(num_queries, hidden_dim)
+ relu_args = [True] * 3
+ relu_args[n_input_proj-1] = False
+ self.input_txt_proj = nn.Sequential(*[
+ LinearLayer(txt_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[0]),
+ LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[1]),
+ LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[2])
+ ][:n_input_proj])
+ self.input_vid_proj = nn.Sequential(*[
+ LinearLayer(vid_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[0]),
+ LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[1]),
+ LinearLayer(hidden_dim, hidden_dim, layer_norm=True, dropout=input_dropout, relu=relu_args[2])
+ ][:n_input_proj])
+ self.contrastive_align_loss = contrastive_align_loss
+ if contrastive_align_loss:
+ self.contrastive_align_projection_query = nn.Linear(hidden_dim, contrastive_hdim)
+ self.contrastive_align_projection_txt = nn.Linear(hidden_dim, contrastive_hdim)
+ self.contrastive_align_projection_vid = nn.Linear(hidden_dim, contrastive_hdim)
+
+ self.saliency_proj = nn.Linear(hidden_dim, 1)
+ self.aux_loss = aux_loss
+
+ def forward(self, src_txt, src_txt_mask, src_vid, src_vid_mask):
+ """The forward expects two tensors:
+ - src_txt: [batch_size, L_txt, D_txt]
+ - src_txt_mask: [batch_size, L_txt], containing 0 on padded pixels,
+ will convert to 1 as padding later for transformer
+ - src_vid: [batch_size, L_vid, D_vid]
+ - src_vid_mask: [batch_size, L_vid], containing 0 on padded pixels,
+ will convert to 1 as padding later for transformer
+
+ It returns a dict with the following elements:
+ - "pred_spans": The normalized boxes coordinates for all queries, represented as
+ (center_x, width). These values are normalized in [0, 1],
+ relative to the size of each individual image (disregarding possible padding).
+ See PostProcess for information on how to retrieve the unnormalized bounding box.
+ - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
+ dictionnaries containing the two above keys for each decoder layer.
+ """
+ src_vid = self.input_vid_proj(src_vid)
+ src_txt = self.input_txt_proj(src_txt)
+ src = torch.cat([src_vid, src_txt], dim=1) # (bsz, L_vid+L_txt, d)
+ mask = torch.cat([src_vid_mask, src_txt_mask], dim=1).bool() # (bsz, L_vid+L_txt)
+ # TODO should we remove or use different positional embeddings to the src_txt?
+ pos_vid = self.position_embed(src_vid, src_vid_mask) # (bsz, L_vid, d)
+ pos_txt = self.txt_position_embed(src_txt) if self.use_txt_pos else torch.zeros_like(src_txt) # (bsz, L_txt, d)
+ # pos_txt = torch.zeros_like(src_txt)
+ # pad zeros for txt positions
+ pos = torch.cat([pos_vid, pos_txt], dim=1)
+ # (#layers, bsz, #queries, d), (bsz, L_vid+L_txt, d)
+ hs, memory = self.transformer(src, ~mask, self.query_embed.weight, pos)
+ outputs_class = self.class_embed(hs) # (#layers, batch_size, #queries, #classes)
+ outputs_coord = self.span_embed(hs) # (#layers, bsz, #queries, 2 or max_v_l * 2)
+ if self.span_loss_type == "l1":
+ outputs_coord = outputs_coord.sigmoid()
+ out = {'pred_logits': outputs_class[-1], 'pred_spans': outputs_coord[-1]}
+
+ txt_mem = memory[:, src_vid.shape[1]:] # (bsz, L_txt, d)
+ vid_mem = memory[:, :src_vid.shape[1]] # (bsz, L_vid, d)
+ if self.contrastive_align_loss:
+ proj_queries = F.normalize(self.contrastive_align_projection_query(hs), p=2, dim=-1)
+ proj_txt_mem = F.normalize(self.contrastive_align_projection_txt(txt_mem), p=2, dim=-1)
+ proj_vid_mem = F.normalize(self.contrastive_align_projection_vid(vid_mem), p=2, dim=-1)
+ out.update(dict(
+ proj_queries=proj_queries[-1],
+ proj_txt_mem=proj_txt_mem,
+ proj_vid_mem=proj_vid_mem
+ ))
+
+ out["saliency_scores"] = self.saliency_proj(vid_mem).squeeze(-1) # (bsz, L_vid)
+
+ if self.aux_loss:
+ # assert proj_queries and proj_txt_mem
+ out['aux_outputs'] = [
+ {'pred_logits': a, 'pred_spans': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
+ if self.contrastive_align_loss:
+ assert proj_queries is not None
+ for idx, d in enumerate(proj_queries[:-1]):
+ out['aux_outputs'][idx].update(dict(proj_queries=d, proj_txt_mem=proj_txt_mem))
+ return out
+
+ # @torch.jit.unused
+ # def _set_aux_loss(self, outputs_class, outputs_coord):
+ # # this is a workaround to make torchscript happy, as torchscript
+ # # doesn't support dictionary with non-homogeneous values, such
+ # # as a dict having both a Tensor and a list.
+ # return [{'pred_logits': a, 'pred_spans': b}
+ # for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
+
+
+class SetCriterion(nn.Module):
+ """ This class computes the loss for DETR.
+ The process happens in two steps:
+ 1) we compute hungarian assignment between ground truth boxes and the outputs of the model
+ 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
+ """
+
+ def __init__(self, matcher, weight_dict, eos_coef, losses, temperature, span_loss_type, max_v_l,
+ saliency_margin=1):
+ """ Create the criterion.
+ Parameters:
+ matcher: module able to compute a matching between targets and proposals
+ weight_dict: dict containing as key the names of the losses and as values their relative weight.
+ eos_coef: relative classification weight applied to the no-object category
+ losses: list of all the losses to be applied. See get_loss for list of available losses.
+ temperature: float, temperature for NCE loss
+ span_loss_type: str, [l1, ce]
+ max_v_l: int,
+ saliency_margin: float
+ """
+ super().__init__()
+ self.matcher = matcher
+ self.weight_dict = weight_dict
+ self.losses = losses
+ self.temperature = temperature
+ self.span_loss_type = span_loss_type
+ self.max_v_l = max_v_l
+ self.saliency_margin = saliency_margin
+
+ # foreground and background classification
+ self.foreground_label = 0
+ self.background_label = 1
+ self.eos_coef = eos_coef
+ empty_weight = torch.ones(2)
+ empty_weight[-1] = self.eos_coef # lower weight for background (index 1, foreground index 0)
+ self.register_buffer('empty_weight', empty_weight)
+
+ def loss_spans(self, outputs, targets, indices):
+ """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
+ targets dicts must contain the key "spans" containing a tensor of dim [nb_tgt_spans, 2]
+ The target spans are expected in format (center_x, w), normalized by the image size.
+ """
+ assert 'pred_spans' in outputs
+ targets = targets["span_labels"]
+ idx = self._get_src_permutation_idx(indices)
+ src_spans = outputs['pred_spans'][idx] # (#spans, max_v_l * 2)
+ tgt_spans = torch.cat([t['spans'][i] for t, (_, i) in zip(targets, indices)], dim=0) # (#spans, 2)
+ if self.span_loss_type == "l1":
+ loss_span = F.l1_loss(src_spans, tgt_spans, reduction='none')
+ loss_giou = 1 - torch.diag(generalized_temporal_iou(span_cxw_to_xx(src_spans), span_cxw_to_xx(tgt_spans)))
+ else: # ce
+ n_spans = src_spans.shape[0]
+ src_spans = src_spans.view(n_spans, 2, self.max_v_l).transpose(1, 2)
+ loss_span = F.cross_entropy(src_spans, tgt_spans, reduction='none')
+
+ # giou
+ # src_span_indices = src_spans.max(1)[1] # (#spans, 2)
+ # src_span_indices[:, 1] += 1 # ed non-inclusive [st, ed)
+ #
+ # tgt_span_indices = tgt_spans
+ # tgt_span_indices[:, 1] += 1
+ # loss_giou = 1 - torch.diag(generalized_temporal_iou(src_span_indices, tgt_span_indices))
+ loss_giou = loss_span.new_zeros([1])
+
+ losses = {}
+ losses['loss_span'] = loss_span.mean()
+ losses['loss_giou'] = loss_giou.mean()
+ return losses
+
+ def loss_labels(self, outputs, targets, indices, log=True):
+ """Classification loss (NLL)
+ Queries matched to a ground-truth span are labeled foreground (0); all others background (1).
+ """
+ # TODO add foreground and background classifier. use all non-matched as background.
+ assert 'pred_logits' in outputs
+ src_logits = outputs['pred_logits'] # (batch_size, #queries, #classes=2)
+ # idx is a tuple of two 1D tensors (batch_idx, src_idx), of the same length == #objects in batch
+ idx = self._get_src_permutation_idx(indices)
+ target_classes = torch.full(src_logits.shape[:2], self.background_label,
+ dtype=torch.int64, device=src_logits.device) # (batch_size, #queries)
+ target_classes[idx] = self.foreground_label
+
+ loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight, reduction="none")
+ losses = {'loss_label': loss_ce.mean()}
+
+ if log:
+ # TODO this should probably be a separate loss, not hacked in this one here
+ losses['class_error'] = 100 - accuracy(src_logits[idx], self.foreground_label)[0]
+ return losses
+
+ def loss_saliency(self, outputs, targets, indices, log=True):
+ """higher scores for positive clips"""
+ if "saliency_pos_labels" not in targets:
+ return {"loss_saliency": 0}
+ saliency_scores = outputs["saliency_scores"] # (N, L)
+ pos_indices = targets["saliency_pos_labels"] # (N, #pairs)
+ neg_indices = targets["saliency_neg_labels"] # (N, #pairs)
+ num_pairs = pos_indices.shape[1] # typically 2 or 4
+ batch_indices = torch.arange(len(saliency_scores)).to(saliency_scores.device)
+ pos_scores = torch.stack(
+ [saliency_scores[batch_indices, pos_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1)
+ neg_scores = torch.stack(
+ [saliency_scores[batch_indices, neg_indices[:, col_idx]] for col_idx in range(num_pairs)], dim=1)
+ loss_saliency = torch.clamp(self.saliency_margin + neg_scores - pos_scores, min=0).sum() \
+ / (len(pos_scores) * num_pairs) * 2 # * 2 to keep the loss the same scale
+ return {"loss_saliency": loss_saliency}
+
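+ # Worked example of the hinge above (illustrative numbers): with saliency_margin=1, a
+ # positive clip scoring 0.2 and a negative clip scoring 0.5 contribute
+ # clamp(1 + 0.5 - 0.2, min=0) = 1.3 to the sum before it is averaged over pairs.
+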
+ def loss_contrastive_align(self, outputs, targets, indices, log=True):
+ """encourage higher scores between matched query span and input text"""
+ normalized_text_embed = outputs["proj_txt_mem"] # (bsz, #tokens, d) text tokens
+ normalized_img_embed = outputs["proj_queries"] # (bsz, #queries, d)
+ logits = torch.einsum(
+ "bmd,bnd->bmn", normalized_img_embed, normalized_text_embed) # (bsz, #queries, #tokens)
+ logits = logits.sum(2) / self.temperature # (bsz, #queries)
+ idx = self._get_src_permutation_idx(indices)
+ positive_map = torch.zeros_like(logits, dtype=torch.bool)
+ positive_map[idx] = True
+ positive_logits = logits.masked_fill(~positive_map, 0)
+
+ pos_term = positive_logits.sum(1) # (bsz, )
+ num_pos = positive_map.sum(1) # (bsz, )
+ neg_term = logits.logsumexp(1) # (bsz, )
+ loss_nce = - pos_term / num_pos + neg_term # (bsz, )
+ losses = {"loss_contrastive_align": loss_nce.mean()}
+ return losses
+
+ def loss_contrastive_align_vid_txt(self, outputs, targets, indices, log=True):
+ """encourage higher scores between matched query span and input text"""
+ # TODO (1) align vid_mem and txt_mem;
+ # TODO (2) change L1 loss as CE loss on 75 labels, similar to soft token prediction in MDETR
+ normalized_text_embed = outputs["proj_txt_mem"] # (bsz, #tokens, d) text tokens
+ normalized_img_embed = outputs["proj_queries"] # (bsz, #queries, d)
+ logits = torch.einsum(
+ "bmd,bnd->bmn", normalized_img_embed, normalized_text_embed) # (bsz, #queries, #tokens)
+ logits = logits.sum(2) / self.temperature # (bsz, #queries)
+ idx = self._get_src_permutation_idx(indices)
+ positive_map = torch.zeros_like(logits, dtype=torch.bool)
+ positive_map[idx] = True
+ positive_logits = logits.masked_fill(~positive_map, 0)
+
+ pos_term = positive_logits.sum(1) # (bsz, )
+ num_pos = positive_map.sum(1) # (bsz, )
+ neg_term = logits.logsumexp(1) # (bsz, )
+ loss_nce = - pos_term / num_pos + neg_term # (bsz, )
+ losses = {"loss_contrastive_align": loss_nce.mean()}
+ return losses
+
+ def _get_src_permutation_idx(self, indices):
+ # permute predictions following indices
+ batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
+ src_idx = torch.cat([src for (src, _) in indices])
+ return batch_idx, src_idx # two 1D tensors of the same length
+
+ def _get_tgt_permutation_idx(self, indices):
+ # permute targets following indices
+ batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
+ tgt_idx = torch.cat([tgt for (_, tgt) in indices])
+ return batch_idx, tgt_idx
+
+ def get_loss(self, loss, outputs, targets, indices, **kwargs):
+ loss_map = {
+ "spans": self.loss_spans,
+ "labels": self.loss_labels,
+ "contrastive_align": self.loss_contrastive_align,
+ "saliency": self.loss_saliency,
+ }
+ assert loss in loss_map, f'do you really want to compute {loss} loss?'
+ return loss_map[loss](outputs, targets, indices, **kwargs)
+
+ def forward(self, outputs, targets):
+ """ This performs the loss computation.
+ Parameters:
+ outputs: dict of tensors, see the output specification of the model for the format
+ targets: list of dicts, such that len(targets) == batch_size.
+ The expected keys in each dict depends on the losses applied, see each loss' doc
+ """
+ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
+
+ # Retrieve the matching between the outputs of the last layer and the targets
+ # list(tuples), each tuple is (pred_span_indices, tgt_span_indices)
+ indices = self.matcher(outputs_without_aux, targets)
+
+ # Compute all the requested losses
+ losses = {}
+ for loss in self.losses:
+ losses.update(self.get_loss(loss, outputs, targets, indices))
+
+ # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
+ if 'aux_outputs' in outputs:
+ for i, aux_outputs in enumerate(outputs['aux_outputs']):
+ indices = self.matcher(aux_outputs, targets)
+ for loss in self.losses:
+ if "saliency" == loss: # skip as it is only in the top layer
+ continue
+ kwargs = {}
+ l_dict = self.get_loss(loss, aux_outputs, targets, indices, **kwargs)
+ l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
+ losses.update(l_dict)
+
+ return losses
+
+
+class MLP(nn.Module):
+ """ Very simple multi-layer perceptron (also called FFN)"""
+
+ def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
+ super().__init__()
+ self.num_layers = num_layers
+ h = [hidden_dim] * (num_layers - 1)
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+
+ def forward(self, x):
+ for i, layer in enumerate(self.layers):
+ x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+ return x
+
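+# Example: MomentDETR builds its span head as MLP(hidden_dim, hidden_dim, span_pred_dim, 3),
+# i.e. Linear -> ReLU -> Linear -> ReLU -> Linear, with ReLU applied on all but the last layer.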
+
+class LinearLayer(nn.Module):
+ """linear layer configurable with layer normalization, dropout, ReLU."""
+
+ def __init__(self, in_hsz, out_hsz, layer_norm=True, dropout=0.1, relu=True):
+ super(LinearLayer, self).__init__()
+ self.relu = relu
+ self.layer_norm = layer_norm
+ if layer_norm:
+ self.LayerNorm = nn.LayerNorm(in_hsz)
+ layers = [
+ nn.Dropout(dropout),
+ nn.Linear(in_hsz, out_hsz)
+ ]
+ self.net = nn.Sequential(*layers)
+
+ def forward(self, x):
+ """(N, L, D)"""
+ if self.layer_norm:
+ x = self.LayerNorm(x)
+ x = self.net(x)
+ if self.relu:
+ x = F.relu(x, inplace=True)
+ return x # (N, L, D)
+
+
+def build_model(args):
+ # Note: the comment block inherited from DETR about `num_classes` (where it corresponds to
+ # `max_obj_id + 1`, e.g. 91 for COCO) does not apply here: Moment-DETR uses a fixed 2-way
+ # foreground/background classification head, so no `num_classes` argument is needed.
+ # For the original discussion, see
+ # https://github.com/facebookresearch/moment_detr/issues/108#issuecomment-650269223
+ device = torch.device(args.device)
+
+ transformer = build_transformer(args)
+ position_embedding, txt_position_embedding = build_position_encoding(args)
+
+ model = MomentDETR(
+ transformer,
+ position_embedding,
+ txt_position_embedding,
+ txt_dim=args.t_feat_dim,
+ vid_dim=args.v_feat_dim,
+ num_queries=args.num_queries,
+ input_dropout=args.input_dropout,
+ aux_loss=args.aux_loss,
+ contrastive_align_loss=args.contrastive_align_loss,
+ contrastive_hdim=args.contrastive_hdim,
+ span_loss_type=args.span_loss_type,
+ use_txt_pos=args.use_txt_pos,
+ n_input_proj=args.n_input_proj,
+ )
+
+ matcher = build_matcher(args)
+ weight_dict = {"loss_span": args.span_loss_coef,
+ "loss_giou": args.giou_loss_coef,
+ "loss_label": args.label_loss_coef,
+ "loss_saliency": args.lw_saliency}
+ if args.contrastive_align_loss:
+ weight_dict["loss_contrastive_align"] = args.contrastive_align_loss_coef
+ # TODO this is a hack
+ if args.aux_loss:
+ aux_weight_dict = {}
+ for i in range(args.dec_layers - 1):
+ aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items() if k != "loss_saliency"})
+ weight_dict.update(aux_weight_dict)
+
+ losses = ['spans', 'labels', 'saliency']
+ if args.contrastive_align_loss:
+ losses += ["contrastive_align"]
+ criterion = SetCriterion(
+ matcher=matcher, weight_dict=weight_dict, losses=losses,
+ eos_coef=args.eos_coef, temperature=args.temperature,
+ span_loss_type=args.span_loss_type, max_v_l=args.max_v_l,
+ saliency_margin=args.saliency_margin
+ )
+ criterion.to(device)
+ return model, criterion
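+
+
+# Hedged usage sketch: `args` is the options namespace parsed by BaseOptions (moment_detr/config.py);
+# the feature dimensions below are only illustrative, taken from the training scripts
+# (2304-d SlowFast + 512-d CLIP video features, 512-d CLIP text features).
+#   args.v_feat_dim, args.t_feat_dim = 2816, 512
+#   model, criterion = build_model(args)
+#   model.to(args.device)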
diff --git a/moment_detr/position_encoding.py b/moment_detr/position_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8bd6ad6b221e67b0b1ffa1c3841474f3c10b897
--- /dev/null
+++ b/moment_detr/position_encoding.py
@@ -0,0 +1,115 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+Various positional encodings for the transformer.
+"""
+import math
+import torch
+from torch import nn
+
+
+class TrainablePositionalEncoding(nn.Module):
+ """Construct the embeddings from word, position and token_type embeddings.
+ """
+ def __init__(self, max_position_embeddings, hidden_size, dropout=0.1):
+ super(TrainablePositionalEncoding, self).__init__()
+ self.position_embeddings = nn.Embedding(max_position_embeddings, hidden_size)
+ self.LayerNorm = nn.LayerNorm(hidden_size)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self, input_feat):
+ """
+ Args:
+ input_feat: (N, L, D)
+ """
+ bsz, seq_length = input_feat.shape[:2]
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=input_feat.device)
+ position_ids = position_ids.unsqueeze(0).repeat(bsz, 1) # (N, L)
+
+ position_embeddings = self.position_embeddings(position_ids)
+
+ embeddings = self.LayerNorm(input_feat + position_embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class PositionEmbeddingSine(nn.Module):
+ """
+ This is a more standard version of the position embedding, very similar to the one
+ used by the Attention Is All You Need paper, adapted here to 1D sequences.
+ """
+ def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
+ super().__init__()
+ self.num_pos_feats = num_pos_feats
+ self.temperature = temperature
+ self.normalize = normalize
+ if scale is not None and normalize is False:
+ raise ValueError("normalize should be True if scale is passed")
+ if scale is None:
+ scale = 2 * math.pi
+ self.scale = scale
+
+ def forward(self, x, mask):
+ """
+ Args:
+ x: torch.tensor, (batch_size, L, d)
+ mask: torch.tensor, (batch_size, L), with 1 as valid
+
+ Returns:
+
+ """
+ assert mask is not None
+ x_embed = mask.cumsum(1, dtype=torch.float32) # (bsz, L)
+ if self.normalize:
+ eps = 1e-6
+ x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
+
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
+
+ pos_x = x_embed[:, :, None] / dim_t # (bsz, L, num_pos_feats)
+ pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) # (bsz, L, num_pos_feats)
+ # import ipdb; ipdb.set_trace()
+ return pos_x # .permute(0, 2, 1) # (bsz, num_pos_feats*2, L)
+
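+# Worked example for PositionEmbeddingSine.forward (illustrative): for mask = [1, 1, 1, 0],
+# cumsum gives x_embed = [1, 2, 3, 3]; with normalize=True each value is scaled by
+# 2*pi / (3 + eps), so valid positions fall in (0, 2*pi] before the sin/cos encoding.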
+
+class PositionEmbeddingLearned(nn.Module):
+ """
+ Absolute pos embedding, learned.
+ """
+ def __init__(self, num_pos_feats=256):
+ super().__init__()
+ self.row_embed = nn.Embedding(50, num_pos_feats)
+ self.col_embed = nn.Embedding(50, num_pos_feats)
+ self.reset_parameters()
+
+ def reset_parameters(self):
+ nn.init.uniform_(self.row_embed.weight)
+ nn.init.uniform_(self.col_embed.weight)
+
+ def forward(self, x, mask):
+ h, w = x.shape[-2:]
+ i = torch.arange(w, device=x.device)
+ j = torch.arange(h, device=x.device)
+ x_emb = self.col_embed(i)
+ y_emb = self.row_embed(j)
+ pos = torch.cat([
+ x_emb.unsqueeze(0).repeat(h, 1, 1),
+ y_emb.unsqueeze(1).repeat(1, w, 1),
+ ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
+ return pos
+
+
+def build_position_encoding(args):
+ N_steps = args.hidden_dim
+ if args.position_embedding in ('v2', 'sine'):
+ # TODO find a better way of exposing other arguments
+ position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
+ # elif args.position_embedding in ('v3', 'learned'):
+ # position_embedding = PositionEmbeddingLearned(N_steps)
+ else:
+ raise ValueError(f"not supported {args.position_embedding}")
+
+ txt_pos_embed = TrainablePositionalEncoding(
+ max_position_embeddings=args.max_q_l,
+ hidden_size=args.hidden_dim, dropout=args.input_dropout)
+ return position_embedding, txt_pos_embed
diff --git a/moment_detr/postprocessing_moment_detr.py b/moment_detr/postprocessing_moment_detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6e828cef7d630d1ee09dcd261d09d99387c40c9
--- /dev/null
+++ b/moment_detr/postprocessing_moment_detr.py
@@ -0,0 +1,95 @@
+import pprint
+import numpy as np
+import torch
+from utils.basic_utils import load_jsonl
+from standalone_eval.eval import eval_submission
+from tqdm import tqdm
+
+
+class PostProcessorDETR:
+ def __init__(self, clip_length=2, min_ts_val=0, max_ts_val=150,
+ min_w_l=2, max_w_l=70, move_window_method="center",
+ process_func_names=("clip_window_l", "clip_ts", "round_multiple")):
+ self.clip_length = clip_length
+ self.min_ts_val = min_ts_val
+ self.max_ts_val = max_ts_val
+ self.min_w_l = min_w_l
+ self.max_w_l = max_w_l
+ self.move_window_method = move_window_method
+ self.process_func_names = process_func_names
+ self.name2func = dict(
+ clip_ts=self.clip_min_max_timestamps,
+ round_multiple=self.round_to_multiple_clip_lengths,
+ clip_window_l=self.clip_window_lengths
+ )
+
+ def __call__(self, lines):
+ processed_lines = []
+ for line in tqdm(lines, desc=f"convert to multiples of clip_length={self.clip_length}"):
+ windows_and_scores = torch.tensor(line["pred_relevant_windows"])
+ windows = windows_and_scores[:, :2]
+ for func_name in self.process_func_names:
+ windows = self.name2func[func_name](windows)
+ line["pred_relevant_windows"] = torch.cat(
+ [windows, windows_and_scores[:, 2:3]], dim=1).tolist()
+ line["pred_relevant_windows"] = [e[:2] + [float(f"{e[2]:.4f}")] for e in line["pred_relevant_windows"]]
+ processed_lines.append(line)
+ return processed_lines
+
+ def clip_min_max_timestamps(self, windows):
+ """
+ windows: (#windows, 2) torch.Tensor
+ ensure timestamps for all windows are within [min_ts_val, max_ts_val]; clip if out of bounds.
+ """
+ return torch.clamp(windows, min=self.min_ts_val, max=self.max_ts_val)
+
+ def round_to_multiple_clip_lengths(self, windows):
+ """
+ windows: (#windows, 2) torch.Tensor
+ ensure the final window timestamps are multiples of `clip_length`
+ """
+ return torch.round(windows / self.clip_length) * self.clip_length
+
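+ # Example (illustrative): with clip_length=2, a predicted window [13.3, 35.1] is snapped
+ # to [14., 36.], the nearest multiples of the clip length.
+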
+ def clip_window_lengths(self, windows):
+ """
+ windows: (#windows, 2) torch.Tensor
+ ensure the final window durations are within [self.min_w_l, self.max_w_l]
+ """
+ window_lengths = windows[:, 1] - windows[:, 0]
+ small_rows = window_lengths < self.min_w_l
+ if torch.sum(small_rows) > 0:
+ windows = self.move_windows(
+ windows, small_rows, self.min_w_l, move_method=self.move_window_method)
+ large_rows = window_lengths > self.max_w_l
+ if torch.sum(large_rows) > 0:
+ windows = self.move_windows(
+ windows, large_rows, self.max_w_l, move_method=self.move_window_method)
+ return windows
+
+ @classmethod
+ def move_windows(cls, windows, row_selector, new_length, move_method="left"):
+ """
+ Args:
+ windows:
+ row_selector:
+ new_length:
+ move_method: str,
+ left: keep left unchanged
+ center: keep center unchanged
+ right: keep right unchanged
+
+ Returns:
+
+ """
+ # import ipdb;
+ # ipdb.set_trace()
+ if move_method == "left":
+ windows[row_selector, 1] = windows[row_selector, 0] + new_length
+ elif move_method == "right":
+ windows[row_selector, 0] = windows[row_selector, 1] - new_length
+ elif move_method == "center":
+ center = (windows[row_selector, 1] + windows[row_selector, 0]) / 2.
+ windows[row_selector, 0] = center - new_length / 2.
+ windows[row_selector, 1] = center + new_length / 2.
+ return windows
+
diff --git a/moment_detr/scripts/inference.sh b/moment_detr/scripts/inference.sh
new file mode 100644
index 0000000000000000000000000000000000000000..aa6cdf3113f03453681e6d613460a84b2b3f2bb8
--- /dev/null
+++ b/moment_detr/scripts/inference.sh
@@ -0,0 +1,8 @@
+ckpt_path=$1
+eval_split_name=$2
+eval_path=data/highlight_${eval_split_name}_release.jsonl
+PYTHONPATH=$PYTHONPATH:. python moment_detr/inference.py \
+--resume ${ckpt_path} \
+--eval_split_name ${eval_split_name} \
+--eval_path ${eval_path} \
+${@:3}
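+# Example invocation (hedged; CHECKPOINT_PATH is a placeholder): the first two positional
+# args are the checkpoint and the split name, everything after them is forwarded to
+# inference.py via ${@:3}, e.g.
+#   bash moment_detr/scripts/inference.sh CHECKPOINT_PATH val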
diff --git a/moment_detr/scripts/pretrain.sh b/moment_detr/scripts/pretrain.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9f28675bbff60a80047a1c4b4b518e5112554c90
--- /dev/null
+++ b/moment_detr/scripts/pretrain.sh
@@ -0,0 +1,61 @@
+dset_name=hl
+ctx_mode=video_tef
+v_feat_types=slowfast_clip
+t_feat_type=clip
+results_root=results
+exp_id=pt
+
+######## data paths
+train_path=data/subs_train.jsonl
+eval_path=data/highlight_val_release.jsonl
+eval_split_name=val
+
+######## setup video+text features
+feat_root=features
+
+# video features
+v_feat_dim=0
+v_feat_dirs=()
+if [[ ${v_feat_types} == *"slowfast"* ]]; then
+ v_feat_dirs+=(${feat_root}/slowfast_features)
+ (( v_feat_dim += 2304 )) # double brackets for arithmetic op, no need to use ${v_feat_dim}
+fi
+if [[ ${v_feat_types} == *"clip"* ]]; then
+ v_feat_dirs+=(${feat_root}/clip_features)
+ (( v_feat_dim += 512 ))
+fi
+
+# text features
+if [[ ${t_feat_type} == "clip" ]]; then
+ t_feat_dir=${feat_root}/clip_sub_features/
+ t_feat_dim=512
+else
+ echo "Wrong arg for t_feat_type."
+ exit 1
+fi
+
+#### training
+bsz=256
+num_workers=8
+n_epoch=100
+max_es_cnt=100
+exp_id=pt
+
+
+PYTHONPATH=$PYTHONPATH:. python moment_detr/train.py \
+--dset_name ${dset_name} \
+--ctx_mode ${ctx_mode} \
+--train_path ${train_path} \
+--eval_path ${eval_path} \
+--eval_split_name ${eval_split_name} \
+--v_feat_dirs ${v_feat_dirs[@]} \
+--v_feat_dim ${v_feat_dim} \
+--t_feat_dir ${t_feat_dir} \
+--t_feat_dim ${t_feat_dim} \
+--bsz ${bsz} \
+--results_root ${results_root} \
+--num_workers ${num_workers} \
+--exp_id ${exp_id} \
+--n_epoch ${n_epoch} \
+--max_es_cnt ${max_es_cnt} \
+${@:1}
diff --git a/moment_detr/scripts/train.sh b/moment_detr/scripts/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6dbec79751a860a4b1b7b54e7a9bd388a02ba404
--- /dev/null
+++ b/moment_detr/scripts/train.sh
@@ -0,0 +1,54 @@
+dset_name=hl
+ctx_mode=video_tef
+v_feat_types=slowfast_clip
+t_feat_type=clip
+results_root=results
+exp_id=exp
+
+######## data paths
+train_path=data/highlight_train_release.jsonl
+eval_path=data/highlight_val_release.jsonl
+eval_split_name=val
+
+######## setup video+text features
+feat_root=features
+
+# video features
+v_feat_dim=0
+v_feat_dirs=()
+if [[ ${v_feat_types} == *"slowfast"* ]]; then
+ v_feat_dirs+=(${feat_root}/slowfast_features)
+ (( v_feat_dim += 2304 )) # double brackets for arithmetic op, no need to use ${v_feat_dim}
+fi
+if [[ ${v_feat_types} == *"clip"* ]]; then
+ v_feat_dirs+=(${feat_root}/clip_features)
+ (( v_feat_dim += 512 ))
+fi
+
+# text features
+if [[ ${t_feat_type} == "clip" ]]; then
+ t_feat_dir=${feat_root}/clip_text_features/
+ t_feat_dim=512
+else
+ echo "Wrong arg for t_feat_type."
+ exit 1
+fi
+
+#### training
+bsz=32
+
+
+PYTHONPATH=$PYTHONPATH:. python moment_detr/train.py \
+--dset_name ${dset_name} \
+--ctx_mode ${ctx_mode} \
+--train_path ${train_path} \
+--eval_path ${eval_path} \
+--eval_split_name ${eval_split_name} \
+--v_feat_dirs ${v_feat_dirs[@]} \
+--v_feat_dim ${v_feat_dim} \
+--t_feat_dir ${t_feat_dir} \
+--t_feat_dim ${t_feat_dim} \
+--bsz ${bsz} \
+--results_root ${results_root} \
+--exp_id ${exp_id} \
+${@:1}
diff --git a/moment_detr/span_utils.py b/moment_detr/span_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..99f7e1247181a93fcf5ba89ee21ee74cdff208d1
--- /dev/null
+++ b/moment_detr/span_utils.py
@@ -0,0 +1,122 @@
+import torch
+
+
+def span_xx_to_cxw(xx_spans):
+ """
+ Args:
+ xx_spans: tensor, (#windows, 2) or (..., 2), each row is a window of format (st, ed)
+
+ Returns:
+ cxw_spans: tensor, (#windows, 2), each row is a window of format (center=(st+ed)/2, width=(ed-st))
+ >>> spans = torch.Tensor([[0, 1], [0.2, 0.4]])
+ >>> span_xx_to_cxw(spans)
+ tensor([[0.5000, 1.0000],
+ [0.3000, 0.2000]])
+ >>> spans = torch.Tensor([[[0, 1], [0.2, 0.4]]])
+ >>> span_xx_to_cxw(spans)
+ tensor([[[0.5000, 1.0000],
+ [0.3000, 0.2000]]])
+ """
+ center = xx_spans.sum(-1) * 0.5
+ width = xx_spans[..., 1] - xx_spans[..., 0]
+ return torch.stack([center, width], dim=-1)
+
+
+def span_cxw_to_xx(cxw_spans):
+ """
+ Args:
+ cxw_spans: tensor, (#windows, 2) or (..., 2), the last dim is a row denoting a window of format (center, width)
+
+ >>> spans = torch.Tensor([[0.5000, 1.0000], [0.3000, 0.2000]])
+ >>> span_cxw_to_xx(spans)
+ tensor([[0.0000, 1.0000],
+ [0.2000, 0.4000]])
+ >>> spans = torch.Tensor([[[0.5000, 1.0000], [0.3000, 0.2000]]])
+ >>> span_cxw_to_xx(spans)
+ tensor([[[0.0000, 1.0000],
+ [0.2000, 0.4000]]])
+ """
+ x1 = cxw_spans[..., 0] - 0.5 * cxw_spans[..., 1]
+ x2 = cxw_spans[..., 0] + 0.5 * cxw_spans[..., 1]
+ return torch.stack([x1, x2], dim=-1)
+
+
+def temporal_iou(spans1, spans2):
+ """
+ Args:
+ spans1: (N, 2) torch.Tensor, each row defines a span [st, ed]
+ spans2: (M, 2) torch.Tensor, ...
+
+ Returns:
+ iou: (N, M) torch.Tensor
+ union: (N, M) torch.Tensor
+ >>> test_spans1 = torch.Tensor([[0, 0.2], [0.5, 1.0]])
+ >>> test_spans2 = torch.Tensor([[0, 0.3], [0., 1.0]])
+ >>> temporal_iou(test_spans1, test_spans2)
+ (tensor([[0.6667, 0.2000],
+ [0.0000, 0.5000]]),
+ tensor([[0.3000, 1.0000],
+ [0.8000, 1.0000]]))
+ """
+ areas1 = spans1[:, 1] - spans1[:, 0] # (N, )
+ areas2 = spans2[:, 1] - spans2[:, 0] # (M, )
+
+ left = torch.max(spans1[:, None, 0], spans2[:, 0]) # (N, M)
+ right = torch.min(spans1[:, None, 1], spans2[:, 1]) # (N, M)
+
+ inter = (right - left).clamp(min=0) # (N, M)
+ union = areas1[:, None] + areas2 - inter # (N, M)
+
+ iou = inter / union
+ return iou, union
+
+
+def temporal_intersection_over_pred(gt_spans, pred_spans):
+ """ intersection over the second input spans
+ Args:
+ gt_spans: (N, 2),
+ pred_spans: (M, 2)
+
+ Returns:
+ inter_over_pred: (N, M) torch.Tensor
+ """
+ left = torch.max(gt_spans[:, None, 0], pred_spans[:, 0])
+ right = torch.min(gt_spans[:, None, 1], pred_spans[:, 1])
+
+ inter = (right - left).clamp(min=0) # (N, M)
+ inter_over_pred = inter / (pred_spans[:, 1] - pred_spans[:, 0])
+ return inter_over_pred
+
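+# Worked example (illustrative): gt_spans=[[0, 0.4]] and pred_spans=[[0.2, 0.6]] overlap on
+# [0.2, 0.4], so the intersection 0.2 divided by the prediction length 0.4 gives 0.5.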
+
+def generalized_temporal_iou(spans1, spans2):
+ """
+ Generalized IoU from https://giou.stanford.edu/
+ Also reference to DETR implementation of generalized_box_iou
+ https://github.com/facebookresearch/detr/blob/master/util/box_ops.py#L40
+
+ Args:
+ spans1: (N, 2) torch.Tensor, each row defines a span in xx format [st, ed]
+ spans2: (M, 2) torch.Tensor, ...
+
+ Returns:
+ giou: (N, M) torch.Tensor
+
+ >>> test_spans1 = torch.Tensor([[0, 0.2], [0.5, 1.0]])
+ >>> test_spans2 = torch.Tensor([[0, 0.3], [0., 1.0]])
+ >>> generalized_temporal_iou(test_spans1, test_spans2)
+ tensor([[ 0.6667, 0.2000],
+ [-0.2000, 0.5000]])
+ """
+ spans1 = spans1.float()
+ spans2 = spans2.float()
+ assert (spans1[:, 1] >= spans1[:, 0]).all()
+ assert (spans2[:, 1] >= spans2[:, 0]).all()
+ iou, union = temporal_iou(spans1, spans2)
+
+ left = torch.min(spans1[:, None, 0], spans2[:, 0]) # (N, M)
+ right = torch.max(spans1[:, None, 1], spans2[:, 1]) # (N, M)
+ enclosing_area = (right - left).clamp(min=0) # (N, M)
+
+ return iou - (enclosing_area - union) / enclosing_area
+
+
diff --git a/moment_detr/start_end_dataset.py b/moment_detr/start_end_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..930c22b15741518a2752c0d73341abb329e4199b
--- /dev/null
+++ b/moment_detr/start_end_dataset.py
@@ -0,0 +1,247 @@
+import torch
+from torch.utils.data import Dataset
+import numpy as np
+from tqdm import tqdm
+import random
+import logging
+from os.path import join, exists
+from utils.basic_utils import load_jsonl, l2_normalize_np_array
+from utils.tensor_utils import pad_sequences_1d
+from moment_detr.span_utils import span_xx_to_cxw
+
+logger = logging.getLogger(__name__)
+
+
+class StartEndDataset(Dataset):
+ Q_FEAT_TYPES = ["pooler_output", "last_hidden_state"]
+ """One line in data loaded from data_path."
+ {
+ "qid": 7803,
+ "query": "Man in gray top walks from outside to inside.",
+ "duration": 150,
+ "vid": "RoripwjYFp8_360.0_510.0",
+ "relevant_clip_ids": [13, 14, 15, 16, 17],
+ "relevant_windows": [[26, 36]]
+ }
+ """
+
+ def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,
+ q_feat_type="last_hidden_state",
+ max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode="video",
+ normalize_v=True, normalize_t=True, load_labels=True,
+ clip_len=2, max_windows=5, span_loss_type="l1", txt_drop_ratio=0):
+ self.dset_name = dset_name
+ self.data_path = data_path
+ self.data_ratio = data_ratio
+ self.v_feat_dirs = v_feat_dirs \
+ if isinstance(v_feat_dirs, list) else [v_feat_dirs]
+ self.q_feat_dir = q_feat_dir
+ self.q_feat_type = q_feat_type
+ self.max_q_l = max_q_l
+ self.max_v_l = max_v_l
+ self.ctx_mode = ctx_mode
+ self.use_tef = "tef" in ctx_mode
+ self.use_video = "video" in ctx_mode
+ self.normalize_t = normalize_t
+ self.normalize_v = normalize_v
+ self.load_labels = load_labels
+ self.clip_len = clip_len
+ self.max_windows = max_windows # maximum number of windows to use as labels
+ self.span_loss_type = span_loss_type
+ self.txt_drop_ratio = txt_drop_ratio
+ if "val" in data_path or "test" in data_path:
+ assert txt_drop_ratio == 0
+
+ # checks
+ assert q_feat_type in self.Q_FEAT_TYPES
+
+ # data
+ self.data = self.load_data()
+
+ def load_data(self):
+ datalist = load_jsonl(self.data_path)
+ if self.data_ratio != 1:
+ n_examples = int(len(datalist) * self.data_ratio)
+ datalist = datalist[:n_examples]
+ logger.info("Using {}% of the data: {} examples"
+ .format(self.data_ratio * 100, n_examples))
+ return datalist
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, index):
+ meta = self.data[index]
+
+ model_inputs = dict()
+ model_inputs["query_feat"] = self._get_query_feat_by_qid(meta["qid"]) # (Dq, ) or (Lq, Dq)
+ if self.use_video:
+ model_inputs["video_feat"] = self._get_video_feat_by_vid(meta["vid"]) # (Lv, Dv)
+ ctx_l = len(model_inputs["video_feat"])
+ else:
+ ctx_l = self.max_v_l
+
+ if self.use_tef:
+ tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l
+ tef_ed = tef_st + 1.0 / ctx_l
+ tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)
+ if self.use_video:
+ model_inputs["video_feat"] = torch.cat(
+ [model_inputs["video_feat"], tef], dim=1) # (Lv, Dv+2)
+ else:
+ model_inputs["video_feat"] = tef
+
+ if self.load_labels:
+ model_inputs["span_labels"] = self.get_span_labels(meta["relevant_windows"], ctx_l) # (#windows, 2)
+ if "subs_train" not in self.data_path:
+ model_inputs["saliency_pos_labels"], model_inputs["saliency_neg_labels"] = \
+ self.get_saliency_labels(meta["relevant_clip_ids"], meta["saliency_scores"], ctx_l)
+ else:
+ model_inputs["saliency_pos_labels"], model_inputs["saliency_neg_labels"] = \
+ self.get_saliency_labels_sub_as_query(meta["relevant_windows"][0], ctx_l) # only one gt
+ return dict(meta=meta, model_inputs=model_inputs)
+
+ def get_saliency_labels_sub_as_query(self, gt_window, ctx_l, max_n=2):
+ gt_st = int(gt_window[0] / self.clip_len)
+ gt_ed = max(0, min(int(gt_window[1] / self.clip_len), ctx_l) - 1)
+ if gt_st > gt_ed:
+ gt_st = gt_ed
+
+ if gt_st != gt_ed:
+ pos_clip_indices = random.sample(range(gt_st, gt_ed+1), k=max_n)
+ else:
+ pos_clip_indices = [gt_st, gt_st]
+
+ neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))
+ neg_clip_indices = random.sample(neg_pool, k=max_n)
+ return pos_clip_indices, neg_clip_indices
+
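+ # Worked example (illustrative, matching the sample line in the class docstring): for
+ # gt_window=[26, 36] with clip_len=2 and ctx_l=75, gt_st=13 and gt_ed=17, so positives are
+ # sampled from clips 13..17 and negatives from the remaining clip indices.
+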
+ def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):
+ """Sum the scores from the three annotations, then take the two clips with the
+ maximum scores as positive, and two with the minimum scores as negative.
+ Args:
+ rel_clip_ids: list(int), list of relevant clip ids
+ scores: list([anno1_score, anno2_score, anno3_score]),
+ ctx_l: int
+ max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.
+ add_easy_negative: bool, if True, sample easy negatives outside the relevant_clip_ids.
+ """
+ # indices inside rel_clip_ids
+ scores = np.array(scores) # (#rel_clips, 3)
+ agg_scores = np.sum(scores, 1) # (#rel_clips, )
+ sort_indices = np.argsort(agg_scores) # increasing
+
+ # indices in the whole video
+ # the min(_, ctx_l-1) here is incorrect, but should not cause
+ # much trouble since it is rarely triggered.
+ hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]
+ hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]
+ easy_pos_clip_indices = []
+ easy_neg_clip_indices = []
+ if add_easy_negative:
+ easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))
+ if len(easy_neg_pool) >= max_n:
+ easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)
+ easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)
+ else: # copy the hard ones
+ easy_pos_clip_indices = hard_pos_clip_indices
+ easy_neg_clip_indices = hard_neg_clip_indices
+
+ pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices
+ neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices
+ return pos_clip_indices, neg_clip_indices
+
+ def get_span_labels(self, windows, ctx_l):
+ """
+ windows: list([st, ed]) in seconds. E.g. [[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)
+ Note a maximum of `self.max_windows` windows are used.
+ returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length
+ """
+ if len(windows) > self.max_windows:
+ random.shuffle(windows)
+ windows = windows[:self.max_windows]
+ if self.span_loss_type == "l1":
+ windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx
+ windows = span_xx_to_cxw(windows) # normalized windows in cxw
+ elif self.span_loss_type == "ce":
+ windows = torch.Tensor([
+ [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]
+ for w in windows]).long() # inclusive
+ else:
+ raise NotImplementedError
+ return windows
+
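+ # Worked example (illustrative): for window [26, 36] with ctx_l=75 and clip_len=2, the video
+ # length is 150s, so the "l1" branch yields xx=[0.1733, 0.2400] -> cxw=[0.2067, 0.0667],
+ # while the "ce" branch yields the inclusive clip indices [13, 17].
+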
+ def _get_query_feat_by_qid(self, qid):
+ q_feat_path = join(self.q_feat_dir, f"qid{qid}.npz")
+ q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)
+ if self.q_feat_type == "last_hidden_state":
+ q_feat = q_feat[:self.max_q_l]
+ if self.normalize_t:
+ q_feat = l2_normalize_np_array(q_feat)
+ if self.txt_drop_ratio > 0:
+ q_feat = self.random_drop_rows(q_feat)
+ return torch.from_numpy(q_feat) # (D, ) or (Lq, D)
+
+ def random_drop_rows(self, embeddings):
+ """randomly mask num_drop rows in embeddings to be zero.
+ Args:
+ embeddings: np.ndarray (L, D)
+ """
+ num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)
+ if num_drop_rows > 0:
+ row_indices = np.random.choice(
+ len(embeddings), size=num_drop_rows, replace=False)
+ embeddings[row_indices] = 0
+ return embeddings
+
+ def _get_video_feat_by_vid(self, vid):
+ v_feat_list = []
+ for _feat_dir in self.v_feat_dirs:
+ _feat_path = join(_feat_dir, f"{vid}.npz")
+ _feat = np.load(_feat_path)["features"][:self.max_v_l].astype(np.float32)
+ if self.normalize_v:
+ _feat = l2_normalize_np_array(_feat)
+ v_feat_list.append(_feat)
+ # some features are slightly longer than the others
+ min_len = min([len(e) for e in v_feat_list])
+ v_feat_list = [e[:min_len] for e in v_feat_list]
+ v_feat = np.concatenate(v_feat_list, axis=1)
+ return torch.from_numpy(v_feat) # (Lv, D)
+
+
+def start_end_collate(batch):
+ batch_meta = [e["meta"] for e in batch] # seems no need to collate ?
+
+ model_inputs_keys = batch[0]["model_inputs"].keys()
+ batched_data = dict()
+ for k in model_inputs_keys:
+ if k == "span_labels":
+ batched_data[k] = [dict(spans=e["model_inputs"]["span_labels"]) for e in batch]
+ continue
+ if k in ["saliency_pos_labels", "saliency_neg_labels"]:
+ batched_data[k] = torch.LongTensor([e["model_inputs"][k] for e in batch])
+ continue
+ batched_data[k] = pad_sequences_1d(
+ [e["model_inputs"][k] for e in batch], dtype=torch.float32, fixed_length=None)
+ return batch_meta, batched_data
+
+
+def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):
+ model_inputs = dict(
+ src_txt=batched_model_inputs["query_feat"][0].to(device, non_blocking=non_blocking),
+ src_txt_mask=batched_model_inputs["query_feat"][1].to(device, non_blocking=non_blocking),
+ src_vid=batched_model_inputs["video_feat"][0].to(device, non_blocking=non_blocking),
+ src_vid_mask=batched_model_inputs["video_feat"][1].to(device, non_blocking=non_blocking),
+ )
+ targets = {}
+ if "span_labels" in batched_model_inputs:
+ targets["span_labels"] = [
+ dict(spans=e["spans"].to(device, non_blocking=non_blocking))
+ for e in batched_model_inputs["span_labels"]
+ ]
+ if "saliency_pos_labels" in batched_model_inputs:
+ for name in ["saliency_pos_labels", "saliency_neg_labels"]:
+ targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)
+
+ targets = None if len(targets) == 0 else targets
+ return model_inputs, targets
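+
+
+# Hedged usage sketch, mirroring how train.py wires these pieces together (batch size and
+# device are illustrative):
+#   loader = DataLoader(train_dataset, collate_fn=start_end_collate, batch_size=32, shuffle=True)
+#   for batch_meta, batched_inputs in loader:
+#       model_inputs, targets = prepare_batch_inputs(batched_inputs, device="cuda")
+#       outputs = model(**model_inputs)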
diff --git a/moment_detr/text_encoder.py b/moment_detr/text_encoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..929dd7c90b15c382bc0ad2169be7deb421219e50
--- /dev/null
+++ b/moment_detr/text_encoder.py
@@ -0,0 +1,53 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from easydict import EasyDict as edict
+from xml.model_components import BertAttention, TrainablePositionalEncoding
+
+
+class TextEncoder(nn.Module):
+ def __init__(self, hidden_size, drop, input_drop, nheads, max_position_embeddings):
+ super().__init__()
+ self.transformer_encoder = BertAttention(edict(
+ hidden_size=hidden_size,
+ intermediate_size=hidden_size,
+ hidden_dropout_prob=drop,
+ attention_probs_dropout_prob=drop,
+ num_attention_heads=nheads,
+ ))
+ self.pos_embed = TrainablePositionalEncoding(
+ max_position_embeddings=max_position_embeddings,
+ hidden_size=hidden_size,
+ dropout=input_drop,
+ )
+ self.modular_vector_mapping = nn.Linear(hidden_size, 1, bias=False)
+
+ def forward(self, feat, mask):
+ """
+ Args:
+ feat: (N, L, D=hidden_size)
+ mask: (N, L) with 1 indicates valid
+
+ Returns:
+ (N, D)
+ """
+ feat = self.pos_embed(feat) # (N, L, D)
+ feat = self.transformer_encoder(feat, mask.unsqueeze(1))
+ att_scores = self.modular_vector_mapping(feat) # (N, L, 1)
+ att_scores = F.softmax(mask_logits(att_scores, mask.unsqueeze(2)), dim=1)
+ pooled_feat = torch.einsum("blm,bld->bmd", att_scores, feat) # (N, 2 or 1, D)
+ return pooled_feat.squeeze(1)
+
+
+def mask_logits(target, mask):
+ return target * mask + (1 - mask) * (-1e10)
+
+
+def build_text_encoder(args):
+ return TextEncoder(
+ hidden_size=args.hidden_dim,
+ drop=args.dropout,
+ input_drop=args.input_dropout,
+ nheads=args.nheads,
+ max_position_embeddings=args.max_q_l
+ )
diff --git a/moment_detr/train.py b/moment_detr/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c18c898cecaafba20a56473e8e7bab95bb97377
--- /dev/null
+++ b/moment_detr/train.py
@@ -0,0 +1,266 @@
+import os
+import time
+import json
+import pprint
+import random
+import numpy as np
+from tqdm import tqdm, trange
+from collections import defaultdict
+
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+from torch.utils.data import DataLoader
+from torch.utils.tensorboard import SummaryWriter
+
+from moment_detr.config import BaseOptions
+from moment_detr.start_end_dataset import \
+ StartEndDataset, start_end_collate, prepare_batch_inputs
+from moment_detr.inference import eval_epoch, start_inference, setup_model
+from utils.basic_utils import AverageMeter, dict_to_markdown
+from utils.model_utils import count_parameters
+
+
+import logging
+logger = logging.getLogger(__name__)
+logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+ level=logging.INFO)
+
+
+def set_seed(seed, use_cuda=True):
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ if use_cuda:
+ torch.cuda.manual_seed_all(seed)
+
+
+def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer):
+ logger.info(f"[Epoch {epoch_i+1}]")
+ model.train()
+ criterion.train()
+
+ # init meters
+ time_meters = defaultdict(AverageMeter)
+ loss_meters = defaultdict(AverageMeter)
+
+ num_training_examples = len(train_loader)
+ timer_dataloading = time.time()
+ for batch_idx, batch in tqdm(enumerate(train_loader),
+ desc="Training Iteration",
+ total=num_training_examples):
+ time_meters["dataloading_time"].update(time.time() - timer_dataloading)
+
+ timer_start = time.time()
+ model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory)
+ time_meters["prepare_inputs_time"].update(time.time() - timer_start)
+
+ timer_start = time.time()
+ outputs = model(**model_inputs)
+ loss_dict = criterion(outputs, targets)
+ weight_dict = criterion.weight_dict
+ losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
+ time_meters["model_forward_time"].update(time.time() - timer_start)
+
+ timer_start = time.time()
+ optimizer.zero_grad()
+ losses.backward()
+ if opt.grad_clip > 0:
+ nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
+ optimizer.step()
+ time_meters["model_backward_time"].update(time.time() - timer_start)
+
+ loss_dict["loss_overall"] = float(losses) # for logging only
+ for k, v in loss_dict.items():
+ loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v))
+
+ timer_dataloading = time.time()
+ if opt.debug and batch_idx == 3:
+ break
+
+ # print/add logs
+ tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1)
+ for k, v in loss_meters.items():
+ tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1)
+
+ to_write = opt.train_log_txt_formatter.format(
+ time_str=time.strftime("%Y_%m_%d_%H_%M_%S"),
+ epoch=epoch_i+1,
+ loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()]))
+ with open(opt.train_log_filepath, "a") as f:
+ f.write(to_write)
+
+ logger.info("Epoch time stats:")
+ for name, meter in time_meters.items():
+ d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]}
+ logger.info(f"{name} ==> {d}")
+
+
+def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt):
+ if opt.device.type == "cuda":
+ logger.info("CUDA enabled.")
+ model.to(opt.device)
+
+ tb_writer = SummaryWriter(opt.tensorboard_log_dir)
+ tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None))
+ opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n"
+ opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n"
+
+ train_loader = DataLoader(
+ train_dataset,
+ collate_fn=start_end_collate,
+ batch_size=opt.bsz,
+ num_workers=opt.num_workers,
+ shuffle=True,
+ pin_memory=opt.pin_memory
+ )
+
+ prev_best_score = 0.
+ es_cnt = 0
+ # start_epoch = 0
+ if opt.start_epoch is None:
+ start_epoch = -1 if opt.eval_untrained else 0
+ else:
+ start_epoch = opt.start_epoch
+ save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name)
+ for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"):
+ if epoch_i > -1:
+ train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer)
+ lr_scheduler.step()
+ eval_epoch_interval = 5
+ if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0:
+ with torch.no_grad():
+ metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \
+ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer)
+
+ # log
+ to_write = opt.eval_log_txt_formatter.format(
+ time_str=time.strftime("%Y_%m_%d_%H_%M_%S"),
+ epoch=epoch_i,
+ loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]),
+ eval_metrics_str=json.dumps(metrics_no_nms))
+
+ with open(opt.eval_log_filepath, "a") as f:
+ f.write(to_write)
+ logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4)))
+ if metrics_nms is not None:
+ logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4)))
+
+ metrics = metrics_no_nms
+ for k, v in metrics["brief"].items():
+ tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1)
+
+ stop_score = metrics["brief"]["MR-full-mAP"]
+ if stop_score > prev_best_score:
+ es_cnt = 0
+ prev_best_score = stop_score
+
+ checkpoint = {
+ "model": model.state_dict(),
+ "optimizer": optimizer.state_dict(),
+ "lr_scheduler": lr_scheduler.state_dict(),
+ "epoch": epoch_i,
+ "opt": opt
+ }
+ torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"))
+
+ best_file_paths = [e.replace("latest", "best") for e in latest_file_paths]
+ for src, tgt in zip(latest_file_paths, best_file_paths):
+ os.renames(src, tgt)
+ logger.info("The checkpoint file has been updated.")
+ else:
+ es_cnt += 1
+ if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop
+ with open(opt.train_log_filepath, "a") as f:
+ f.write(f"Early Stop at epoch {epoch_i}")
+ logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n")
+ break
+
+ # save ckpt
+ checkpoint = {
+ "model": model.state_dict(),
+ "optimizer": optimizer.state_dict(),
+ "lr_scheduler": lr_scheduler.state_dict(),
+ "epoch": epoch_i,
+ "opt": opt
+ }
+ torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt"))
+
+ save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain
+ if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies
+ checkpoint = {
+ "model": model.state_dict(),
+ "optimizer": optimizer.state_dict(),
+ "epoch": epoch_i,
+ "opt": opt
+ }
+ torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt"))
+
+ if opt.debug:
+ break
+
+ tb_writer.close()
+
+
+def start_training():
+ logger.info("Setup config, data and model...")
+ opt = BaseOptions().parse()
+ set_seed(opt.seed)
+ if opt.debug: # keep the model run deterministic
+ # 'cudnn.benchmark = True' enables auto-tuning to find the best algorithm for a specific input/net config.
+ # Enable it only when the input size is fixed.
+ cudnn.benchmark = False
+ cudnn.deterministic = True
+
+ dataset_config = dict(
+ dset_name=opt.dset_name,
+ data_path=opt.train_path,
+ v_feat_dirs=opt.v_feat_dirs,
+ q_feat_dir=opt.t_feat_dir,
+ q_feat_type="last_hidden_state",
+ max_q_l=opt.max_q_l,
+ max_v_l=opt.max_v_l,
+ ctx_mode=opt.ctx_mode,
+ data_ratio=opt.data_ratio,
+ normalize_v=not opt.no_norm_vfeat,
+ normalize_t=not opt.no_norm_tfeat,
+ clip_len=opt.clip_length,
+ max_windows=opt.max_windows,
+ span_loss_type=opt.span_loss_type,
+ txt_drop_ratio=opt.txt_drop_ratio
+ )
+
+ dataset_config["data_path"] = opt.train_path
+ train_dataset = StartEndDataset(**dataset_config)
+
+ if opt.eval_path is not None:
+ dataset_config["data_path"] = opt.eval_path
+ dataset_config["txt_drop_ratio"] = 0
+ dataset_config["q_feat_dir"] = opt.t_feat_dir.replace("sub_features", "text_features") # for pretraining
+ # dataset_config["load_labels"] = False # uncomment to calculate eval loss
+ eval_dataset = StartEndDataset(**dataset_config)
+ else:
+ eval_dataset = None
+
+ model, criterion, optimizer, lr_scheduler = setup_model(opt)
+ logger.info(f"Model {model}")
+ count_parameters(model)
+ logger.info("Start Training...")
+ train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)
+ return opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"), opt.eval_split_name, opt.eval_path, opt.debug
+
+
+if __name__ == '__main__':
+ best_ckpt_path, eval_split_name, eval_path, debug = start_training()
+ if not debug:
+ input_args = ["--resume", best_ckpt_path,
+ "--eval_split_name", eval_split_name,
+ "--eval_path", eval_path]
+
+ import sys
+ sys.argv[1:] = input_args
+ logger.info("\n\n\nFINISHED TRAINING!!!")
+ logger.info("Evaluating model at {}".format(best_ckpt_path))
+ logger.info("Input args {}".format(sys.argv[1:]))
+ start_inference()
diff --git a/moment_detr/transformer.py b/moment_detr/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a4958ec06d8c1fd1f091362d2c5a22714ed0714
--- /dev/null
+++ b/moment_detr/transformer.py
@@ -0,0 +1,471 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+"""
+DETR Transformer class.
+
+Copy-paste from torch.nn.Transformer with modifications:
+ * positional encodings are passed in MHattention
+ * extra LN at the end of encoder is removed
+ * decoder returns a stack of activations from all decoding layers
+"""
+import copy
+from typing import Optional
+
+import torch
+import torch.nn.functional as F
+from torch import nn, Tensor
+
+
+class Transformer(nn.Module):
+
+ def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
+ num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
+ activation="relu", normalize_before=False,
+ return_intermediate_dec=False):
+ super().__init__()
+
+ # TransformerEncoderLayerThin
+ encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
+ dropout, activation, normalize_before)
+ encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
+ self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
+
+ # TransformerDecoderLayerThin
+ decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
+ dropout, activation, normalize_before)
+ decoder_norm = nn.LayerNorm(d_model)
+ self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
+ return_intermediate=return_intermediate_dec)
+
+ self._reset_parameters()
+
+ self.d_model = d_model
+ self.nhead = nhead
+
+ def _reset_parameters(self):
+ for p in self.parameters():
+ if p.dim() > 1:
+ nn.init.xavier_uniform_(p)
+
+ def forward(self, src, mask, query_embed, pos_embed):
+ """
+ Args:
+ src: (batch_size, L, d)
+ mask: (batch_size, L)
+ query_embed: (#queries, d)
+ pos_embed: (batch_size, L, d) the same as src
+
+ Returns:
+
+ """
+ # (batch_size, L, d) -> (L, batch_size, d)
+ bs, l, d = src.shape
+ src = src.permute(1, 0, 2) # (L, batch_size, d)
+ pos_embed = pos_embed.permute(1, 0, 2) # (L, batch_size, d)
+ query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) # (#queries, batch_size, d)
+
+ tgt = torch.zeros_like(query_embed)
+ memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) # (L, batch_size, d)
+ hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
+ pos=pos_embed, query_pos=query_embed) # (#layers, #queries, batch_size, d)
+ hs = hs.transpose(1, 2) # (#layers, batch_size, #queries, d)
+ # memory = memory.permute(1, 2, 0) # (batch_size, d, L)
+ memory = memory.transpose(0, 1) # (batch_size, L, d)
+ return hs, memory
+
+
+class TransformerEncoder(nn.Module):
+
+ def __init__(self, encoder_layer, num_layers, norm=None, return_intermediate=False):
+ super().__init__()
+ self.layers = _get_clones(encoder_layer, num_layers)
+ self.num_layers = num_layers
+ self.norm = norm
+ self.return_intermediate = return_intermediate
+
+ def forward(self, src,
+ mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ output = src
+
+ intermediate = []
+
+ for layer in self.layers:
+ output = layer(output, src_mask=mask,
+ src_key_padding_mask=src_key_padding_mask, pos=pos)
+ if self.return_intermediate:
+ intermediate.append(output)
+
+ if self.norm is not None:
+ output = self.norm(output)
+
+ if self.return_intermediate:
+ return torch.stack(intermediate)
+
+ return output
+
+
+class TransformerDecoder(nn.Module):
+
+ def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
+ super().__init__()
+ self.layers = _get_clones(decoder_layer, num_layers)
+ self.num_layers = num_layers
+ self.norm = norm
+ self.return_intermediate = return_intermediate
+
+ def forward(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ output = tgt
+
+ intermediate = []
+
+ for layer in self.layers:
+ output = layer(output, memory, tgt_mask=tgt_mask,
+ memory_mask=memory_mask,
+ tgt_key_padding_mask=tgt_key_padding_mask,
+ memory_key_padding_mask=memory_key_padding_mask,
+ pos=pos, query_pos=query_pos)
+ if self.return_intermediate:
+ intermediate.append(self.norm(output))
+
+ if self.norm is not None:
+ output = self.norm(output)
+ if self.return_intermediate:
+ intermediate.pop()
+ intermediate.append(output)
+
+ if self.return_intermediate:
+ return torch.stack(intermediate)
+
+ return output.unsqueeze(0)
+
+
+class TransformerEncoderLayerThin(nn.Module):
+
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+ activation="relu", normalize_before=False):
+ super().__init__()
+ self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ # Implementation of Feedforward model
+ # self.linear1 = nn.Linear(d_model, dim_feedforward)
+ # self.dropout = nn.Dropout(dropout)
+ # self.linear2 = nn.Linear(dim_feedforward, d_model)
+ self.linear = nn.Linear(d_model, d_model)
+ self.norm = nn.LayerNorm(d_model)
+ self.dropout = nn.Dropout(dropout)
+
+ # self.activation = _get_activation_fn(activation)
+ self.normalize_before = normalize_before
+
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+ return tensor if pos is None else tensor + pos
+
+ def forward_post(self,
+ src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ q = k = self.with_pos_embed(src, pos)
+ src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
+ key_padding_mask=src_key_padding_mask)[0]
+ src2 = self.linear(src2)
+ src = src + self.dropout(src2)
+ src = self.norm(src)
+ # src = src + self.dropout1(src2)
+ # src = self.norm1(src)
+ # src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+ # src = src + self.dropout2(src2)
+ # src = self.norm2(src)
+ return src
+
+ def forward_pre(self, src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ """not used"""
+ src2 = self.norm1(src)
+ q = k = self.with_pos_embed(src2, pos)
+ src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
+ key_padding_mask=src_key_padding_mask)[0]
+ src = src + self.dropout1(src2)
+ src2 = self.norm2(src)
+ src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
+ src = src + self.dropout2(src2)
+ return src
+
+ def forward(self, src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ if self.normalize_before:
+ return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
+ return self.forward_post(src, src_mask, src_key_padding_mask, pos)
+
+
+class TransformerEncoderLayer(nn.Module):
+
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+ activation="relu", normalize_before=False):
+ super().__init__()
+ self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+
+ self.activation = _get_activation_fn(activation)
+ self.normalize_before = normalize_before
+
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+ return tensor if pos is None else tensor + pos
+
+ def forward_post(self,
+ src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ q = k = self.with_pos_embed(src, pos)
+ src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
+ key_padding_mask=src_key_padding_mask)[0]
+ src = src + self.dropout1(src2)
+ src = self.norm1(src)
+ src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+ src = src + self.dropout2(src2)
+ src = self.norm2(src)
+ return src
+
+ def forward_pre(self, src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ src2 = self.norm1(src)
+ q = k = self.with_pos_embed(src2, pos)
+ src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
+ key_padding_mask=src_key_padding_mask)[0]
+ src = src + self.dropout1(src2)
+ src2 = self.norm2(src)
+ src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
+ src = src + self.dropout2(src2)
+ return src
+
+ def forward(self, src,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None):
+ if self.normalize_before:
+ return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
+ return self.forward_post(src, src_mask, src_key_padding_mask, pos)
+
+
+class TransformerDecoderLayer(nn.Module):
+
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+ activation="relu", normalize_before=False):
+ super().__init__()
+ self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
+ self.dropout = nn.Dropout(dropout)
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ self.norm3 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+ self.dropout3 = nn.Dropout(dropout)
+
+ self.activation = _get_activation_fn(activation)
+ self.normalize_before = normalize_before
+
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+ return tensor if pos is None else tensor + pos
+
+ def forward_post(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ q = k = self.with_pos_embed(tgt, query_pos)
+ tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
+ key_padding_mask=tgt_key_padding_mask)[0]
+ tgt = tgt + self.dropout1(tgt2)
+ tgt = self.norm1(tgt)
+ tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
+ key=self.with_pos_embed(memory, pos),
+ value=memory, attn_mask=memory_mask,
+ key_padding_mask=memory_key_padding_mask)[0]
+ tgt = tgt + self.dropout2(tgt2)
+ tgt = self.norm2(tgt)
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+ tgt = tgt + self.dropout3(tgt2)
+ tgt = self.norm3(tgt)
+ return tgt
+
+ def forward_pre(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ tgt2 = self.norm1(tgt)
+ q = k = self.with_pos_embed(tgt2, query_pos)
+ tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
+ key_padding_mask=tgt_key_padding_mask)[0]
+ tgt = tgt + self.dropout1(tgt2)
+ tgt2 = self.norm2(tgt)
+ tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
+ key=self.with_pos_embed(memory, pos),
+ value=memory, attn_mask=memory_mask,
+ key_padding_mask=memory_key_padding_mask)[0]
+ tgt = tgt + self.dropout2(tgt2)
+ tgt2 = self.norm3(tgt)
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
+ tgt = tgt + self.dropout3(tgt2)
+ return tgt
+
+ def forward(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ if self.normalize_before:
+ return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
+ tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
+ return self.forward_post(tgt, memory, tgt_mask, memory_mask,
+ tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
+
+
+class TransformerDecoderLayerThin(nn.Module):
+ """removed intermediate layer"""
+ def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+ activation="relu", normalize_before=False):
+ super().__init__()
+ self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+ # Implementation of Feedforward model
+ self.linear1 = nn.Linear(d_model, d_model)
+ # self.linear1 = nn.Linear(d_model, dim_feedforward)
+ # self.dropout = nn.Dropout(dropout)
+ # self.linear2 = nn.Linear(dim_feedforward, d_model)
+
+ self.norm1 = nn.LayerNorm(d_model)
+ self.norm2 = nn.LayerNorm(d_model)
+ # self.norm3 = nn.LayerNorm(d_model)
+ self.dropout1 = nn.Dropout(dropout)
+ self.dropout2 = nn.Dropout(dropout)
+ # self.dropout3 = nn.Dropout(dropout)
+
+ # self.activation = _get_activation_fn(activation)
+ self.normalize_before = normalize_before
+
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+ return tensor if pos is None else tensor + pos
+
+ def forward_post(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ q = k = self.with_pos_embed(tgt, query_pos)
+ tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
+ key_padding_mask=tgt_key_padding_mask)[0]
+ tgt = tgt + self.dropout1(tgt2)
+ tgt = self.norm1(tgt)
+ tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
+ key=self.with_pos_embed(memory, pos),
+ value=memory, attn_mask=memory_mask,
+ key_padding_mask=memory_key_padding_mask)[0]
+ tgt2 = self.linear1(tgt2)
+ tgt = tgt + self.dropout2(tgt2)
+ tgt = self.norm2(tgt)
+ # tgt = tgt + self.dropout2(tgt2)
+ # tgt = self.norm2(tgt)
+ # tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+ # tgt = tgt + self.dropout3(tgt2)
+ # tgt = self.norm3(tgt)
+ return tgt
+
+ def forward_pre(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ tgt2 = self.norm1(tgt)
+ q = k = self.with_pos_embed(tgt2, query_pos)
+ tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
+ key_padding_mask=tgt_key_padding_mask)[0]
+ tgt = tgt + self.dropout1(tgt2)
+ tgt2 = self.norm2(tgt)
+ tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
+ key=self.with_pos_embed(memory, pos),
+ value=memory, attn_mask=memory_mask,
+ key_padding_mask=memory_key_padding_mask)[0]
+        tgt2 = self.linear1(tgt2)  # thin projection in place of the feed-forward block
+        tgt = tgt + self.dropout2(tgt2)
+        return tgt
+
+ def forward(self, tgt, memory,
+ tgt_mask: Optional[Tensor] = None,
+ memory_mask: Optional[Tensor] = None,
+ tgt_key_padding_mask: Optional[Tensor] = None,
+ memory_key_padding_mask: Optional[Tensor] = None,
+ pos: Optional[Tensor] = None,
+ query_pos: Optional[Tensor] = None):
+ if self.normalize_before:
+ return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
+ tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
+ return self.forward_post(tgt, memory, tgt_mask, memory_mask,
+ tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
+
+
+
+def _get_clones(module, N):
+    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
+
+
+def build_transformer(args):
+ return Transformer(
+ d_model=args.hidden_dim,
+ dropout=args.dropout,
+ nhead=args.nheads,
+ dim_feedforward=args.dim_feedforward,
+ num_encoder_layers=args.enc_layers,
+ num_decoder_layers=args.dec_layers,
+ normalize_before=args.pre_norm,
+ return_intermediate_dec=True,
+ )
+
+
+def _get_activation_fn(activation):
+ """Return an activation function given a string"""
+ if activation == "relu":
+ return F.relu
+ if activation == "gelu":
+ return F.gelu
+ if activation == "glu":
+ return F.glu
+ raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9c17ae8227d86e3c295a1438527abbec54295e5b
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,14 @@
+torch
+torchvision
+torchaudio
+tqdm
+ipython
+easydict
+tensorboard
+tabulate
+scikit-learn
+pandas
+ffmpeg-python
+ftfy
+regex
+Pillow
\ No newline at end of file
diff --git a/res/model_overview.png b/res/model_overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..883c453be0b840dceb2060c32eb48f6182a3fcfd
Binary files /dev/null and b/res/model_overview.png differ
diff --git a/run_on_video/clip/__init__.py b/run_on_video/clip/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcc5619538c0f7c782508bdbd9587259d805e0d9
--- /dev/null
+++ b/run_on_video/clip/__init__.py
@@ -0,0 +1 @@
+from .clip import *
diff --git a/run_on_video/clip/bpe_simple_vocab_16e6.txt.gz b/run_on_video/clip/bpe_simple_vocab_16e6.txt.gz
new file mode 100644
index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113
--- /dev/null
+++ b/run_on_video/clip/bpe_simple_vocab_16e6.txt.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+size 1356917
diff --git a/run_on_video/clip/clip.py b/run_on_video/clip/clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..9000dd80de5171b359dc336d0c955cc2332d12d4
--- /dev/null
+++ b/run_on_video/clip/clip.py
@@ -0,0 +1,195 @@
+import hashlib
+import os
+import urllib
+import warnings
+from typing import Union, List
+
+import torch
+from PIL import Image
+from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
+from tqdm import tqdm
+
+from .model import build_model
+from .simple_tokenizer import SimpleTokenizer as _Tokenizer
+
+__all__ = ["available_models", "load", "tokenize"]
+_tokenizer = _Tokenizer()
+
+_MODELS = {
+ "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
+ "RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
+ "RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
+ "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
+}
+
+
+def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
+ os.makedirs(root, exist_ok=True)
+ filename = os.path.basename(url)
+
+ expected_sha256 = url.split("/")[-2]
+ download_target = os.path.join(root, filename)
+
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
+
+ if os.path.isfile(download_target):
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
+ return download_target
+ else:
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
+
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
+ with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
+ while True:
+ buffer = source.read(8192)
+ if not buffer:
+ break
+
+ output.write(buffer)
+ loop.update(len(buffer))
+
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
+ raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
+
+ return download_target
+
+
+def _transform(n_px):
+ return Compose([
+ Resize(n_px, interpolation=Image.BICUBIC),
+ CenterCrop(n_px),
+ lambda image: image.convert("RGB"),
+ ToTensor(),
+ Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
+ ])
+
+
+def available_models() -> List[str]:
+ """Returns the names of available CLIP models"""
+ return list(_MODELS.keys())
+
+
+def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
+ """Load a CLIP model
+
+ Parameters
+ ----------
+ name : str
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
+
+ device : Union[str, torch.device]
+ The device to put the loaded model
+
+ jit : bool
+        Whether to load the optimized JIT model (default) or the more hackable non-JIT model.
+
+ Returns
+ -------
+ model : torch.nn.Module
+ The CLIP model
+
+ preprocess : Callable[[PIL.Image], torch.Tensor]
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
+ """
+ if name in _MODELS:
+ model_path = _download(_MODELS[name])
+ elif os.path.isfile(name):
+ model_path = name
+ else:
+ raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
+
+ try:
+ # loading JIT archive
+ model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
+ state_dict = None
+ except RuntimeError:
+ # loading saved state dict
+ if jit:
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
+ jit = False
+ state_dict = torch.load(model_path, map_location="cpu")
+
+ if not jit:
+ model = build_model(state_dict or model.state_dict()).to(device)
+ if str(device) == "cpu":
+ model.float()
+ return model, _transform(model.visual.input_resolution)
+
+ # patch the device names
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
+
+ def patch_device(module):
+ graphs = [module.graph] if hasattr(module, "graph") else []
+ if hasattr(module, "forward1"):
+ graphs.append(module.forward1.graph)
+
+ for graph in graphs:
+ for node in graph.findAllNodes("prim::Constant"):
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
+ node.copyAttributes(device_node)
+
+ model.apply(patch_device)
+ patch_device(model.encode_image)
+ patch_device(model.encode_text)
+
+ # patch dtype to float32 on CPU
+ if str(device) == "cpu":
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
+ float_node = float_input.node()
+
+ def patch_float(module):
+ graphs = [module.graph] if hasattr(module, "graph") else []
+ if hasattr(module, "forward1"):
+ graphs.append(module.forward1.graph)
+
+ for graph in graphs:
+ for node in graph.findAllNodes("aten::to"):
+ inputs = list(node.inputs())
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
+ if inputs[i].node()["value"] == 5:
+ inputs[i].node().copyAttributes(float_node)
+
+ model.apply(patch_float)
+ patch_float(model.encode_image)
+ patch_float(model.encode_text)
+
+ model.float()
+
+ return model, _transform(model.input_resolution.item())
+
+
+def tokenize(texts: Union[str, List[str]], context_length: int = 77, max_valid_length: int = 32) -> torch.LongTensor:
+ """
+ Returns the tokenized representation of given input string(s)
+
+ Parameters
+ ----------
+ texts : Union[str, List[str]]
+ An input string or a list of input strings to tokenize
+
+ context_length : int
+ The context length to use; all CLIP models use 77 as the context length
+
+    max_valid_length : int
+        The maximum number of tokens kept per input, including the start and end tokens; longer inputs are truncated
+
+ Returns
+ -------
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
+ """
+ if isinstance(texts, str):
+ texts = [texts]
+
+ sot_token = _tokenizer.encoder["<|startoftext|>"]
+ eot_token = _tokenizer.encoder["<|endoftext|>"]
+ all_tokens = [[sot_token] + _tokenizer.encode(text)[:max_valid_length-2] + [eot_token] for text in texts]
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+
+ for i, tokens in enumerate(all_tokens):
+ if len(tokens) > context_length:
+ raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
+ result[i, :len(tokens)] = torch.tensor(tokens)
+
+ return result
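+
+
+# Usage sketch (illustrative, not part of the original module): tokenize a query for the text encoder.
+#   tokens = tokenize(["a man is playing guitar"], context_length=77)
+#   # -> torch.LongTensor of shape (1, 77); at most `max_valid_length` positions per row are non-zero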
diff --git a/run_on_video/clip/model.py b/run_on_video/clip/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..658fbbf5ed1379f9c0179fa456635a9ed6d4d4de
--- /dev/null
+++ b/run_on_video/clip/model.py
@@ -0,0 +1,432 @@
+from collections import OrderedDict
+from typing import Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1):
+ super().__init__()
+
+ # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
+
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = None
+ self.stride = stride
+
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
+ self.downsample = nn.Sequential(OrderedDict([
+ ("-1", nn.AvgPool2d(stride)),
+ ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
+ ("1", nn.BatchNorm2d(planes * self.expansion))
+ ]))
+
+ def forward(self, x: torch.Tensor):
+ identity = x
+
+ out = self.relu(self.bn1(self.conv1(x)))
+ out = self.relu(self.bn2(self.conv2(out)))
+ out = self.avgpool(out)
+ out = self.bn3(self.conv3(out))
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+ out = self.relu(out)
+ return out
+
+
+class AttentionPool2d(nn.Module):
+ def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
+ super().__init__()
+ self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
+ self.num_heads = num_heads
+
+ def forward(self, x):
+ x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
+ x, _ = F.multi_head_attention_forward(
+ query=x, key=x, value=x,
+ embed_dim_to_check=x.shape[-1],
+ num_heads=self.num_heads,
+ q_proj_weight=self.q_proj.weight,
+ k_proj_weight=self.k_proj.weight,
+ v_proj_weight=self.v_proj.weight,
+ in_proj_weight=None,
+ in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
+ bias_k=None,
+ bias_v=None,
+ add_zero_attn=False,
+ dropout_p=0,
+ out_proj_weight=self.c_proj.weight,
+ out_proj_bias=self.c_proj.bias,
+ use_separate_proj_weight=True,
+ training=self.training,
+ need_weights=False
+ )
+
+ return x[0]
+
+
+class ModifiedResNet(nn.Module):
+ """
+ A ResNet class that is similar to torchvision's but contains the following changes:
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
+ - The final pooling layer is a QKV attention instead of an average pool
+ """
+
+ def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
+ super().__init__()
+ self.output_dim = output_dim
+ self.input_resolution = input_resolution
+
+ # the 3-layer stem
+ self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(width // 2)
+ self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(width // 2)
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(width)
+ self.avgpool = nn.AvgPool2d(2)
+ self.relu = nn.ReLU(inplace=True)
+
+ # residual layers
+ self._inplanes = width # this is a *mutable* variable used during construction
+ self.layer1 = self._make_layer(width, layers[0])
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
+
+ embed_dim = width * 32 # the ResNet feature dimension
+ self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
+
+ def _make_layer(self, planes, blocks, stride=1):
+ layers = [Bottleneck(self._inplanes, planes, stride)]
+
+ self._inplanes = planes * Bottleneck.expansion
+ for _ in range(1, blocks):
+ layers.append(Bottleneck(self._inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def forward(self, x):
+ def stem(x):
+ for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
+ x = self.relu(bn(conv(x)))
+ x = self.avgpool(x)
+ return x
+
+ x = x.type(self.conv1.weight.dtype)
+ x = stem(x)
+ x = self.layer1(x)
+ x = self.layer2(x)
+ x = self.layer3(x)
+ x = self.layer4(x)
+ x = self.attnpool(x)
+
+ return x
+
+
+class LayerNorm(nn.LayerNorm):
+ """Subclass torch's LayerNorm to handle fp16."""
+
+ def forward(self, x: torch.Tensor):
+ orig_type = x.dtype
+ ret = super().forward(x.type(torch.float32))
+ return ret.type(orig_type)
+
+
+class QuickGELU(nn.Module):
+ def forward(self, x: torch.Tensor):
+ return x * torch.sigmoid(1.702 * x)
+
+
+class ResidualAttentionBlock(nn.Module):
+ def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
+ super().__init__()
+
+ self.attn = nn.MultiheadAttention(d_model, n_head)
+ self.ln_1 = LayerNorm(d_model)
+ self.mlp = nn.Sequential(OrderedDict([
+ ("c_fc", nn.Linear(d_model, d_model * 4)),
+ ("gelu", QuickGELU()),
+ ("c_proj", nn.Linear(d_model * 4, d_model))
+ ]))
+ self.ln_2 = LayerNorm(d_model)
+ self.attn_mask = attn_mask
+
+ def attention(self, x: torch.Tensor):
+ self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
+ return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
+
+ def forward(self, x: torch.Tensor):
+ x = x + self.attention(self.ln_1(x))
+ x = x + self.mlp(self.ln_2(x))
+ return x
+
+
+class Transformer(nn.Module):
+ def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
+ super().__init__()
+ self.width = width
+ self.layers = layers
+ self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
+
+ def forward(self, x: torch.Tensor):
+ return self.resblocks(x)
+
+
+class VisualTransformer(nn.Module):
+ def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
+ super().__init__()
+ self.input_resolution = input_resolution
+ self.output_dim = output_dim
+ self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
+
+ scale = width ** -0.5
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
+ self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
+ self.ln_pre = LayerNorm(width)
+
+ self.transformer = Transformer(width, layers, heads)
+
+ self.ln_post = LayerNorm(width)
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
+
+ def forward(self, x: torch.Tensor):
+ x = self.conv1(x) # shape = [*, width, grid, grid]
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
+ x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
+ x = x + self.positional_embedding.to(x.dtype)
+ x = self.ln_pre(x)
+
+ x = x.permute(1, 0, 2) # NLD -> LND
+ x = self.transformer(x)
+ x = x.permute(1, 0, 2) # LND -> NLD
+
+ x = self.ln_post(x[:, 0, :])
+
+ if self.proj is not None:
+ x = x @ self.proj
+
+ return x
+
+
+class CLIP(nn.Module):
+ def __init__(self,
+ embed_dim: int,
+ # vision
+ image_resolution: int,
+ vision_layers: Union[Tuple[int, int, int, int], int],
+ vision_width: int,
+ vision_patch_size: int,
+ # text
+ context_length: int,
+ vocab_size: int,
+ transformer_width: int,
+ transformer_heads: int,
+ transformer_layers: int
+ ):
+ super().__init__()
+
+ self.context_length = context_length
+
+ if isinstance(vision_layers, (tuple, list)):
+ vision_heads = vision_width * 32 // 64
+ self.visual = ModifiedResNet(
+ layers=vision_layers,
+ output_dim=embed_dim,
+ heads=vision_heads,
+ input_resolution=image_resolution,
+ width=vision_width
+ )
+ else:
+ vision_heads = vision_width // 64
+ self.visual = VisualTransformer(
+ input_resolution=image_resolution,
+ patch_size=vision_patch_size,
+ width=vision_width,
+ layers=vision_layers,
+ heads=vision_heads,
+ output_dim=embed_dim
+ )
+
+ self.transformer = Transformer(
+ width=transformer_width,
+ layers=transformer_layers,
+ heads=transformer_heads,
+ attn_mask=self.build_attention_mask()
+ )
+
+ self.vocab_size = vocab_size
+ self.token_embedding = nn.Embedding(vocab_size, transformer_width)
+ self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
+ self.ln_final = LayerNorm(transformer_width)
+
+ self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
+
+ self.initialize_parameters()
+
+ def initialize_parameters(self):
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
+ nn.init.normal_(self.positional_embedding, std=0.01)
+
+ if isinstance(self.visual, ModifiedResNet):
+ if self.visual.attnpool is not None:
+ std = self.visual.attnpool.c_proj.in_features ** -0.5
+ nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
+ nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
+ nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
+ nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
+
+ for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
+ for name, param in resnet_block.named_parameters():
+ if name.endswith("bn3.weight"):
+ nn.init.zeros_(param)
+
+ proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+ attn_std = self.transformer.width ** -0.5
+ fc_std = (2 * self.transformer.width) ** -0.5
+ for block in self.transformer.resblocks:
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+
+ if self.text_projection is not None:
+ nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
+
+ def build_attention_mask(self):
+ # lazily create causal attention mask, with full attention between the vision tokens
+ # pytorch uses additive attention mask; fill with -inf
+ mask = torch.empty(self.context_length, self.context_length)
+ mask.fill_(float("-inf"))
+ mask.triu_(1) # zero out the lower diagonal
+ return mask
+
+ @property
+ def dtype(self):
+ return self.visual.conv1.weight.dtype
+
+ def encode_image(self, image):
+ return self.visual(image.type(self.dtype))
+
+ def encode_text(self, text):
+ x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
+
+ x = x + self.positional_embedding.type(self.dtype)
+ x = x.permute(1, 0, 2) # NLD -> LND
+ x = self.transformer(x)
+ x = x.permute(1, 0, 2) # LND -> NLD
+ x = self.ln_final(x).type(self.dtype)
+
+ # x.shape = [batch_size, n_ctx, transformer.width]
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
+ eos_x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
+
+ return dict(last_hidden_state=x, pooler_output=eos_x)
+
+ def forward(self, image, text):
+ image_features = self.encode_image(image)
+ text_features = self.encode_text(text)
+
+ # normalized features
+ image_features = image_features / image_features.norm(dim=-1, keepdim=True)
+ text_features = text_features / text_features.norm(dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_image = logit_scale * image_features @ text_features.t()
+ logits_per_text = logit_scale * text_features @ image_features.t()
+
+ # shape = [global_batch_size, global_batch_size]
+ return logits_per_image, logits_per_text
+
+
+def convert_weights(model: nn.Module):
+ """Convert applicable model parameters to fp16"""
+
+ def _convert_weights_to_fp16(l):
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
+ l.weight.data = l.weight.data.half()
+ if l.bias is not None:
+ l.bias.data = l.bias.data.half()
+
+ if isinstance(l, nn.MultiheadAttention):
+ for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
+ tensor = getattr(l, attr)
+ if tensor is not None:
+ tensor.data = tensor.data.half()
+
+ for name in ["text_projection", "proj"]:
+ if hasattr(l, name):
+ attr = getattr(l, name)
+ if attr is not None:
+ attr.data = attr.data.half()
+
+ model.apply(_convert_weights_to_fp16)
+
+
+def build_model(state_dict: dict):
+ vit = "visual.proj" in state_dict
+
+ if vit:
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
+ vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
+ image_resolution = vision_patch_size * grid_size
+ else:
+ counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
+ vision_layers = tuple(counts)
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
+ output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
+ vision_patch_size = None
+ assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
+ image_resolution = output_width * 32
+
+ embed_dim = state_dict["text_projection"].shape[1]
+ context_length = state_dict["positional_embedding"].shape[0]
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
+ transformer_width = state_dict["ln_final.weight"].shape[0]
+ transformer_heads = transformer_width // 64
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
+
+ model = CLIP(
+ embed_dim,
+ image_resolution, vision_layers, vision_width, vision_patch_size,
+ context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
+ )
+
+ for key in ["input_resolution", "context_length", "vocab_size"]:
+ if key in state_dict:
+ del state_dict[key]
+
+ convert_weights(model)
+ model.load_state_dict(state_dict)
+ return model.eval()
diff --git a/run_on_video/clip/simple_tokenizer.py b/run_on_video/clip/simple_tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a66286b7d5019c6e221932a813768038f839c91
--- /dev/null
+++ b/run_on_video/clip/simple_tokenizer.py
@@ -0,0 +1,132 @@
+import gzip
+import html
+import os
+from functools import lru_cache
+
+import ftfy
+import regex as re
+
+
+@lru_cache()
+def default_bpe():
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
+    The reversible bpe codes work on unicode strings.
+    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
+    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+    This also avoids mapping to whitespace/control characters that the bpe code barfs on.
+ """
+ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8+n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+def get_pairs(word):
+ """Return set of symbol pairs in a word.
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+def basic_clean(text):
+ text = ftfy.fix_text(text)
+ text = html.unescape(html.unescape(text))
+ return text.strip()
+
+
+def whitespace_clean(text):
+ text = re.sub(r'\s+', ' ', text)
+ text = text.strip()
+ return text
+
+
+class SimpleTokenizer(object):
+ def __init__(self, bpe_path: str = default_bpe()):
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+ merges = merges[1:49152-256-2+1]
+ merges = [tuple(merge.split()) for merge in merges]
+ vocab = list(bytes_to_unicode().values())
+        vocab = vocab + [v+'</w>' for v in vocab]
+ for merge in merges:
+ vocab.append(''.join(merge))
+ vocab.extend(['<|startoftext|>', '<|endoftext|>'])
+ self.encoder = dict(zip(vocab, range(len(vocab))))
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
+ self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+        word = tuple(token[:-1]) + (token[-1] + '</w>',)
+ pairs = get_pairs(word)
+
+ if not pairs:
+            return token+'</w>'
+
+ while True:
+ bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ new_word.extend(word[i:j])
+ i = j
+                except ValueError:  # `first` not found in the remainder of `word`
+                    new_word.extend(word[i:])
+                    break
+
+ if word[i] == first and i < len(word)-1 and word[i+1] == second:
+ new_word.append(first+second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = ' '.join(word)
+ self.cache[token] = word
+ return word
+
+ def encode(self, text):
+ bpe_tokens = []
+ text = whitespace_clean(basic_clean(text)).lower()
+ for token in re.findall(self.pat, text):
+ token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+ bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+ return bpe_tokens
+
+ def decode(self, tokens):
+ text = ''.join([self.decoder[token] for token in tokens])
+        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+ return text
diff --git a/run_on_video/data_utils.py b/run_on_video/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1cd2959c2d7764cbc4629d3f0cbecf4344f6912
--- /dev/null
+++ b/run_on_video/data_utils.py
@@ -0,0 +1,183 @@
+import torch
+import os
+import numpy as np
+import ffmpeg
+import math
+import clip
+
+
+class ClipFeatureExtractor:
+ def __init__(self, framerate=1/2, size=224, centercrop=True, model_name_or_path="ViT-B/32", device="cuda"):
+ self.video_loader = VideoLoader(framerate=framerate, size=size, centercrop=centercrop)
+ print("Loading CLIP models")
+ self.clip_extractor, _ = clip.load(model_name_or_path, device=device, jit=False)
+ self.tokenizer = clip.tokenize
+ self.video_preprocessor = Preprocessing()
+ self.device = device
+
+ @torch.no_grad()
+ def encode_video(self, video_path: str, bsz=60):
+        video_frames, origin_video_frames = self.video_loader.read_video_from_file(video_path)  # (T, 3, H, W)
+ video_frames = self.video_preprocessor(video_frames)
+ n_frames = len(video_frames)
+ n_batch = int(math.ceil(n_frames / bsz))
+ video_features = []
+ for i in range(n_batch):
+ st_idx = i * bsz
+ ed_idx = (i+1) * bsz
+ _video_frames = video_frames[st_idx:ed_idx].to(self.device)
+ _video_features = self.clip_extractor.encode_image(_video_frames)
+ video_features.append(_video_features)
+ video_features = torch.cat(video_features, dim=0)
+        return video_features, origin_video_frames  # (T=#frames, d) features, (T, 3, H, W) original frames
+
+ @torch.no_grad()
+ def encode_text(self, text_list, bsz=60):
+ n_text = len(text_list)
+ n_batch = int(math.ceil(n_text / bsz))
+ text_features = []
+ for i in range(n_batch):
+ st_idx = i * bsz
+ ed_idx = (i+1) * bsz
+ encoded_texts = self.tokenizer(text_list[st_idx:ed_idx], context_length=77).to(self.device)
+ output = self.clip_extractor.encode_text(encoded_texts)
+ valid_lengths = (encoded_texts != 0).sum(1).tolist()
+ batch_last_hidden_states = output["last_hidden_state"]
+ for j, valid_len in enumerate(valid_lengths):
+ text_features.append(batch_last_hidden_states[j, :valid_len])
+ return text_features # List([L_j, d]) torch tensor
+
+
+def convert_to_float(frac_str):
+ try:
+ return float(frac_str)
+ except ValueError:
+ try:
+ num, denom = frac_str.split('/')
+ except ValueError:
+ return None
+ try:
+ leading, num = num.split(' ')
+ except ValueError:
+ return float(num) / float(denom)
+ if float(leading) < 0:
+ sign_mult = -1
+ else:
+ sign_mult = 1
+ return float(leading) + sign_mult * (float(num) / float(denom))
+
+
+class Normalize(object):
+
+ def __init__(self, mean, std):
+ self.mean = torch.FloatTensor(mean).view(1, 3, 1, 1)
+ self.std = torch.FloatTensor(std).view(1, 3, 1, 1)
+
+ def __call__(self, tensor):
+ tensor = (tensor - self.mean) / (self.std + 1e-8)
+ return tensor
+
+
+class Preprocessing(object):
+
+ def __init__(self):
+ self.norm = Normalize(
+ mean=[0.48145466, 0.4578275, 0.40821073],
+ std=[0.26862954, 0.26130258, 0.27577711])
+
+ def __call__(self, tensor):
+ tensor = tensor / 255.0
+ tensor = self.norm(tensor)
+ return tensor
+
+
+class VideoLoader:
+ """Pytorch video loader.
+ Copied and modified from:
+ https://github.com/linjieli222/HERO_Video_Feature_Extractor/blob/main/clip/video_loader.py
+ """
+ def __init__(
+ self,
+ framerate=1/2,
+ size=224,
+ centercrop=True,
+ ):
+ self.centercrop = centercrop
+ self.size = size
+ self.framerate = framerate
+
+ def _get_video_info(self, video_path):
+ probe = ffmpeg.probe(video_path)
+ video_stream = next((stream for stream in probe['streams']
+ if stream['codec_type'] == 'video'), None)
+ width = int(video_stream['width'])
+ height = int(video_stream['height'])
+ if 'rotate' in video_stream['tags']:
+ rotate = int(video_stream['tags']['rotate'])
+ if rotate == 90 or rotate == 270:
+ width, height = int(video_stream['height']), int(video_stream['width'])
+ fps = math.floor(convert_to_float(video_stream['avg_frame_rate']))
+ try:
+ frames_length = int(video_stream['nb_frames'])
+ duration = float(video_stream['duration'])
+ except Exception:
+ frames_length, duration = -1, -1
+ info = {"duration": duration, "frames_length": frames_length,
+ "fps": fps, "height": height, "width": width}
+ return info
+
+ def _get_output_dim(self, h, w):
+ if isinstance(self.size, tuple) and len(self.size) == 2:
+ return self.size
+ elif h >= w:
+ return int(h * self.size / w), self.size
+ else:
+ return self.size, int(w * self.size / h)
+
+ def read_video_from_file(self, video_path):
+ try:
+ info = self._get_video_info(video_path)
+ h, w = info["height"], info["width"]
+ except Exception:
+ print('ffprobe failed at: {}'.format(video_path))
+ return {'video': torch.zeros(1), 'input': video_path,
+ 'info': {}}
+ height, width = self._get_output_dim(h, w)
+ try:
+ duration = info["duration"]
+ fps = self.framerate
+ if duration > 0 and duration < 1/fps+0.1:
+ fps = 2/max(int(duration), 1)
+ print(duration, fps)
+ except Exception:
+ fps = self.framerate
+ cmd = (
+ ffmpeg
+ .input(video_path)
+ .filter('fps', fps=fps)
+ .filter('scale', width, height)
+ )
+ original_size_out, _ = (
+ cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
+ .run(capture_stdout=True, quiet=True)
+ )
+ original_size_video = np.frombuffer(original_size_out, np.uint8).reshape(
+ [-1, height, width, 3])
+ original_size_video = torch.from_numpy(original_size_video.astype('float32'))
+ original_size_video = original_size_video.permute(0, 3, 1, 2)
+ if self.centercrop:
+ x = int((width - self.size) / 2.0)
+ y = int((height - self.size) / 2.0)
+ cmd = cmd.crop(x, y, self.size, self.size)
+ out, _ = (
+ cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
+ .run(capture_stdout=True, quiet=True)
+ )
+ if self.centercrop and isinstance(self.size, int):
+ height, width = self.size, self.size
+ video = np.frombuffer(out, np.uint8).reshape(
+ [-1, height, width, 3])
+ video = torch.from_numpy(video.astype('float32'))
+ video = video.permute(0, 3, 1, 2)
+
+ return video, original_size_video
diff --git a/run_on_video/dataset.py b/run_on_video/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b46fa69a7b421b709e2a3dd97f500627711dea4
--- /dev/null
+++ b/run_on_video/dataset.py
@@ -0,0 +1,72 @@
+import torch
+from torch.utils.data import Dataset
+import csv
+import os
+import numpy as np
+
+
+class HighlightDataset(Dataset):
+ """Face Landmarks dataset."""
+
+ def __init__(self, root_dir, transform=None):
+ """
+ Arguments:
+ csv_file (string): Path to the csv file with annotations.
+ root_dir (string): Directory with all datas including videos and annotations.
+ """
+ self.root_dir = root_dir
+ self.video_dir = os.path.join(root_dir, "videos")
+ self.anno_path = os.path.join(root_dir, "ydata-tvsum50-anno.tsv")
+
+ #read annotations
+ with open(self.anno_path, newline='') as f:
+ reader = csv.reader(f, delimiter='\t')
+ raw_annotations = list(reader)
+
+ self.num_annotator = 20
+ self.annotations = self.parse_annotations(raw_annotations) # {video_id: [importance scores]}
+
+ #get list of videos
+ self.video_list = os.listdir(self.video_dir)
+
+ def parse_annotations(self, annotations):
+ '''
+ format of annotations:
+ [[video_id, video_category, importance score], ...]
+ '''
+ #separate annotations into chunks of length 20
+ parsed_annotations = {}
+ annotations_per_video = [annotations[i:i + self.num_annotator] for i in range(0, len(annotations), self.num_annotator)]
+ for anno_video in annotations_per_video:
+ video_id = anno_video[0][0]
+ video_category = anno_video[0][1]
+ #get importance score
+ #anno[2] is a string of scores separated by commas
+ importance_score = []
+ for anno in anno_video:
+ anno[2] = anno[2].split(',')
+ anno[2] = [float(score) for score in anno[2]]
+ importance_score.append(anno[2])
+ importance_score = np.array(importance_score)
+
+ #get average importance score
+ parsed_annotations[video_id] = np.mean(importance_score, axis=0)
+
+ return parsed_annotations
+
+
+ def __len__(self):
+ return len(self.video_list)
+
+ def __getitem__(self, idx):
+ if torch.is_tensor(idx):
+ idx = idx.tolist()
+
+ #should return frames and scores
+ video_name = self.video_list[idx]
+ video_id = video_name.split('.')[0]
+ video_path = os.path.join(self.video_dir, video_name)
+
+ #get annotations
+ annotations = self.annotations[video_id]
+
+ return video_path, annotations
\ No newline at end of file
diff --git a/run_on_video/eval.py b/run_on_video/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..37b64912cad49d76dece7f39e2206f43f3280ee2
--- /dev/null
+++ b/run_on_video/eval.py
@@ -0,0 +1,10 @@
+from dataset import HighlightDataset
+
+
+
+
+
+
+if __name__ == "__main__":
+
+ dataset = HighlightDataset(root_dir="../data/tvsum")
diff --git a/run_on_video/example/RoripwjYFp8_60.0_210.0.mp4 b/run_on_video/example/RoripwjYFp8_60.0_210.0.mp4
new file mode 100644
index 0000000000000000000000000000000000000000..6d8eab9a7a5a7456c249bebc59d372cb153b6418
--- /dev/null
+++ b/run_on_video/example/RoripwjYFp8_60.0_210.0.mp4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e847dbe79219afc6d70225117684a7e03563e1580b6f455ece13f89747cb5a50
+size 10404801
diff --git a/run_on_video/example/queries.jsonl b/run_on_video/example/queries.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..6e7b3fe4da46f2558227300650054a4f8b5cd054
--- /dev/null
+++ b/run_on_video/example/queries.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f093f3bba5afb018b9c3a1319eda89a3dca042b53acb02d529db728d2157805
+size 307
diff --git a/run_on_video/model_utils.py b/run_on_video/model_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..24c080b2dcbbecf57fc88d25e3176cc1927330a6
--- /dev/null
+++ b/run_on_video/model_utils.py
@@ -0,0 +1,32 @@
+import torch
+from moment_detr.model import build_transformer, build_position_encoding, MomentDETR
+
+
+def build_inference_model(ckpt_path, **kwargs):
+ ckpt = torch.load(ckpt_path, map_location="cpu")
+ args = ckpt["opt"]
+ if len(kwargs) > 0: # used to overwrite default args
+ args.update(kwargs)
+ transformer = build_transformer(args)
+ position_embedding, txt_position_embedding = build_position_encoding(args)
+
+ model = MomentDETR(
+ transformer,
+ position_embedding,
+ txt_position_embedding,
+ txt_dim=args.t_feat_dim,
+ vid_dim=args.v_feat_dim,
+ num_queries=args.num_queries,
+ input_dropout=args.input_dropout,
+ aux_loss=args.aux_loss,
+ contrastive_align_loss=args.contrastive_align_loss,
+ contrastive_hdim=args.contrastive_hdim,
+ span_loss_type=args.span_loss_type,
+ use_txt_pos=args.use_txt_pos,
+ n_input_proj=args.n_input_proj,
+ )
+
+ model.load_state_dict(ckpt["model"])
+ return model
+
+
diff --git a/run_on_video/moment_detr_ckpt/README.md b/run_on_video/moment_detr_ckpt/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e1814623c02fe04e4d4db487cb80aa551b1b2430
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/README.md
@@ -0,0 +1,9 @@
+To simplify the feature extraction pipeline,
+this model checkpoint [model_best.ckpt](model_best.ckpt) is trained with only CLIP image and text features as input.
+It is trained from scratch, without ASR pre-training,
+so it may perform worse than the model reported in the paper.
+
+In addition to the model checkpoint, this directory also
+contains multiple files from its training process,
+including the training/evaluation log files,
+training configurations, and prediction files on the QVHighlights val split.
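+
+A minimal loading sketch is shown below (assuming the `run_on_video` directory is on `PYTHONPATH`
+so that `model_utils.py` from this repo is importable; the relative checkpoint path is illustrative):
+
+```python
+# Hedged sketch: load this CLIP-feature-only checkpoint for inference.
+from model_utils import build_inference_model
+
+model = build_inference_model("moment_detr_ckpt/model_best.ckpt")  # illustrative path
+model = model.eval()
+# CLIP video/text features (see run_on_video/data_utils.py) can then be fed to the model.
+```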
diff --git a/run_on_video/moment_detr_ckpt/eval.log.txt b/run_on_video/moment_detr_ckpt/eval.log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b7bf817a9e884c93d0ee6ff669de00ceec7b4f0e
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/eval.log.txt
@@ -0,0 +1,40 @@
+2021_08_04_12_48_21 [Epoch] 004 [Loss] loss_span 1.9722 loss_giou 1.2180 loss_label 0.6783 class_error -0.0000 loss_saliency 0.3492 loss_span_0 1.6771 loss_giou_0 1.2267 loss_label_0 0.6398 class_error_0 -0.0000 loss_overall 7.7613 [Metrics] {"brief": {"MR-full-R1@0.5": 7.55, "MR-full-R1@0.7": 2.58, "MR-full-mAP": 2.7, "MR-full-mAP@0.5": 6.47, "MR-full-mAP@0.75": 1.99, "MR-long-mAP": 1.24, "MR-middle-mAP": 4.41, "MR-short-mAP": 0.63, "HL-min-Fair-mAP": 42.14, "HL-min-Fair-Hit1": 35.29, "HL-min-Good-mAP": 35.75, "HL-min-Good-Hit1": 34.19, "HL-min-VeryGood-mAP": 21.99, "HL-min-VeryGood-Hit1": 28.71}, "HL-min-Fair": {"HL-mAP": 42.14, "HL-Hit1": 35.29}, "HL-min-Good": {"HL-mAP": 35.75, "HL-Hit1": 34.19}, "HL-min-VeryGood": {"HL-mAP": 21.99, "HL-Hit1": 28.71}, "full": {"MR-mAP": {"0.5": 6.47, "0.55": 5.22, "0.6": 4.38, "0.65": 3.34, "0.7": 2.69, "0.75": 1.99, "0.8": 1.31, "0.85": 0.97, "0.9": 0.4, "0.95": 0.25, "average": 2.7}, "MR-R1": {"0.5": 7.55, "0.55": 6.06, "0.6": 4.84, "0.65": 3.48, "0.7": 2.58, "0.75": 1.87, "0.8": 1.16, "0.85": 0.84, "0.9": 0.32, "0.95": 0.26}}, "long": {"MR-mAP": {"0.5": 4.26, "0.55": 3.1, "0.6": 1.92, "0.65": 1.18, "0.7": 0.7, "0.75": 0.49, "0.8": 0.44, "0.85": 0.19, "0.9": 0.09, "0.95": 0.02, "average": 1.24}, "MR-R1": {"0.5": 3.48, "0.55": 2.61, "0.6": 1.39, "0.65": 0.7, "0.7": 0.17, "0.75": 0.17, "0.8": 0.17, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}, "middle": {"MR-mAP": {"0.5": 9.87, "0.55": 8.16, "0.6": 7.28, "0.65": 5.71, "0.7": 4.71, "0.75": 3.43, "0.8": 2.11, "0.85": 1.69, "0.9": 0.67, "0.95": 0.44, "average": 4.41}, "MR-R1": {"0.5": 9.09, "0.55": 7.42, "0.6": 6.58, "0.65": 4.81, "0.7": 3.76, "0.75": 2.72, "0.8": 1.57, "0.85": 1.25, "0.9": 0.42, "0.95": 0.31}}, "short": {"MR-mAP": {"0.5": 1.31, "0.55": 1.08, "0.6": 0.81, "0.65": 0.7, "0.7": 0.61, "0.75": 0.54, "0.8": 0.54, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23, "average": 0.63}, "MR-R1": {"0.5": 2.33, "0.55": 1.86, "0.6": 0.93, "0.65": 0.93, "0.7": 0.7, "0.75": 0.47, "0.8": 0.47, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_12_53_39 [Epoch] 009 [Loss] loss_span 0.6697 loss_giou 0.6066 loss_label 0.5144 class_error 7.8618 loss_saliency 0.3228 loss_span_0 0.6657 loss_giou_0 0.6339 loss_label_0 0.5316 class_error_0 12.6456 loss_overall 3.9447 [Metrics] {"brief": {"MR-full-R1@0.5": 13.94, "MR-full-R1@0.7": 4.26, "MR-full-mAP": 10.44, "MR-full-mAP@0.5": 24.95, "MR-full-mAP@0.75": 7.38, "MR-long-mAP": 15.34, "MR-middle-mAP": 9.86, "MR-short-mAP": 0.77, "HL-min-Fair-mAP": 43.41, "HL-min-Fair-Hit1": 35.55, "HL-min-Good-mAP": 36.85, "HL-min-Good-Hit1": 34.52, "HL-min-VeryGood-mAP": 22.74, "HL-min-VeryGood-Hit1": 28.84}, "HL-min-Fair": {"HL-mAP": 43.41, "HL-Hit1": 35.55}, "HL-min-Good": {"HL-mAP": 36.85, "HL-Hit1": 34.52}, "HL-min-VeryGood": {"HL-mAP": 22.74, "HL-Hit1": 28.84}, "full": {"MR-mAP": {"0.5": 24.95, "0.55": 20.02, "0.6": 16.46, "0.65": 12.17, "0.7": 9.75, "0.75": 7.38, "0.8": 5.45, "0.85": 3.65, "0.9": 2.68, "0.95": 1.93, "average": 10.44}, "MR-R1": {"0.5": 13.94, "0.55": 10.71, "0.6": 8.52, "0.65": 5.74, "0.7": 4.26, "0.75": 3.16, "0.8": 2.39, "0.85": 1.81, "0.9": 1.29, "0.95": 0.97}}, "long": {"MR-mAP": {"0.5": 35.7, "0.55": 29.14, "0.6": 23.71, "0.65": 17.74, "0.7": 13.43, "0.75": 10.15, "0.8": 8.23, "0.85": 6.45, "0.9": 4.84, "0.95": 4.01, "average": 15.34}, "MR-R1": {"0.5": 19.69, "0.55": 15.33, "0.6": 12.37, "0.65": 8.54, "0.7": 5.92, "0.75": 4.36, "0.8": 3.83, "0.85": 3.14, "0.9": 2.44, "0.95": 2.09}}, "middle": {"MR-mAP": {"0.5": 24.23, "0.55": 19.26, "0.6": 16.01, "0.65": 11.69, "0.7": 9.78, "0.75": 7.38, "0.8": 4.95, "0.85": 2.71, "0.9": 1.72, "0.95": 0.86, "average": 9.86}, "MR-R1": {"0.5": 10.14, "0.55": 7.63, "0.6": 6.17, "0.65": 4.08, "0.7": 3.24, "0.75": 2.51, "0.8": 1.57, "0.85": 1.04, "0.9": 0.63, "0.95": 0.31}}, "short": {"MR-mAP": {"0.5": 2.91, "0.55": 1.68, "0.6": 1.12, "0.65": 0.67, "0.7": 0.43, "0.75": 0.25, "0.8": 0.19, "0.85": 0.16, "0.9": 0.16, "0.95": 0.16, "average": 0.77}, "MR-R1": {"0.5": 1.4, "0.55": 1.17, "0.6": 0.47, "0.65": 0.23, "0.7": 0.23, "0.75": 0.0, "0.8": 0.0, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_12_59_02 [Epoch] 014 [Loss] loss_span 0.6164 loss_giou 0.5899 loss_label 0.5207 class_error 5.3474 loss_saliency 0.3045 loss_span_0 0.6267 loss_giou_0 0.5896 loss_label_0 0.5234 class_error_0 5.5158 loss_overall 3.7711 [Metrics] {"brief": {"MR-full-R1@0.5": 15.48, "MR-full-R1@0.7": 7.03, "MR-full-mAP": 11.47, "MR-full-mAP@0.5": 25.56, "MR-full-mAP@0.75": 8.37, "MR-long-mAP": 19.17, "MR-middle-mAP": 8.97, "MR-short-mAP": 1.14, "HL-min-Fair-mAP": 46.74, "HL-min-Fair-Hit1": 39.74, "HL-min-Good-mAP": 39.87, "HL-min-Good-Hit1": 38.45, "HL-min-VeryGood-mAP": 24.59, "HL-min-VeryGood-Hit1": 32.39}, "HL-min-Fair": {"HL-mAP": 46.74, "HL-Hit1": 39.74}, "HL-min-Good": {"HL-mAP": 39.87, "HL-Hit1": 38.45}, "HL-min-VeryGood": {"HL-mAP": 24.59, "HL-Hit1": 32.39}, "full": {"MR-mAP": {"0.5": 25.56, "0.55": 20.5, "0.6": 17.27, "0.65": 13.65, "0.7": 11.01, "0.75": 8.37, "0.8": 6.61, "0.85": 4.97, "0.9": 3.79, "0.95": 3.02, "average": 11.47}, "MR-R1": {"0.5": 15.48, "0.55": 12.71, "0.6": 10.65, "0.65": 8.39, "0.7": 7.03, "0.75": 5.55, "0.8": 4.45, "0.85": 3.55, "0.9": 2.84, "0.95": 2.39}}, "long": {"MR-mAP": {"0.5": 38.88, "0.55": 33.28, "0.6": 27.52, "0.65": 21.65, "0.7": 18.33, "0.75": 14.91, "0.8": 11.92, "0.85": 9.9, "0.9": 8.07, "0.95": 7.2, "average": 19.17}, "MR-R1": {"0.5": 24.91, "0.55": 21.78, "0.6": 18.29, "0.65": 15.16, "0.7": 13.59, "0.75": 11.32, "0.8": 9.23, "0.85": 7.84, "0.9": 6.62, "0.95": 6.1}}, "middle": {"MR-mAP": {"0.5": 22.46, "0.55": 17.08, "0.6": 14.76, "0.65": 11.55, "0.7": 8.84, "0.75": 5.97, "0.8": 4.34, "0.85": 2.55, "0.9": 1.6, "0.95": 0.59, "average": 8.97}, "MR-R1": {"0.5": 8.78, "0.55": 6.48, "0.6": 5.64, "0.65": 4.18, "0.7": 3.03, "0.75": 2.09, "0.8": 1.57, "0.85": 0.94, "0.9": 0.52, "0.95": 0.1}}, "short": {"MR-mAP": {"0.5": 3.8, "0.55": 2.34, "0.6": 1.66, "0.65": 1.18, "0.7": 0.83, "0.75": 0.59, "0.8": 0.42, "0.85": 0.18, "0.9": 0.18, "0.95": 0.18, "average": 1.14}, "MR-R1": {"0.5": 3.03, "0.55": 2.33, "0.6": 1.4, "0.65": 0.7, "0.7": 0.47, "0.75": 0.23, "0.8": 0.23, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_13_04_23 [Epoch] 019 [Loss] loss_span 0.6010 loss_giou 0.5839 loss_label 0.5096 class_error 5.2251 loss_saliency 0.2961 loss_span_0 0.6149 loss_giou_0 0.6061 loss_label_0 0.5012 class_error_0 6.0110 loss_overall 3.7127 [Metrics] {"brief": {"MR-full-R1@0.5": 19.42, "MR-full-R1@0.7": 9.42, "MR-full-mAP": 13.46, "MR-full-mAP@0.5": 29.34, "MR-full-mAP@0.75": 10.61, "MR-long-mAP": 21.95, "MR-middle-mAP": 10.91, "MR-short-mAP": 1.16, "HL-min-Fair-mAP": 48.98, "HL-min-Fair-Hit1": 40.58, "HL-min-Good-mAP": 41.64, "HL-min-Good-Hit1": 39.42, "HL-min-VeryGood-mAP": 25.49, "HL-min-VeryGood-Hit1": 32.52}, "HL-min-Fair": {"HL-mAP": 48.98, "HL-Hit1": 40.58}, "HL-min-Good": {"HL-mAP": 41.64, "HL-Hit1": 39.42}, "HL-min-VeryGood": {"HL-mAP": 25.49, "HL-Hit1": 32.52}, "full": {"MR-mAP": {"0.5": 29.34, "0.55": 23.78, "0.6": 20.06, "0.65": 16.02, "0.7": 13.38, "0.75": 10.61, "0.8": 7.91, "0.85": 5.78, "0.9": 4.43, "0.95": 3.27, "average": 13.46}, "MR-R1": {"0.5": 19.42, "0.55": 15.81, "0.6": 13.55, "0.65": 11.1, "0.7": 9.42, "0.75": 7.55, "0.8": 5.74, "0.85": 4.52, "0.9": 3.61, "0.95": 2.84}}, "long": {"MR-mAP": {"0.5": 43.84, "0.55": 36.86, "0.6": 30.39, "0.65": 24.64, "0.7": 21.83, "0.75": 17.61, "0.8": 14.09, "0.85": 12.45, "0.9": 9.62, "0.95": 8.15, "average": 21.95}, "MR-R1": {"0.5": 30.84, "0.55": 26.48, "0.6": 22.65, "0.65": 19.34, "0.7": 17.77, "0.75": 14.63, "0.8": 12.02, "0.85": 10.98, "0.9": 8.89, "0.95": 7.67}}, "middle": {"MR-mAP": {"0.5": 26.2, "0.55": 20.52, "0.6": 18.11, "0.65": 14.27, "0.7": 11.02, "0.75": 8.45, "0.8": 5.58, "0.85": 2.63, "0.9": 1.87, "0.95": 0.45, "average": 10.91}, "MR-R1": {"0.5": 11.39, "0.55": 8.67, "0.6": 7.73, "0.65": 6.06, "0.7": 4.39, "0.75": 3.34, "0.8": 1.99, "0.85": 0.73, "0.9": 0.52, "0.95": 0.0}}, "short": {"MR-mAP": {"0.5": 4.1, "0.55": 2.49, "0.6": 1.64, "0.65": 1.01, "0.7": 0.67, "0.75": 0.62, "0.8": 0.5, "0.85": 0.18, "0.9": 0.18, "0.95": 0.18, "average": 1.16}, "MR-R1": {"0.5": 3.5, "0.55": 2.33, "0.6": 1.4, "0.65": 0.7, "0.7": 0.47, "0.75": 0.23, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_09_44 [Epoch] 024 [Loss] loss_span 0.5764 loss_giou 0.5700 loss_label 0.5057 class_error 10.9184 loss_saliency 0.2787 loss_span_0 0.6011 loss_giou_0 0.5872 loss_label_0 0.4939 class_error_0 9.9908 loss_overall 3.6131 [Metrics] {"brief": {"MR-full-R1@0.5": 28.39, "MR-full-R1@0.7": 12.52, "MR-full-mAP": 16.73, "MR-full-mAP@0.5": 36.28, "MR-full-mAP@0.75": 13.75, "MR-long-mAP": 24.64, "MR-middle-mAP": 15.5, "MR-short-mAP": 1.21, "HL-min-Fair-mAP": 52.92, "HL-min-Fair-Hit1": 45.48, "HL-min-Good-mAP": 44.97, "HL-min-Good-Hit1": 44.39, "HL-min-VeryGood-mAP": 27.55, "HL-min-VeryGood-Hit1": 37.81}, "HL-min-Fair": {"HL-mAP": 52.92, "HL-Hit1": 45.48}, "HL-min-Good": {"HL-mAP": 44.97, "HL-Hit1": 44.39}, "HL-min-VeryGood": {"HL-mAP": 27.55, "HL-Hit1": 37.81}, "full": {"MR-mAP": {"0.5": 36.28, "0.55": 29.23, "0.6": 25.13, "0.65": 20.19, "0.7": 16.34, "0.75": 13.75, "0.8": 10.27, "0.85": 7.28, "0.9": 5.41, "0.95": 3.39, "average": 16.73}, "MR-R1": {"0.5": 28.39, "0.55": 22.58, "0.6": 19.23, "0.65": 15.42, "0.7": 12.52, "0.75": 10.77, "0.8": 7.94, "0.85": 5.94, "0.9": 4.39, "0.95": 2.97}}, "long": {"MR-mAP": {"0.5": 50.63, "0.55": 42.09, "0.6": 36.69, "0.65": 29.97, "0.7": 23.21, "0.75": 19.48, "0.8": 14.99, "0.85": 11.99, "0.9": 9.51, "0.95": 7.86, "average": 24.64}, "MR-R1": {"0.5": 38.33, "0.55": 31.18, "0.6": 26.83, "0.65": 21.95, "0.7": 17.77, "0.75": 15.51, "0.8": 12.02, "0.85": 9.76, "0.9": 8.36, "0.95": 7.32}}, "middle": {"MR-mAP": {"0.5": 34.92, "0.55": 27.96, "0.6": 24.17, "0.65": 19.19, "0.7": 16.04, "0.75": 13.23, "0.8": 9.28, "0.85": 5.77, "0.9": 3.56, "0.95": 0.89, "average": 15.5}, "MR-R1": {"0.5": 21.0, "0.55": 16.93, "0.6": 14.73, "0.65": 11.6, "0.7": 9.61, "0.75": 8.15, "0.8": 5.64, "0.85": 3.76, "0.9": 2.09, "0.95": 0.42}}, "short": {"MR-mAP": {"0.5": 4.84, "0.55": 2.64, "0.6": 1.44, "0.65": 1.07, "0.7": 0.61, "0.75": 0.46, "0.8": 0.35, "0.85": 0.24, "0.9": 0.24, "0.95": 0.24, "average": 1.21}, "MR-R1": {"0.5": 4.43, "0.55": 2.1, "0.6": 0.7, "0.65": 0.47, "0.7": 0.0, "0.75": 0.0, "0.8": 0.0, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_15_06 [Epoch] 029 [Loss] loss_span 0.5769 loss_giou 0.5428 loss_label 0.4959 class_error 9.5076 loss_saliency 0.2722 loss_span_0 0.5945 loss_giou_0 0.5571 loss_label_0 0.4819 class_error_0 9.3947 loss_overall 3.5213 [Metrics] {"brief": {"MR-full-R1@0.5": 34.71, "MR-full-R1@0.7": 17.35, "MR-full-mAP": 21.46, "MR-full-mAP@0.5": 43.15, "MR-full-mAP@0.75": 18.03, "MR-long-mAP": 32.91, "MR-middle-mAP": 18.88, "MR-short-mAP": 1.31, "HL-min-Fair-mAP": 57.62, "HL-min-Fair-Hit1": 53.03, "HL-min-Good-mAP": 48.91, "HL-min-Good-Hit1": 51.42, "HL-min-VeryGood-mAP": 29.9, "HL-min-VeryGood-Hit1": 42.9}, "HL-min-Fair": {"HL-mAP": 57.62, "HL-Hit1": 53.03}, "HL-min-Good": {"HL-mAP": 48.91, "HL-Hit1": 51.42}, "HL-min-VeryGood": {"HL-mAP": 29.9, "HL-Hit1": 42.9}, "full": {"MR-mAP": {"0.5": 43.15, "0.55": 37.13, "0.6": 32.22, "0.65": 26.36, "0.7": 22.56, "0.75": 18.03, "0.8": 13.93, "0.85": 10.34, "0.9": 6.92, "0.95": 3.9, "average": 21.46}, "MR-R1": {"0.5": 34.71, "0.55": 29.94, "0.6": 25.35, "0.65": 20.0, "0.7": 17.35, "0.75": 14.06, "0.8": 10.71, "0.85": 8.0, "0.9": 5.29, "0.95": 3.35}}, "long": {"MR-mAP": {"0.5": 59.94, "0.55": 55.13, "0.6": 47.52, "0.65": 41.28, "0.7": 34.96, "0.75": 28.71, "0.8": 22.22, "0.85": 18.48, "0.9": 12.54, "0.95": 8.34, "average": 32.91}, "MR-R1": {"0.5": 46.86, "0.55": 42.33, "0.6": 35.89, "0.65": 30.66, "0.7": 26.48, "0.75": 21.95, "0.8": 16.72, "0.85": 14.29, "0.9": 9.93, "0.95": 7.67}}, "middle": {"MR-mAP": {"0.5": 41.59, "0.55": 34.3, "0.6": 30.21, "0.65": 22.93, "0.7": 19.53, "0.75": 15.13, "0.8": 11.64, "0.85": 7.28, "0.9": 4.58, "0.95": 1.62, "average": 18.88}, "MR-R1": {"0.5": 26.12, "0.55": 21.73, "0.6": 19.02, "0.65": 13.79, "0.7": 12.02, "0.75": 9.51, "0.8": 7.21, "0.85": 4.39, "0.9": 2.61, "0.95": 0.84}}, "short": {"MR-mAP": {"0.5": 5.12, "0.55": 2.72, "0.6": 1.8, "0.65": 0.99, "0.7": 0.75, "0.75": 0.49, "0.8": 0.43, "0.85": 0.25, "0.9": 0.25, "0.95": 0.25, "average": 1.31}, "MR-R1": {"0.5": 4.43, "0.55": 3.03, "0.6": 1.17, "0.65": 0.47, "0.7": 0.47, "0.75": 0.23, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_20_24 [Epoch] 034 [Loss] loss_span 0.5500 loss_giou 0.5458 loss_label 0.5259 class_error 15.6010 loss_saliency 0.2497 loss_span_0 0.5689 loss_giou_0 0.5526 loss_label_0 0.4940 class_error_0 15.0615 loss_overall 3.4868 [Metrics] {"brief": {"MR-full-R1@0.5": 38.71, "MR-full-R1@0.7": 20.19, "MR-full-mAP": 23.14, "MR-full-mAP@0.5": 46.38, "MR-full-mAP@0.75": 20.3, "MR-long-mAP": 33.91, "MR-middle-mAP": 21.61, "MR-short-mAP": 1.17, "HL-min-Fair-mAP": 60.76, "HL-min-Fair-Hit1": 56.06, "HL-min-Good-mAP": 51.64, "HL-min-Good-Hit1": 54.65, "HL-min-VeryGood-mAP": 31.67, "HL-min-VeryGood-Hit1": 45.68}, "HL-min-Fair": {"HL-mAP": 60.76, "HL-Hit1": 56.06}, "HL-min-Good": {"HL-mAP": 51.64, "HL-Hit1": 54.65}, "HL-min-VeryGood": {"HL-mAP": 31.67, "HL-Hit1": 45.68}, "full": {"MR-mAP": {"0.5": 46.38, "0.55": 39.64, "0.6": 34.86, "0.65": 29.5, "0.7": 24.99, "0.75": 20.3, "0.8": 15.51, "0.85": 10.11, "0.9": 6.72, "0.95": 3.36, "average": 23.14}, "MR-R1": {"0.5": 38.71, "0.55": 32.84, "0.6": 28.77, "0.65": 24.32, "0.7": 20.19, "0.75": 16.13, "0.8": 12.32, "0.85": 8.06, "0.9": 5.35, "0.95": 2.77}}, "long": {"MR-mAP": {"0.5": 61.73, "0.55": 55.1, "0.6": 49.35, "0.65": 42.75, "0.7": 36.23, "0.75": 30.36, "0.8": 24.92, "0.85": 18.36, "0.9": 12.45, "0.95": 7.83, "average": 33.91}, "MR-R1": {"0.5": 50.17, "0.55": 43.55, "0.6": 38.68, "0.65": 33.1, "0.7": 27.87, "0.75": 23.0, "0.8": 19.34, "0.85": 14.29, "0.9": 9.76, "0.95": 6.45}}, "middle": {"MR-mAP": {"0.5": 47.01, "0.55": 39.41, "0.6": 34.4, "0.65": 28.21, "0.7": 23.7, "0.75": 18.4, "0.8": 13.1, "0.85": 7.02, "0.9": 4.06, "0.95": 0.83, "average": 21.61}, "MR-R1": {"0.5": 31.03, "0.55": 26.12, "0.6": 22.88, "0.65": 19.33, "0.7": 15.88, "0.75": 12.23, "0.8": 8.25, "0.85": 4.49, "0.9": 2.82, "0.95": 0.63}}, "short": {"MR-mAP": {"0.5": 4.7, "0.55": 2.32, "0.6": 1.54, "0.65": 0.84, "0.7": 0.67, "0.75": 0.57, "0.8": 0.46, "0.85": 0.2, "0.9": 0.2, "0.95": 0.2, "average": 1.17}, "MR-R1": {"0.5": 3.5, "0.55": 2.1, "0.6": 1.17, "0.65": 0.47, "0.7": 0.23, "0.75": 0.23, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_25_46 [Epoch] 039 [Loss] loss_span 0.5464 loss_giou 0.5469 loss_label 0.4978 class_error 12.7535 loss_saliency 0.2423 loss_span_0 0.5721 loss_giou_0 0.5566 loss_label_0 0.5010 class_error_0 13.8361 loss_overall 3.4631 [Metrics] {"brief": {"MR-full-R1@0.5": 40.65, "MR-full-R1@0.7": 22.52, "MR-full-mAP": 24.83, "MR-full-mAP@0.5": 46.9, "MR-full-mAP@0.75": 22.06, "MR-long-mAP": 37.22, "MR-middle-mAP": 22.19, "MR-short-mAP": 1.63, "HL-min-Fair-mAP": 62.7, "HL-min-Fair-Hit1": 60.58, "HL-min-Good-mAP": 53.32, "HL-min-Good-Hit1": 58.97, "HL-min-VeryGood-mAP": 32.74, "HL-min-VeryGood-Hit1": 48.77}, "HL-min-Fair": {"HL-mAP": 62.7, "HL-Hit1": 60.58}, "HL-min-Good": {"HL-mAP": 53.32, "HL-Hit1": 58.97}, "HL-min-VeryGood": {"HL-mAP": 32.74, "HL-Hit1": 48.77}, "full": {"MR-mAP": {"0.5": 46.9, "0.55": 41.04, "0.6": 36.28, "0.65": 30.93, "0.7": 26.67, "0.75": 22.06, "0.8": 17.06, "0.85": 12.7, "0.9": 8.95, "0.95": 5.68, "average": 24.83}, "MR-R1": {"0.5": 40.65, "0.55": 35.03, "0.6": 30.45, "0.65": 26.13, "0.7": 22.52, "0.75": 18.58, "0.8": 14.39, "0.85": 10.9, "0.9": 7.94, "0.95": 5.16}}, "long": {"MR-mAP": {"0.5": 63.2, "0.55": 58.41, "0.6": 52.96, "0.65": 47.95, "0.7": 41.11, "0.75": 34.41, "0.8": 27.6, "0.85": 21.27, "0.9": 14.78, "0.95": 10.49, "average": 37.22}, "MR-R1": {"0.5": 51.57, "0.55": 47.39, "0.6": 42.86, "0.65": 38.68, "0.7": 32.93, "0.75": 27.35, "0.8": 21.95, "0.85": 17.07, "0.9": 12.37, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 46.31, "0.55": 38.96, "0.6": 33.67, "0.65": 27.37, "0.7": 23.16, "0.75": 18.74, "0.8": 13.75, "0.85": 9.73, "0.9": 6.9, "0.95": 3.32, "average": 22.19}, "MR-R1": {"0.5": 33.23, "0.55": 27.27, "0.6": 23.3, "0.65": 19.02, "0.7": 16.61, "0.75": 13.69, "0.8": 10.14, "0.85": 7.42, "0.9": 5.43, "0.95": 2.82}}, "short": {"MR-mAP": {"0.5": 5.23, "0.55": 3.69, "0.6": 2.55, "0.65": 1.46, "0.7": 1.04, "0.75": 0.95, "0.8": 0.61, "0.85": 0.25, "0.9": 0.25, "0.95": 0.25, "average": 1.63}, "MR-R1": {"0.5": 3.73, "0.55": 2.33, "0.6": 0.7, "0.65": 0.23, "0.7": 0.23, "0.75": 0.0, "0.8": 0.0, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_31_05 [Epoch] 044 [Loss] loss_span 0.5647 loss_giou 0.5367 loss_label 0.5061 class_error 14.7438 loss_saliency 0.2398 loss_span_0 0.5806 loss_giou_0 0.5614 loss_label_0 0.5083 class_error_0 16.3073 loss_overall 3.4976 [Metrics] {"brief": {"MR-full-R1@0.5": 43.48, "MR-full-R1@0.7": 24.06, "MR-full-mAP": 25.53, "MR-full-mAP@0.5": 49.58, "MR-full-mAP@0.75": 21.68, "MR-long-mAP": 35.35, "MR-middle-mAP": 24.45, "MR-short-mAP": 1.65, "HL-min-Fair-mAP": 63.15, "HL-min-Fair-Hit1": 60.26, "HL-min-Good-mAP": 53.71, "HL-min-Good-Hit1": 58.84, "HL-min-VeryGood-mAP": 32.99, "HL-min-VeryGood-Hit1": 49.03}, "HL-min-Fair": {"HL-mAP": 63.15, "HL-Hit1": 60.26}, "HL-min-Good": {"HL-mAP": 53.71, "HL-Hit1": 58.84}, "HL-min-VeryGood": {"HL-mAP": 32.99, "HL-Hit1": 49.03}, "full": {"MR-mAP": {"0.5": 49.58, "0.55": 43.18, "0.6": 38.95, "0.65": 32.61, "0.7": 28.07, "0.75": 21.68, "0.8": 16.68, "0.85": 11.53, "0.9": 8.27, "0.95": 4.72, "average": 25.53}, "MR-R1": {"0.5": 43.48, "0.55": 36.9, "0.6": 32.84, "0.65": 27.61, "0.7": 24.06, "0.75": 18.52, "0.8": 14.32, "0.85": 9.87, "0.9": 6.97, "0.95": 4.19}}, "long": {"MR-mAP": {"0.5": 63.33, "0.55": 56.54, "0.6": 52.01, "0.65": 44.34, "0.7": 38.03, "0.75": 31.65, "0.8": 26.43, "0.85": 18.44, "0.9": 13.75, "0.95": 9.0, "average": 35.35}, "MR-R1": {"0.5": 52.26, "0.55": 45.12, "0.6": 40.77, "0.65": 34.32, "0.7": 29.79, "0.75": 24.56, "0.8": 20.38, "0.85": 14.29, "0.9": 10.45, "0.95": 7.49}}, "middle": {"MR-mAP": {"0.5": 50.76, "0.55": 43.46, "0.6": 38.67, "0.65": 31.76, "0.7": 27.51, "0.75": 20.02, "0.8": 13.95, "0.85": 9.52, "0.9": 6.21, "0.95": 2.59, "average": 24.45}, "MR-R1": {"0.5": 36.89, "0.55": 31.87, "0.6": 28.42, "0.65": 23.93, "0.7": 21.11, "0.75": 15.26, "0.8": 10.97, "0.85": 7.42, "0.9": 5.02, "0.95": 2.3}}, "short": {"MR-mAP": {"0.5": 5.53, "0.55": 3.53, "0.6": 2.55, "0.65": 1.72, "0.7": 1.03, "0.75": 0.82, "0.8": 0.62, "0.85": 0.24, "0.9": 0.24, "0.95": 0.24, "average": 1.65}, "MR-R1": {"0.5": 4.9, "0.55": 1.86, "0.6": 0.7, "0.65": 0.47, "0.7": 0.0, "0.75": 0.0, "0.8": 0.0, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_36_26 [Epoch] 049 [Loss] loss_span 0.5534 loss_giou 0.5409 loss_label 0.4937 class_error 13.0729 loss_saliency 0.2346 loss_span_0 0.5726 loss_giou_0 0.5634 loss_label_0 0.4787 class_error_0 13.1292 loss_overall 3.4374 [Metrics] {"brief": {"MR-full-R1@0.5": 42.45, "MR-full-R1@0.7": 24.32, "MR-full-mAP": 25.29, "MR-full-mAP@0.5": 48.7, "MR-full-mAP@0.75": 22.59, "MR-long-mAP": 35.09, "MR-middle-mAP": 24.3, "MR-short-mAP": 1.87, "HL-min-Fair-mAP": 64.45, "HL-min-Fair-Hit1": 62.71, "HL-min-Good-mAP": 54.93, "HL-min-Good-Hit1": 61.68, "HL-min-VeryGood-mAP": 33.54, "HL-min-VeryGood-Hit1": 51.29}, "HL-min-Fair": {"HL-mAP": 64.45, "HL-Hit1": 62.71}, "HL-min-Good": {"HL-mAP": 54.93, "HL-Hit1": 61.68}, "HL-min-VeryGood": {"HL-mAP": 33.54, "HL-Hit1": 51.29}, "full": {"MR-mAP": {"0.5": 48.7, "0.55": 43.06, "0.6": 38.63, "0.65": 32.31, "0.7": 27.6, "0.75": 22.59, "0.8": 16.69, "0.85": 11.54, "0.9": 7.34, "0.95": 4.43, "average": 25.29}, "MR-R1": {"0.5": 42.45, "0.55": 37.35, "0.6": 33.48, "0.65": 28.13, "0.7": 24.32, "0.75": 19.94, "0.8": 14.77, "0.85": 9.87, "0.9": 6.39, "0.95": 3.94}}, "long": {"MR-mAP": {"0.5": 59.88, "0.55": 55.93, "0.6": 50.09, "0.65": 44.76, "0.7": 38.97, "0.75": 33.17, "0.8": 26.82, "0.85": 19.73, "0.9": 12.89, "0.95": 8.67, "average": 35.09}, "MR-R1": {"0.5": 50.35, "0.55": 46.69, "0.6": 41.64, "0.65": 36.93, "0.7": 32.58, "0.75": 28.05, "0.8": 22.65, "0.85": 16.38, "0.9": 10.45, "0.95": 7.84}}, "middle": {"MR-mAP": {"0.5": 50.85, "0.55": 44.02, "0.6": 39.62, "0.65": 31.68, "0.7": 26.59, "0.75": 20.67, "0.8": 13.47, "0.85": 8.64, "0.9": 5.29, "0.95": 2.19, "average": 24.3}, "MR-R1": {"0.5": 36.99, "0.55": 31.87, "0.6": 28.74, "0.65": 23.3, "0.7": 19.75, "0.75": 15.36, "0.8": 10.24, "0.85": 6.06, "0.9": 3.97, "0.95": 1.57}}, "short": {"MR-mAP": {"0.5": 6.46, "0.55": 3.71, "0.6": 3.05, "0.65": 1.72, "0.7": 1.2, "0.75": 0.95, "0.8": 0.67, "0.85": 0.32, "0.9": 0.32, "0.95": 0.32, "average": 1.87}, "MR-R1": {"0.5": 3.5, "0.55": 1.4, "0.6": 1.17, "0.65": 0.23, "0.7": 0.23, "0.75": 0.23, "0.8": 0.23, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_13_41_59 [Epoch] 054 [Loss] loss_span 0.5433 loss_giou 0.5406 loss_label 0.4840 class_error 11.9544 loss_saliency 0.2298 loss_span_0 0.5562 loss_giou_0 0.5440 loss_label_0 0.4892 class_error_0 12.0364 loss_overall 3.3871 [Metrics] {"brief": {"MR-full-R1@0.5": 45.87, "MR-full-R1@0.7": 25.23, "MR-full-mAP": 26.49, "MR-full-mAP@0.5": 50.61, "MR-full-mAP@0.75": 23.15, "MR-long-mAP": 37.05, "MR-middle-mAP": 25.23, "MR-short-mAP": 2.13, "HL-min-Fair-mAP": 65.52, "HL-min-Fair-Hit1": 64.0, "HL-min-Good-mAP": 55.91, "HL-min-Good-Hit1": 62.65, "HL-min-VeryGood-mAP": 34.11, "HL-min-VeryGood-Hit1": 52.45}, "HL-min-Fair": {"HL-mAP": 65.52, "HL-Hit1": 64.0}, "HL-min-Good": {"HL-mAP": 55.91, "HL-Hit1": 62.65}, "HL-min-VeryGood": {"HL-mAP": 34.11, "HL-Hit1": 52.45}, "full": {"MR-mAP": {"0.5": 50.61, "0.55": 44.16, "0.6": 40.11, "0.65": 33.33, "0.7": 28.55, "0.75": 23.15, "0.8": 17.93, "0.85": 13.15, "0.9": 8.78, "0.95": 5.15, "average": 26.49}, "MR-R1": {"0.5": 45.87, "0.55": 39.81, "0.6": 35.74, "0.65": 29.61, "0.7": 25.23, "0.75": 20.32, "0.8": 15.68, "0.85": 11.48, "0.9": 7.61, "0.95": 4.26}}, "long": {"MR-mAP": {"0.5": 63.41, "0.55": 59.01, "0.6": 53.24, "0.65": 47.36, "0.7": 40.59, "0.75": 34.09, "0.8": 26.99, "0.85": 20.92, "0.9": 14.56, "0.95": 10.3, "average": 37.05}, "MR-R1": {"0.5": 53.31, "0.55": 49.65, "0.6": 43.9, "0.65": 38.5, "0.7": 32.93, "0.75": 27.53, "0.8": 21.78, "0.85": 17.07, "0.9": 11.85, "0.95": 8.54}}, "middle": {"MR-mAP": {"0.5": 52.59, "0.55": 44.29, "0.6": 40.33, "0.65": 32.12, "0.7": 27.29, "0.75": 20.99, "0.8": 15.44, "0.85": 10.49, "0.9": 6.43, "0.95": 2.35, "average": 25.23}, "MR-R1": {"0.5": 40.33, "0.55": 33.65, "0.6": 30.83, "0.65": 24.56, "0.7": 20.9, "0.75": 16.2, "0.8": 12.12, "0.85": 8.36, "0.9": 5.22, "0.95": 1.78}}, "short": {"MR-mAP": {"0.5": 6.79, "0.55": 4.2, "0.6": 3.5, "0.65": 1.93, "0.7": 1.47, "0.75": 1.39, "0.8": 1.09, "0.85": 0.3, "0.9": 0.3, "0.95": 0.3, "average": 2.13}, "MR-R1": {"0.5": 4.43, "0.55": 2.33, "0.6": 1.63, "0.65": 0.7, "0.7": 0.47, "0.75": 0.47, "0.8": 0.47, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_47_33 [Epoch] 059 [Loss] loss_span 0.5430 loss_giou 0.5388 loss_label 0.5116 class_error 16.8536 loss_saliency 0.2274 loss_span_0 0.5704 loss_giou_0 0.5608 loss_label_0 0.4891 class_error_0 16.3761 loss_overall 3.4411 [Metrics] {"brief": {"MR-full-R1@0.5": 48.06, "MR-full-R1@0.7": 26.97, "MR-full-mAP": 27.32, "MR-full-mAP@0.5": 51.69, "MR-full-mAP@0.75": 24.98, "MR-long-mAP": 38.23, "MR-middle-mAP": 25.72, "MR-short-mAP": 2.38, "HL-min-Fair-mAP": 65.76, "HL-min-Fair-Hit1": 63.61, "HL-min-Good-mAP": 56.01, "HL-min-Good-Hit1": 62.0, "HL-min-VeryGood-mAP": 33.99, "HL-min-VeryGood-Hit1": 51.68}, "HL-min-Fair": {"HL-mAP": 65.76, "HL-Hit1": 63.61}, "HL-min-Good": {"HL-mAP": 56.01, "HL-Hit1": 62.0}, "HL-min-VeryGood": {"HL-mAP": 33.99, "HL-Hit1": 51.68}, "full": {"MR-mAP": {"0.5": 51.69, "0.55": 45.48, "0.6": 40.59, "0.65": 34.79, "0.7": 29.7, "0.75": 24.98, "0.8": 19.04, "0.85": 13.31, "0.9": 8.96, "0.95": 4.63, "average": 27.32}, "MR-R1": {"0.5": 48.06, "0.55": 42.19, "0.6": 37.29, "0.65": 31.74, "0.7": 26.97, "0.75": 22.45, "0.8": 16.9, "0.85": 12.06, "0.9": 7.81, "0.95": 4.06}}, "long": {"MR-mAP": {"0.5": 63.35, "0.55": 58.88, "0.6": 53.1, "0.65": 47.27, "0.7": 42.56, "0.75": 36.88, "0.8": 30.52, "0.85": 23.76, "0.9": 16.32, "0.95": 9.63, "average": 38.23}, "MR-R1": {"0.5": 54.53, "0.55": 50.7, "0.6": 45.47, "0.65": 39.55, "0.7": 35.54, "0.75": 30.49, "0.8": 25.09, "0.85": 19.69, "0.9": 13.41, "0.95": 8.36}}, "middle": {"MR-mAP": {"0.5": 54.23, "0.55": 46.81, "0.6": 41.1, "0.65": 34.09, "0.7": 27.62, "0.75": 22.07, "0.8": 14.88, "0.85": 9.19, "0.9": 5.34, "0.95": 1.87, "average": 25.72}, "MR-R1": {"0.5": 43.47, "0.55": 37.41, "0.6": 32.81, "0.65": 27.48, "0.7": 22.15, "0.75": 17.97, "0.8": 12.23, "0.85": 7.73, "0.9": 4.6, "0.95": 1.57}}, "short": {"MR-mAP": {"0.5": 7.25, "0.55": 4.24, "0.6": 3.5, "0.65": 2.34, "0.7": 1.83, "0.75": 1.63, "0.8": 1.32, "0.85": 0.55, "0.9": 0.55, "0.95": 0.55, "average": 2.38}, "MR-R1": {"0.5": 3.73, "0.55": 1.17, "0.6": 0.7, "0.65": 0.47, "0.7": 0.47, "0.75": 0.23, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_13_53_06 [Epoch] 064 [Loss] loss_span 0.5574 loss_giou 0.5329 loss_label 0.5468 class_error 19.3817 loss_saliency 0.2217 loss_span_0 0.5769 loss_giou_0 0.5461 loss_label_0 0.5328 class_error_0 18.5783 loss_overall 3.5146 [Metrics] {"brief": {"MR-full-R1@0.5": 48.26, "MR-full-R1@0.7": 27.16, "MR-full-mAP": 28.75, "MR-full-mAP@0.5": 52.71, "MR-full-mAP@0.75": 26.08, "MR-long-mAP": 41.49, "MR-middle-mAP": 26.16, "MR-short-mAP": 2.37, "HL-min-Fair-mAP": 66.77, "HL-min-Fair-Hit1": 65.94, "HL-min-Good-mAP": 56.86, "HL-min-Good-Hit1": 64.32, "HL-min-VeryGood-mAP": 34.65, "HL-min-VeryGood-Hit1": 53.87}, "HL-min-Fair": {"HL-mAP": 66.77, "HL-Hit1": 65.94}, "HL-min-Good": {"HL-mAP": 56.86, "HL-Hit1": 64.32}, "HL-min-VeryGood": {"HL-mAP": 34.65, "HL-Hit1": 53.87}, "full": {"MR-mAP": {"0.5": 52.71, "0.55": 46.62, "0.6": 42.46, "0.65": 36.36, "0.7": 30.54, "0.75": 26.08, "0.8": 20.65, "0.85": 15.33, "0.9": 10.77, "0.95": 6.01, "average": 28.75}, "MR-R1": {"0.5": 48.26, "0.55": 42.26, "0.6": 38.06, "0.65": 32.52, "0.7": 27.16, "0.75": 23.55, "0.8": 18.45, "0.85": 13.87, "0.9": 9.87, "0.95": 5.48}}, "long": {"MR-mAP": {"0.5": 66.65, "0.55": 61.96, "0.6": 55.94, "0.65": 51.38, "0.7": 45.29, "0.75": 40.51, "0.8": 33.35, "0.85": 27.68, "0.9": 19.67, "0.95": 12.53, "average": 41.49}, "MR-R1": {"0.5": 57.32, "0.55": 52.44, "0.6": 46.34, "0.65": 42.16, "0.7": 37.11, "0.75": 33.62, "0.8": 27.7, "0.85": 23.0, "0.9": 17.07, "0.95": 10.98}}, "middle": {"MR-mAP": {"0.5": 53.68, "0.55": 46.47, "0.6": 42.41, "0.65": 34.19, "0.7": 27.44, "0.75": 22.03, "0.8": 16.01, "0.85": 10.25, "0.9": 6.62, "0.95": 2.48, "average": 26.16}, "MR-R1": {"0.5": 41.38, "0.55": 35.63, "0.6": 32.92, "0.65": 26.65, "0.7": 21.32, "0.75": 17.66, "0.8": 13.06, "0.85": 8.57, "0.9": 5.64, "0.95": 2.19}}, "short": {"MR-mAP": {"0.5": 7.72, "0.55": 4.35, "0.6": 3.59, "0.65": 2.4, "0.7": 1.56, "0.75": 1.43, "0.8": 1.15, "0.85": 0.5, "0.9": 0.5, "0.95": 0.5, "average": 2.37}, "MR-R1": {"0.5": 5.36, "0.55": 3.03, "0.6": 2.1, "0.65": 1.63, "0.7": 0.93, "0.75": 0.7, "0.8": 0.47, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_13_58_41 [Epoch] 069 [Loss] loss_span 0.5581 loss_giou 0.5321 loss_label 0.5575 class_error 19.7858 loss_saliency 0.2349 loss_span_0 0.5913 loss_giou_0 0.5443 loss_label_0 0.5379 class_error_0 19.8230 loss_overall 3.5563 [Metrics] {"brief": {"MR-full-R1@0.5": 49.16, "MR-full-R1@0.7": 28.0, "MR-full-mAP": 28.88, "MR-full-mAP@0.5": 53.33, "MR-full-mAP@0.75": 26.27, "MR-long-mAP": 41.96, "MR-middle-mAP": 25.97, "MR-short-mAP": 2.44, "HL-min-Fair-mAP": 66.47, "HL-min-Fair-Hit1": 63.68, "HL-min-Good-mAP": 56.62, "HL-min-Good-Hit1": 62.13, "HL-min-VeryGood-mAP": 34.43, "HL-min-VeryGood-Hit1": 52.06}, "HL-min-Fair": {"HL-mAP": 66.47, "HL-Hit1": 63.68}, "HL-min-Good": {"HL-mAP": 56.62, "HL-Hit1": 62.13}, "HL-min-VeryGood": {"HL-mAP": 34.43, "HL-Hit1": 52.06}, "full": {"MR-mAP": {"0.5": 53.33, "0.55": 46.56, "0.6": 42.38, "0.65": 35.55, "0.7": 30.84, "0.75": 26.27, "0.8": 21.49, "0.85": 15.49, "0.9": 10.67, "0.95": 6.19, "average": 28.88}, "MR-R1": {"0.5": 49.16, "0.55": 42.45, "0.6": 38.26, "0.65": 31.61, "0.7": 28.0, "0.75": 23.94, "0.8": 19.48, "0.85": 13.94, "0.9": 9.87, "0.95": 5.87}}, "long": {"MR-mAP": {"0.5": 69.35, "0.55": 62.23, "0.6": 56.68, "0.65": 50.95, "0.7": 45.9, "0.75": 40.54, "0.8": 35.03, "0.85": 26.9, "0.9": 19.41, "0.95": 12.62, "average": 41.96}, "MR-R1": {"0.5": 60.1, "0.55": 52.79, "0.6": 47.39, "0.65": 41.64, "0.7": 37.8, "0.75": 33.62, "0.8": 29.44, "0.85": 22.82, "0.9": 16.9, "0.95": 11.32}}, "middle": {"MR-mAP": {"0.5": 53.43, "0.55": 46.21, "0.6": 41.28, "0.65": 32.57, "0.7": 27.28, "0.75": 22.0, "0.8": 16.21, "0.85": 10.76, "0.9": 6.89, "0.95": 3.03, "average": 25.97}, "MR-R1": {"0.5": 41.8, "0.55": 36.15, "0.6": 32.71, "0.65": 25.71, "0.7": 22.26, "0.75": 18.39, "0.8": 13.69, "0.85": 8.78, "0.9": 5.75, "0.95": 2.61}}, "short": {"MR-mAP": {"0.5": 6.74, "0.55": 4.63, "0.6": 4.24, "0.65": 3.19, "0.7": 2.08, "0.75": 1.38, "0.8": 0.98, "0.85": 0.4, "0.9": 0.4, "0.95": 0.4, "average": 2.44}, "MR-R1": {"0.5": 3.96, "0.55": 2.1, "0.6": 1.86, "0.65": 1.17, "0.7": 0.93, "0.75": 0.47, "0.8": 0.47, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_14_04_13 [Epoch] 074 [Loss] loss_span 0.5622 loss_giou 0.5384 loss_label 0.6026 class_error 20.7191 loss_saliency 0.2317 loss_span_0 0.5782 loss_giou_0 0.5511 loss_label_0 0.6059 class_error_0 21.9726 loss_overall 3.6702 [Metrics] {"brief": {"MR-full-R1@0.5": 49.16, "MR-full-R1@0.7": 28.9, "MR-full-mAP": 29.26, "MR-full-mAP@0.5": 52.54, "MR-full-mAP@0.75": 27.59, "MR-long-mAP": 41.0, "MR-middle-mAP": 27.42, "MR-short-mAP": 2.34, "HL-min-Fair-mAP": 66.86, "HL-min-Fair-Hit1": 65.16, "HL-min-Good-mAP": 56.83, "HL-min-Good-Hit1": 62.84, "HL-min-VeryGood-mAP": 34.72, "HL-min-VeryGood-Hit1": 53.61}, "HL-min-Fair": {"HL-mAP": 66.86, "HL-Hit1": 65.16}, "HL-min-Good": {"HL-mAP": 56.83, "HL-Hit1": 62.84}, "HL-min-VeryGood": {"HL-mAP": 34.72, "HL-Hit1": 53.61}, "full": {"MR-mAP": {"0.5": 52.54, "0.55": 46.57, "0.6": 43.14, "0.65": 37.36, "0.7": 31.69, "0.75": 27.59, "0.8": 21.64, "0.85": 15.26, "0.9": 10.86, "0.95": 5.98, "average": 29.26}, "MR-R1": {"0.5": 49.16, "0.55": 43.55, "0.6": 40.13, "0.65": 34.45, "0.7": 28.9, "0.75": 25.48, "0.8": 19.81, "0.85": 14.06, "0.9": 10.39, "0.95": 5.68}}, "long": {"MR-mAP": {"0.5": 66.39, "0.55": 61.33, "0.6": 56.44, "0.65": 51.27, "0.7": 45.17, "0.75": 41.57, "0.8": 33.32, "0.85": 25.79, "0.9": 17.85, "0.95": 10.88, "average": 41.0}, "MR-R1": {"0.5": 58.36, "0.55": 52.96, "0.6": 48.26, "0.65": 43.73, "0.7": 38.15, "0.75": 35.89, "0.8": 28.75, "0.85": 22.3, "0.9": 15.85, "0.95": 9.76}}, "middle": {"MR-mAP": {"0.5": 53.96, "0.55": 46.69, "0.6": 43.54, "0.65": 36.08, "0.7": 29.46, "0.75": 23.78, "0.8": 17.86, "0.85": 11.23, "0.9": 8.04, "0.95": 3.56, "average": 27.42}, "MR-R1": {"0.5": 42.95, "0.55": 37.51, "0.6": 35.42, "0.65": 29.36, "0.7": 23.82, "0.75": 19.64, "0.8": 14.73, "0.85": 9.3, "0.9": 7.21, "0.95": 3.24}}, "short": {"MR-mAP": {"0.5": 6.88, "0.55": 4.69, "0.6": 3.43, "0.65": 2.22, "0.7": 1.48, "0.75": 1.44, "0.8": 1.19, "0.85": 0.69, "0.9": 0.69, "0.95": 0.69, "average": 2.34}, "MR-R1": {"0.5": 3.73, "0.55": 2.8, "0.6": 1.4, "0.65": 0.47, "0.7": 0.23, "0.75": 0.23, "0.8": 0.23, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_14_09_47 [Epoch] 079 [Loss] loss_span 0.5459 loss_giou 0.5378 loss_label 0.5861 class_error 19.2414 loss_saliency 0.2271 loss_span_0 0.5757 loss_giou_0 0.5655 loss_label_0 0.5599 class_error_0 18.7932 loss_overall 3.5981 [Metrics] {"brief": {"MR-full-R1@0.5": 49.42, "MR-full-R1@0.7": 30.39, "MR-full-mAP": 29.22, "MR-full-mAP@0.5": 52.94, "MR-full-mAP@0.75": 27.33, "MR-long-mAP": 40.16, "MR-middle-mAP": 27.72, "MR-short-mAP": 2.82, "HL-min-Fair-mAP": 67.74, "HL-min-Fair-Hit1": 66.13, "HL-min-Good-mAP": 57.67, "HL-min-Good-Hit1": 64.52, "HL-min-VeryGood-mAP": 35.29, "HL-min-VeryGood-Hit1": 53.48}, "HL-min-Fair": {"HL-mAP": 67.74, "HL-Hit1": 66.13}, "HL-min-Good": {"HL-mAP": 57.67, "HL-Hit1": 64.52}, "HL-min-VeryGood": {"HL-mAP": 35.29, "HL-Hit1": 53.48}, "full": {"MR-mAP": {"0.5": 52.94, "0.55": 46.63, "0.6": 42.48, "0.65": 37.13, "0.7": 32.38, "0.75": 27.33, "0.8": 21.69, "0.85": 15.65, "0.9": 10.36, "0.95": 5.65, "average": 29.22}, "MR-R1": {"0.5": 49.42, "0.55": 43.94, "0.6": 39.61, "0.65": 34.71, "0.7": 30.39, "0.75": 25.23, "0.8": 19.94, "0.85": 14.39, "0.9": 9.48, "0.95": 4.84}}, "long": {"MR-mAP": {"0.5": 65.28, "0.55": 58.61, "0.6": 54.82, "0.65": 50.37, "0.7": 45.5, "0.75": 40.17, "0.8": 34.07, "0.85": 25.04, "0.9": 17.2, "0.95": 10.56, "average": 40.16}, "MR-R1": {"0.5": 56.62, "0.55": 50.35, "0.6": 46.69, "0.65": 42.86, "0.7": 38.5, "0.75": 34.32, "0.8": 28.92, "0.85": 21.25, "0.9": 14.46, "0.95": 8.89}}, "middle": {"MR-mAP": {"0.5": 54.58, "0.55": 48.23, "0.6": 43.05, "0.65": 35.78, "0.7": 30.84, "0.75": 24.23, "0.8": 17.66, "0.85": 12.13, "0.9": 7.51, "0.95": 3.17, "average": 27.72}, "MR-R1": {"0.5": 43.47, "0.55": 39.08, "0.6": 35.01, "0.65": 29.78, "0.7": 25.6, "0.75": 19.96, "0.8": 14.84, "0.85": 10.55, "0.9": 6.69, "0.95": 2.51}}, "short": {"MR-mAP": {"0.5": 8.81, "0.55": 5.87, "0.6": 4.77, "0.65": 3.01, "0.7": 1.71, "0.75": 1.38, "0.8": 1.05, "0.85": 0.52, "0.9": 0.52, "0.95": 0.52, "average": 2.82}, "MR-R1": {"0.5": 5.83, "0.55": 4.2, "0.6": 2.56, "0.65": 1.63, "0.7": 1.17, "0.75": 0.7, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_14_15_17 [Epoch] 084 [Loss] loss_span 0.5492 loss_giou 0.5416 loss_label 0.5856 class_error 16.8746 loss_saliency 0.2289 loss_span_0 0.5613 loss_giou_0 0.5593 loss_label_0 0.5495 class_error_0 16.2807 loss_overall 3.5754 [Metrics] {"brief": {"MR-full-R1@0.5": 47.81, "MR-full-R1@0.7": 29.29, "MR-full-mAP": 28.83, "MR-full-mAP@0.5": 51.42, "MR-full-mAP@0.75": 27.1, "MR-long-mAP": 41.36, "MR-middle-mAP": 26.22, "MR-short-mAP": 2.88, "HL-min-Fair-mAP": 66.94, "HL-min-Fair-Hit1": 66.77, "HL-min-Good-mAP": 56.97, "HL-min-Good-Hit1": 64.84, "HL-min-VeryGood-mAP": 34.78, "HL-min-VeryGood-Hit1": 54.58}, "HL-min-Fair": {"HL-mAP": 66.94, "HL-Hit1": 66.77}, "HL-min-Good": {"HL-mAP": 56.97, "HL-Hit1": 64.84}, "HL-min-VeryGood": {"HL-mAP": 34.78, "HL-Hit1": 54.58}, "full": {"MR-mAP": {"0.5": 51.42, "0.55": 45.76, "0.6": 42.14, "0.65": 36.42, "0.7": 31.65, "0.75": 27.1, "0.8": 21.07, "0.85": 15.92, "0.9": 11.07, "0.95": 5.79, "average": 28.83}, "MR-R1": {"0.5": 47.81, "0.55": 42.45, "0.6": 38.9, "0.65": 33.55, "0.7": 29.29, "0.75": 25.23, "0.8": 19.55, "0.85": 14.97, "0.9": 10.39, "0.95": 5.61}}, "long": {"MR-mAP": {"0.5": 64.01, "0.55": 60.35, "0.6": 55.84, "0.65": 50.8, "0.7": 45.74, "0.75": 42.35, "0.8": 35.52, "0.85": 27.99, "0.9": 19.24, "0.95": 11.73, "average": 41.36}, "MR-R1": {"0.5": 54.7, "0.55": 51.74, "0.6": 47.74, "0.65": 43.21, "0.7": 39.2, "0.75": 36.41, "0.8": 29.97, "0.85": 24.04, "0.9": 16.9, "0.95": 10.8}}, "middle": {"MR-mAP": {"0.5": 52.42, "0.55": 45.32, "0.6": 41.45, "0.65": 34.3, "0.7": 28.72, "0.75": 22.74, "0.8": 16.0, "0.85": 11.09, "0.9": 7.45, "0.95": 2.68, "average": 26.22}, "MR-R1": {"0.5": 41.9, "0.55": 36.15, "0.6": 33.33, "0.65": 27.69, "0.7": 23.3, "0.75": 18.6, "0.8": 13.48, "0.85": 9.61, "0.9": 6.48, "0.95": 2.4}}, "short": {"MR-mAP": {"0.5": 8.45, "0.55": 5.44, "0.6": 4.44, "0.65": 3.35, "0.7": 2.2, "0.75": 1.75, "0.8": 1.2, "0.85": 0.66, "0.9": 0.66, "0.95": 0.66, "average": 2.88}, "MR-R1": {"0.5": 6.06, "0.55": 3.5, "0.6": 2.33, "0.65": 1.63, "0.7": 1.4, "0.75": 0.93, "0.8": 0.47, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47}}}
+2021_08_04_14_20_52 [Epoch] 089 [Loss] loss_span 0.5726 loss_giou 0.5424 loss_label 0.6611 class_error 23.5645 loss_saliency 0.2291 loss_span_0 0.5964 loss_giou_0 0.5513 loss_label_0 0.6122 class_error_0 22.0650 loss_overall 3.7652 [Metrics] {"brief": {"MR-full-R1@0.5": 50.84, "MR-full-R1@0.7": 29.68, "MR-full-mAP": 29.59, "MR-full-mAP@0.5": 53.79, "MR-full-mAP@0.75": 27.11, "MR-long-mAP": 42.74, "MR-middle-mAP": 27.16, "MR-short-mAP": 2.26, "HL-min-Fair-mAP": 67.82, "HL-min-Fair-Hit1": 67.61, "HL-min-Good-mAP": 57.76, "HL-min-Good-Hit1": 65.61, "HL-min-VeryGood-mAP": 35.41, "HL-min-VeryGood-Hit1": 54.84}, "HL-min-Fair": {"HL-mAP": 67.82, "HL-Hit1": 67.61}, "HL-min-Good": {"HL-mAP": 57.76, "HL-Hit1": 65.61}, "HL-min-VeryGood": {"HL-mAP": 35.41, "HL-Hit1": 54.84}, "full": {"MR-mAP": {"0.5": 53.79, "0.55": 47.51, "0.6": 43.51, "0.65": 37.3, "0.7": 32.04, "0.75": 27.11, "0.8": 21.67, "0.85": 16.15, "0.9": 10.69, "0.95": 6.16, "average": 29.59}, "MR-R1": {"0.5": 50.84, "0.55": 44.32, "0.6": 40.58, "0.65": 34.45, "0.7": 29.68, "0.75": 24.77, "0.8": 19.68, "0.85": 14.77, "0.9": 9.74, "0.95": 5.68}}, "long": {"MR-mAP": {"0.5": 67.61, "0.55": 61.93, "0.6": 57.26, "0.65": 52.83, "0.7": 47.75, "0.75": 43.03, "0.8": 36.25, "0.85": 28.33, "0.9": 19.22, "0.95": 13.15, "average": 42.74}, "MR-R1": {"0.5": 58.71, "0.55": 53.14, "0.6": 48.61, "0.65": 44.08, "0.7": 40.07, "0.75": 36.24, "0.8": 30.66, "0.85": 24.39, "0.9": 16.2, "0.95": 11.5}}, "middle": {"MR-mAP": {"0.5": 55.46, "0.55": 48.33, "0.6": 43.68, "0.65": 35.17, "0.7": 28.93, "0.75": 22.34, "0.8": 16.27, "0.85": 11.35, "0.9": 7.26, "0.95": 2.78, "average": 27.16}, "MR-R1": {"0.5": 45.04, "0.55": 38.98, "0.6": 35.95, "0.65": 29.05, "0.7": 23.82, "0.75": 18.29, "0.8": 13.38, "0.85": 9.3, "0.9": 6.06, "0.95": 2.3}}, "short": {"MR-mAP": {"0.5": 7.07, "0.55": 4.35, "0.6": 3.48, "0.65": 2.42, "0.7": 1.54, "0.75": 1.38, "0.8": 1.11, "0.85": 0.43, "0.9": 0.43, "0.95": 0.43, "average": 2.26}, "MR-R1": {"0.5": 4.66, "0.55": 2.1, "0.6": 1.4, "0.65": 0.7, "0.7": 0.47, "0.75": 0.23, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_14_26_29 [Epoch] 094 [Loss] loss_span 0.5609 loss_giou 0.5546 loss_label 0.5836 class_error 17.2686 loss_saliency 0.2343 loss_span_0 0.5799 loss_giou_0 0.5608 loss_label_0 0.5801 class_error_0 18.4508 loss_overall 3.6543 [Metrics] {"brief": {"MR-full-R1@0.5": 50.45, "MR-full-R1@0.7": 30.06, "MR-full-mAP": 28.74, "MR-full-mAP@0.5": 53.0, "MR-full-mAP@0.75": 26.66, "MR-long-mAP": 38.36, "MR-middle-mAP": 28.03, "MR-short-mAP": 3.38, "HL-min-Fair-mAP": 67.6, "HL-min-Fair-Hit1": 67.23, "HL-min-Good-mAP": 57.27, "HL-min-Good-Hit1": 65.48, "HL-min-VeryGood-mAP": 34.98, "HL-min-VeryGood-Hit1": 54.58}, "HL-min-Fair": {"HL-mAP": 67.6, "HL-Hit1": 67.23}, "HL-min-Good": {"HL-mAP": 57.27, "HL-Hit1": 65.48}, "HL-min-VeryGood": {"HL-mAP": 34.98, "HL-Hit1": 54.58}, "full": {"MR-mAP": {"0.5": 53.0, "0.55": 47.25, "0.6": 42.93, "0.65": 37.05, "0.7": 31.54, "0.75": 26.66, "0.8": 20.36, "0.85": 14.16, "0.9": 9.5, "0.95": 4.9, "average": 28.74}, "MR-R1": {"0.5": 50.45, "0.55": 44.77, "0.6": 40.19, "0.65": 34.9, "0.7": 30.06, "0.75": 25.48, "0.8": 19.55, "0.85": 13.81, "0.9": 9.16, "0.95": 4.65}}, "long": {"MR-mAP": {"0.5": 63.16, "0.55": 57.53, "0.6": 52.39, "0.65": 48.3, "0.7": 44.15, "0.75": 38.74, "0.8": 30.97, "0.85": 23.56, "0.9": 16.6, "0.95": 8.24, "average": 38.36}, "MR-R1": {"0.5": 55.57, "0.55": 50.52, "0.6": 45.82, "0.65": 41.99, "0.7": 38.68, "0.75": 34.15, "0.8": 27.0, "0.85": 20.56, "0.9": 14.46, "0.95": 7.14}}, "middle": {"MR-mAP": {"0.5": 56.5, "0.55": 49.77, "0.6": 44.8, "0.65": 37.29, "0.7": 29.93, "0.75": 23.85, "0.8": 17.16, "0.85": 10.99, "0.9": 6.65, "0.95": 3.39, "average": 28.03}, "MR-R1": {"0.5": 46.08, "0.55": 40.86, "0.6": 36.68, "0.65": 30.62, "0.7": 24.97, "0.75": 20.48, "0.8": 15.26, "0.85": 10.03, "0.9": 6.17, "0.95": 3.24}}, "short": {"MR-mAP": {"0.5": 8.93, "0.55": 6.42, "0.6": 5.85, "0.65": 4.19, "0.7": 2.81, "0.75": 2.29, "0.8": 1.72, "0.85": 0.51, "0.9": 0.51, "0.95": 0.51, "average": 3.38}, "MR-R1": {"0.5": 5.13, "0.55": 3.03, "0.6": 2.1, "0.65": 1.63, "0.7": 1.17, "0.75": 0.7, "0.8": 0.47, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_14_32_00 [Epoch] 099 [Loss] loss_span 0.5694 loss_giou 0.5547 loss_label 0.6673 class_error 21.9861 loss_saliency 0.2260 loss_span_0 0.5956 loss_giou_0 0.5550 loss_label_0 0.6565 class_error_0 22.3623 loss_overall 3.8245 [Metrics] {"brief": {"MR-full-R1@0.5": 49.42, "MR-full-R1@0.7": 29.16, "MR-full-mAP": 28.99, "MR-full-mAP@0.5": 52.85, "MR-full-mAP@0.75": 26.89, "MR-long-mAP": 40.3, "MR-middle-mAP": 27.31, "MR-short-mAP": 2.86, "HL-min-Fair-mAP": 67.11, "HL-min-Fair-Hit1": 66.52, "HL-min-Good-mAP": 57.23, "HL-min-Good-Hit1": 64.32, "HL-min-VeryGood-mAP": 34.93, "HL-min-VeryGood-Hit1": 53.87}, "HL-min-Fair": {"HL-mAP": 67.11, "HL-Hit1": 66.52}, "HL-min-Good": {"HL-mAP": 57.23, "HL-Hit1": 64.32}, "HL-min-VeryGood": {"HL-mAP": 34.93, "HL-Hit1": 53.87}, "full": {"MR-mAP": {"0.5": 52.85, "0.55": 46.48, "0.6": 42.15, "0.65": 36.33, "0.7": 31.69, "0.75": 26.89, "0.8": 21.67, "0.85": 15.86, "0.9": 10.75, "0.95": 5.24, "average": 28.99}, "MR-R1": {"0.5": 49.42, "0.55": 43.48, "0.6": 38.97, "0.65": 33.29, "0.7": 29.16, "0.75": 24.97, "0.8": 20.45, "0.85": 14.9, "0.9": 10.0, "0.95": 4.52}}, "long": {"MR-mAP": {"0.5": 64.98, "0.55": 59.2, "0.6": 53.84, "0.65": 48.97, "0.7": 44.88, "0.75": 39.8, "0.8": 34.15, "0.85": 27.19, "0.9": 19.26, "0.95": 10.7, "average": 40.3}, "MR-R1": {"0.5": 56.62, "0.55": 50.52, "0.6": 45.47, "0.65": 41.11, "0.7": 37.46, "0.75": 33.28, "0.8": 29.09, "0.85": 23.34, "0.9": 16.2, "0.95": 8.71}}, "middle": {"MR-mAP": {"0.5": 54.92, "0.55": 47.92, "0.6": 42.92, "0.65": 35.42, "0.7": 29.71, "0.75": 23.96, "0.8": 17.68, "0.85": 11.33, "0.9": 6.98, "0.95": 2.29, "average": 27.31}, "MR-R1": {"0.5": 44.51, "0.55": 38.77, "0.6": 34.69, "0.65": 28.63, "0.7": 24.24, "0.75": 19.96, "0.8": 15.15, "0.85": 10.03, "0.9": 6.37, "0.95": 1.99}}, "short": {"MR-mAP": {"0.5": 7.78, "0.55": 5.06, "0.6": 4.72, "0.65": 3.23, "0.7": 2.51, "0.75": 2.31, "0.8": 1.79, "0.85": 0.39, "0.9": 0.39, "0.95": 0.39, "average": 2.86}, "MR-R1": {"0.5": 3.5, "0.55": 3.03, "0.6": 2.56, "0.65": 1.4, "0.7": 1.17, "0.75": 1.17, "0.8": 1.17, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_14_37_35 [Epoch] 104 [Loss] loss_span 0.5610 loss_giou 0.5396 loss_label 0.6858 class_error 21.1351 loss_saliency 0.2364 loss_span_0 0.5720 loss_giou_0 0.5504 loss_label_0 0.6311 class_error_0 19.8577 loss_overall 3.7763 [Metrics] {"brief": {"MR-full-R1@0.5": 51.23, "MR-full-R1@0.7": 31.16, "MR-full-mAP": 30.13, "MR-full-mAP@0.5": 53.68, "MR-full-mAP@0.75": 28.35, "MR-long-mAP": 39.74, "MR-middle-mAP": 29.72, "MR-short-mAP": 3.1, "HL-min-Fair-mAP": 67.88, "HL-min-Fair-Hit1": 68.13, "HL-min-Good-mAP": 57.76, "HL-min-Good-Hit1": 66.32, "HL-min-VeryGood-mAP": 35.18, "HL-min-VeryGood-Hit1": 54.9}, "HL-min-Fair": {"HL-mAP": 67.88, "HL-Hit1": 68.13}, "HL-min-Good": {"HL-mAP": 57.76, "HL-Hit1": 66.32}, "HL-min-VeryGood": {"HL-mAP": 35.18, "HL-Hit1": 54.9}, "full": {"MR-mAP": {"0.5": 53.68, "0.55": 48.03, "0.6": 43.69, "0.65": 37.51, "0.7": 33.27, "0.75": 28.35, "0.8": 22.95, "0.85": 16.27, "0.9": 11.39, "0.95": 6.13, "average": 30.13}, "MR-R1": {"0.5": 51.23, "0.55": 45.55, "0.6": 41.16, "0.65": 35.1, "0.7": 31.16, "0.75": 26.84, "0.8": 21.68, "0.85": 15.16, "0.9": 10.65, "0.95": 5.81}}, "long": {"MR-mAP": {"0.5": 62.52, "0.55": 58.34, "0.6": 53.23, "0.65": 48.57, "0.7": 44.68, "0.75": 40.41, "0.8": 35.05, "0.85": 25.9, "0.9": 18.0, "0.95": 10.65, "average": 39.74}, "MR-R1": {"0.5": 54.36, "0.55": 50.35, "0.6": 45.47, "0.65": 40.94, "0.7": 37.63, "0.75": 34.49, "0.8": 29.79, "0.85": 22.13, "0.9": 15.16, "0.95": 9.58}}, "middle": {"MR-mAP": {"0.5": 57.76, "0.55": 50.73, "0.6": 45.72, "0.65": 37.95, "0.7": 33.05, "0.75": 26.34, "0.8": 19.75, "0.85": 13.02, "0.9": 8.96, "0.95": 3.94, "average": 29.72}, "MR-R1": {"0.5": 48.07, "0.55": 42.11, "0.6": 38.24, "0.65": 31.56, "0.7": 27.59, "0.75": 22.57, "0.8": 17.24, "0.85": 11.29, "0.9": 8.15, "0.95": 3.66}}, "short": {"MR-mAP": {"0.5": 9.08, "0.55": 5.9, "0.6": 5.1, "0.65": 3.53, "0.7": 2.21, "0.75": 2.05, "0.8": 1.44, "0.85": 0.57, "0.9": 0.57, "0.95": 0.57, "average": 3.1}, "MR-R1": {"0.5": 5.13, "0.55": 3.26, "0.6": 2.56, "0.65": 1.63, "0.7": 0.7, "0.75": 0.47, "0.8": 0.0, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_14_43_06 [Epoch] 109 [Loss] loss_span 0.5730 loss_giou 0.5507 loss_label 0.7272 class_error 22.4184 loss_saliency 0.2332 loss_span_0 0.5875 loss_giou_0 0.5529 loss_label_0 0.6475 class_error_0 21.0924 loss_overall 3.8719 [Metrics] {"brief": {"MR-full-R1@0.5": 51.68, "MR-full-R1@0.7": 32.06, "MR-full-mAP": 29.79, "MR-full-mAP@0.5": 53.58, "MR-full-mAP@0.75": 27.59, "MR-long-mAP": 41.61, "MR-middle-mAP": 27.97, "MR-short-mAP": 3.16, "HL-min-Fair-mAP": 67.6, "HL-min-Fair-Hit1": 67.68, "HL-min-Good-mAP": 57.46, "HL-min-Good-Hit1": 65.81, "HL-min-VeryGood-mAP": 35.35, "HL-min-VeryGood-Hit1": 55.55}, "HL-min-Fair": {"HL-mAP": 67.6, "HL-Hit1": 67.68}, "HL-min-Good": {"HL-mAP": 57.46, "HL-Hit1": 65.81}, "HL-min-VeryGood": {"HL-mAP": 35.35, "HL-Hit1": 55.55}, "full": {"MR-mAP": {"0.5": 53.58, "0.55": 47.74, "0.6": 43.33, "0.65": 37.71, "0.7": 32.71, "0.75": 27.59, "0.8": 22.51, "0.85": 16.44, "0.9": 10.95, "0.95": 5.33, "average": 29.79}, "MR-R1": {"0.5": 51.68, "0.55": 46.26, "0.6": 42.0, "0.65": 36.77, "0.7": 32.06, "0.75": 27.03, "0.8": 22.32, "0.85": 16.19, "0.9": 10.84, "0.95": 5.03}}, "long": {"MR-mAP": {"0.5": 64.6, "0.55": 60.13, "0.6": 55.71, "0.65": 50.59, "0.7": 46.77, "0.75": 41.33, "0.8": 36.5, "0.85": 28.73, "0.9": 20.78, "0.95": 11.01, "average": 41.61}, "MR-R1": {"0.5": 57.67, "0.55": 53.48, "0.6": 49.13, "0.65": 44.08, "0.7": 41.11, "0.75": 36.41, "0.8": 32.4, "0.85": 25.61, "0.9": 18.47, "0.95": 9.76}}, "middle": {"MR-mAP": {"0.5": 56.31, "0.55": 48.82, "0.6": 43.19, "0.65": 36.77, "0.7": 30.27, "0.75": 24.35, "0.8": 18.11, "0.85": 12.24, "0.9": 7.04, "0.95": 2.56, "average": 27.97}, "MR-R1": {"0.5": 46.71, "0.55": 41.07, "0.6": 37.1, "0.65": 32.08, "0.7": 26.54, "0.75": 21.53, "0.8": 16.41, "0.85": 10.87, "0.9": 6.48, "0.95": 2.3}}, "short": {"MR-mAP": {"0.5": 8.77, "0.55": 6.01, "0.6": 5.24, "0.65": 3.93, "0.7": 2.65, "0.75": 2.13, "0.8": 1.41, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47, "average": 3.16}, "MR-R1": {"0.5": 5.36, "0.55": 3.96, "0.6": 3.26, "0.65": 2.33, "0.7": 1.63, "0.75": 0.93, "0.8": 0.7, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_14_48_29 [Epoch] 114 [Loss] loss_span 0.5579 loss_giou 0.5424 loss_label 0.6750 class_error 21.2955 loss_saliency 0.2273 loss_span_0 0.5762 loss_giou_0 0.5481 loss_label_0 0.6629 class_error_0 20.3352 loss_overall 3.7899 [Metrics] {"brief": {"MR-full-R1@0.5": 51.42, "MR-full-R1@0.7": 32.39, "MR-full-mAP": 30.29, "MR-full-mAP@0.5": 53.62, "MR-full-mAP@0.75": 28.51, "MR-long-mAP": 41.37, "MR-middle-mAP": 29.19, "MR-short-mAP": 2.93, "HL-min-Fair-mAP": 67.61, "HL-min-Fair-Hit1": 67.81, "HL-min-Good-mAP": 57.41, "HL-min-Good-Hit1": 66.26, "HL-min-VeryGood-mAP": 35.17, "HL-min-VeryGood-Hit1": 55.55}, "HL-min-Fair": {"HL-mAP": 67.61, "HL-Hit1": 67.81}, "HL-min-Good": {"HL-mAP": 57.41, "HL-Hit1": 66.26}, "HL-min-VeryGood": {"HL-mAP": 35.17, "HL-Hit1": 55.55}, "full": {"MR-mAP": {"0.5": 53.62, "0.55": 47.83, "0.6": 44.03, "0.65": 38.59, "0.7": 33.87, "0.75": 28.51, "0.8": 23.13, "0.85": 16.71, "0.9": 11.13, "0.95": 5.51, "average": 30.29}, "MR-R1": {"0.5": 51.42, "0.55": 45.87, "0.6": 42.13, "0.65": 36.77, "0.7": 32.39, "0.75": 27.55, "0.8": 22.9, "0.85": 16.45, "0.9": 10.65, "0.95": 4.9}}, "long": {"MR-mAP": {"0.5": 64.33, "0.55": 59.24, "0.6": 55.21, "0.65": 50.89, "0.7": 46.84, "0.75": 42.03, "0.8": 36.36, "0.85": 28.51, "0.9": 19.73, "0.95": 10.55, "average": 41.37}, "MR-R1": {"0.5": 56.97, "0.55": 52.44, "0.6": 48.61, "0.65": 44.43, "0.7": 41.11, "0.75": 36.93, "0.8": 32.4, "0.85": 25.26, "0.9": 17.42, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 56.9, "0.55": 50.27, "0.6": 45.98, "0.65": 38.44, "0.7": 32.61, "0.75": 25.69, "0.8": 19.24, "0.85": 12.51, "0.9": 7.53, "0.95": 2.7, "average": 29.19}, "MR-R1": {"0.5": 46.81, "0.55": 41.38, "0.6": 38.14, "0.65": 32.5, "0.7": 27.59, "0.75": 22.26, "0.8": 17.55, "0.85": 11.39, "0.9": 6.69, "0.95": 2.3}}, "short": {"MR-mAP": {"0.5": 8.43, "0.55": 5.29, "0.6": 4.3, "0.65": 3.14, "0.7": 2.03, "0.75": 1.98, "0.8": 1.55, "0.85": 0.87, "0.9": 0.87, "0.95": 0.87, "average": 2.93}, "MR-R1": {"0.5": 5.13, "0.55": 3.26, "0.6": 2.1, "0.65": 0.93, "0.7": 0.47, "0.75": 0.47, "0.8": 0.23, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_14_53_58 [Epoch] 119 [Loss] loss_span 0.5592 loss_giou 0.5568 loss_label 0.7342 class_error 22.2560 loss_saliency 0.2334 loss_span_0 0.5724 loss_giou_0 0.5574 loss_label_0 0.6654 class_error_0 21.0825 loss_overall 3.8787 [Metrics] {"brief": {"MR-full-R1@0.5": 51.23, "MR-full-R1@0.7": 31.68, "MR-full-mAP": 29.93, "MR-full-mAP@0.5": 53.57, "MR-full-mAP@0.75": 28.58, "MR-long-mAP": 39.57, "MR-middle-mAP": 29.24, "MR-short-mAP": 2.99, "HL-min-Fair-mAP": 67.82, "HL-min-Fair-Hit1": 68.06, "HL-min-Good-mAP": 57.73, "HL-min-Good-Hit1": 65.94, "HL-min-VeryGood-mAP": 35.14, "HL-min-VeryGood-Hit1": 55.23}, "HL-min-Fair": {"HL-mAP": 67.82, "HL-Hit1": 68.06}, "HL-min-Good": {"HL-mAP": 57.73, "HL-Hit1": 65.94}, "HL-min-VeryGood": {"HL-mAP": 35.14, "HL-Hit1": 55.23}, "full": {"MR-mAP": {"0.5": 53.57, "0.55": 47.94, "0.6": 43.88, "0.65": 37.95, "0.7": 33.19, "0.75": 28.58, "0.8": 21.79, "0.85": 15.72, "0.9": 10.95, "0.95": 5.73, "average": 29.93}, "MR-R1": {"0.5": 51.23, "0.55": 45.87, "0.6": 41.87, "0.65": 35.81, "0.7": 31.68, "0.75": 27.35, "0.8": 20.97, "0.85": 14.97, "0.9": 10.52, "0.95": 5.42}}, "long": {"MR-mAP": {"0.5": 63.52, "0.55": 57.97, "0.6": 54.17, "0.65": 48.91, "0.7": 43.78, "0.75": 39.88, "0.8": 32.89, "0.85": 25.2, "0.9": 18.32, "0.95": 11.02, "average": 39.57}, "MR-R1": {"0.5": 55.92, "0.55": 50.17, "0.6": 46.34, "0.65": 41.11, "0.7": 37.11, "0.75": 34.32, "0.8": 28.22, "0.85": 20.91, "0.9": 15.68, "0.95": 9.76}}, "middle": {"MR-mAP": {"0.5": 56.41, "0.55": 50.61, "0.6": 45.6, "0.65": 38.12, "0.7": 32.68, "0.75": 26.7, "0.8": 18.67, "0.85": 12.78, "0.9": 7.98, "0.95": 2.89, "average": 29.24}, "MR-R1": {"0.5": 47.02, "0.55": 42.63, "0.6": 38.87, "0.65": 32.71, "0.7": 28.63, "0.75": 23.51, "0.8": 16.82, "0.85": 11.49, "0.9": 7.42, "0.95": 2.72}}, "short": {"MR-mAP": {"0.5": 9.07, "0.55": 5.52, "0.6": 4.46, "0.65": 3.16, "0.7": 2.15, "0.75": 1.94, "0.8": 1.5, "0.85": 0.69, "0.9": 0.69, "0.95": 0.69, "average": 2.99}, "MR-R1": {"0.5": 5.36, "0.55": 3.5, "0.6": 2.56, "0.65": 1.4, "0.7": 0.93, "0.75": 0.47, "0.8": 0.47, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47}}}
+2021_08_04_14_59_23 [Epoch] 124 [Loss] loss_span 0.5859 loss_giou 0.5526 loss_label 0.8002 class_error 23.7492 loss_saliency 0.2241 loss_span_0 0.6056 loss_giou_0 0.5505 loss_label_0 0.7723 class_error_0 25.1843 loss_overall 4.0912 [Metrics] {"brief": {"MR-full-R1@0.5": 51.74, "MR-full-R1@0.7": 31.81, "MR-full-mAP": 30.23, "MR-full-mAP@0.5": 54.02, "MR-full-mAP@0.75": 28.68, "MR-long-mAP": 41.6, "MR-middle-mAP": 28.78, "MR-short-mAP": 2.73, "HL-min-Fair-mAP": 68.29, "HL-min-Fair-Hit1": 68.97, "HL-min-Good-mAP": 57.96, "HL-min-Good-Hit1": 67.16, "HL-min-VeryGood-mAP": 35.64, "HL-min-VeryGood-Hit1": 56.84}, "HL-min-Fair": {"HL-mAP": 68.29, "HL-Hit1": 68.97}, "HL-min-Good": {"HL-mAP": 57.96, "HL-Hit1": 67.16}, "HL-min-VeryGood": {"HL-mAP": 35.64, "HL-Hit1": 56.84}, "full": {"MR-mAP": {"0.5": 54.02, "0.55": 48.19, "0.6": 44.42, "0.65": 38.6, "0.7": 33.02, "0.75": 28.68, "0.8": 22.15, "0.85": 16.47, "0.9": 11.04, "0.95": 5.68, "average": 30.23}, "MR-R1": {"0.5": 51.74, "0.55": 46.26, "0.6": 42.39, "0.65": 36.77, "0.7": 31.81, "0.75": 27.55, "0.8": 21.1, "0.85": 15.68, "0.9": 10.39, "0.95": 5.1}}, "long": {"MR-mAP": {"0.5": 64.45, "0.55": 59.74, "0.6": 55.94, "0.65": 51.29, "0.7": 46.95, "0.75": 42.56, "0.8": 36.27, "0.85": 28.91, "0.9": 19.46, "0.95": 10.39, "average": 41.6}, "MR-R1": {"0.5": 56.45, "0.55": 52.09, "0.6": 48.61, "0.65": 43.9, "0.7": 40.07, "0.75": 36.41, "0.8": 31.18, "0.85": 25.26, "0.9": 16.9, "0.95": 8.89}}, "middle": {"MR-mAP": {"0.5": 57.22, "0.55": 50.21, "0.6": 45.76, "0.65": 38.33, "0.7": 31.08, "0.75": 25.57, "0.8": 17.37, "0.85": 11.66, "0.9": 7.42, "0.95": 3.17, "average": 28.78}, "MR-R1": {"0.5": 47.44, "0.55": 41.69, "0.6": 38.04, "0.65": 31.87, "0.7": 26.44, "0.75": 22.15, "0.8": 15.15, "0.85": 10.24, "0.9": 6.69, "0.95": 2.93}}, "short": {"MR-mAP": {"0.5": 8.15, "0.55": 5.5, "0.6": 4.59, "0.65": 3.3, "0.7": 2.08, "0.75": 1.64, "0.8": 1.05, "0.85": 0.32, "0.9": 0.32, "0.95": 0.32, "average": 2.73}, "MR-R1": {"0.5": 5.59, "0.55": 4.43, "0.6": 3.26, "0.65": 3.03, "0.7": 2.33, "0.75": 1.4, "0.8": 0.7, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_15_04_47 [Epoch] 129 [Loss] loss_span 0.5800 loss_giou 0.5514 loss_label 0.8848 class_error 24.3366 loss_saliency 0.2315 loss_span_0 0.6031 loss_giou_0 0.5608 loss_label_0 0.8139 class_error_0 24.3083 loss_overall 4.2256 [Metrics] {"brief": {"MR-full-R1@0.5": 50.39, "MR-full-R1@0.7": 30.32, "MR-full-mAP": 29.31, "MR-full-mAP@0.5": 53.22, "MR-full-mAP@0.75": 27.37, "MR-long-mAP": 40.2, "MR-middle-mAP": 27.65, "MR-short-mAP": 3.15, "HL-min-Fair-mAP": 68.27, "HL-min-Fair-Hit1": 69.23, "HL-min-Good-mAP": 57.89, "HL-min-Good-Hit1": 67.23, "HL-min-VeryGood-mAP": 35.57, "HL-min-VeryGood-Hit1": 56.26}, "HL-min-Fair": {"HL-mAP": 68.27, "HL-Hit1": 69.23}, "HL-min-Good": {"HL-mAP": 57.89, "HL-Hit1": 67.23}, "HL-min-VeryGood": {"HL-mAP": 35.57, "HL-Hit1": 56.26}, "full": {"MR-mAP": {"0.5": 53.22, "0.55": 47.79, "0.6": 43.28, "0.65": 37.52, "0.7": 32.21, "0.75": 27.37, "0.8": 21.42, "0.85": 15.11, "0.9": 9.58, "0.95": 5.6, "average": 29.31}, "MR-R1": {"0.5": 50.39, "0.55": 45.35, "0.6": 40.84, "0.65": 35.23, "0.7": 30.32, "0.75": 25.61, "0.8": 20.26, "0.85": 14.19, "0.9": 8.9, "0.95": 5.16}}, "long": {"MR-mAP": {"0.5": 62.91, "0.55": 59.53, "0.6": 55.11, "0.65": 51.0, "0.7": 46.06, "0.75": 41.36, "0.8": 33.88, "0.85": 25.67, "0.9": 16.13, "0.95": 10.38, "average": 40.2}, "MR-R1": {"0.5": 55.23, "0.55": 51.92, "0.6": 48.43, "0.65": 44.77, "0.7": 40.24, "0.75": 36.59, "0.8": 29.97, "0.85": 22.47, "0.9": 13.94, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 56.32, "0.55": 49.61, "0.6": 44.06, "0.65": 35.75, "0.7": 29.38, "0.75": 23.26, "0.8": 16.88, "0.85": 11.11, "0.9": 6.94, "0.95": 3.22, "average": 27.65}, "MR-R1": {"0.5": 46.08, "0.55": 40.75, "0.6": 36.15, "0.65": 29.47, "0.7": 24.35, "0.75": 19.02, "0.8": 14.32, "0.85": 9.4, "0.9": 5.96, "0.95": 2.72}}, "short": {"MR-mAP": {"0.5": 8.71, "0.55": 5.68, "0.6": 4.74, "0.65": 3.63, "0.7": 2.86, "0.75": 2.58, "0.8": 1.97, "0.85": 0.44, "0.9": 0.44, "0.95": 0.44, "average": 3.15}, "MR-R1": {"0.5": 5.36, "0.55": 3.5, "0.6": 2.1, "0.65": 1.63, "0.7": 1.4, "0.75": 1.17, "0.8": 1.17, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_15_10_10 [Epoch] 134 [Loss] loss_span 0.5682 loss_giou 0.5481 loss_label 0.7586 class_error 21.9846 loss_saliency 0.2314 loss_span_0 0.5841 loss_giou_0 0.5606 loss_label_0 0.7285 class_error_0 22.3826 loss_overall 3.9796 [Metrics] {"brief": {"MR-full-R1@0.5": 50.9, "MR-full-R1@0.7": 31.74, "MR-full-mAP": 30.02, "MR-full-mAP@0.5": 53.52, "MR-full-mAP@0.75": 28.51, "MR-long-mAP": 40.55, "MR-middle-mAP": 28.88, "MR-short-mAP": 3.09, "HL-min-Fair-mAP": 67.87, "HL-min-Fair-Hit1": 68.52, "HL-min-Good-mAP": 57.63, "HL-min-Good-Hit1": 66.32, "HL-min-VeryGood-mAP": 35.33, "HL-min-VeryGood-Hit1": 55.87}, "HL-min-Fair": {"HL-mAP": 67.87, "HL-Hit1": 68.52}, "HL-min-Good": {"HL-mAP": 57.63, "HL-Hit1": 66.32}, "HL-min-VeryGood": {"HL-mAP": 35.33, "HL-Hit1": 55.87}, "full": {"MR-mAP": {"0.5": 53.52, "0.55": 47.69, "0.6": 43.85, "0.65": 38.05, "0.7": 33.56, "0.75": 28.51, "0.8": 22.82, "0.85": 16.46, "0.9": 10.45, "0.95": 5.32, "average": 30.02}, "MR-R1": {"0.5": 50.9, "0.55": 45.29, "0.6": 41.68, "0.65": 36.0, "0.7": 31.74, "0.75": 27.03, "0.8": 22.0, "0.85": 15.74, "0.9": 10.06, "0.95": 5.03}}, "long": {"MR-mAP": {"0.5": 64.54, "0.55": 59.19, "0.6": 55.06, "0.65": 49.78, "0.7": 44.85, "0.75": 40.58, "0.8": 36.09, "0.85": 26.95, "0.9": 17.46, "0.95": 11.02, "average": 40.55}, "MR-R1": {"0.5": 56.45, "0.55": 51.05, "0.6": 47.39, "0.65": 42.51, "0.7": 38.33, "0.75": 34.84, "0.8": 31.71, "0.85": 23.52, "0.9": 14.98, "0.95": 9.93}}, "middle": {"MR-mAP": {"0.5": 56.15, "0.55": 49.63, "0.6": 44.76, "0.65": 37.94, "0.7": 32.95, "0.75": 26.12, "0.8": 18.56, "0.85": 12.81, "0.9": 7.55, "0.95": 2.35, "average": 28.88}, "MR-R1": {"0.5": 46.08, "0.55": 40.75, "0.6": 37.3, "0.65": 31.77, "0.7": 27.9, "0.75": 22.47, "0.8": 16.2, "0.85": 11.29, "0.9": 7.21, "0.95": 2.09}}, "short": {"MR-mAP": {"0.5": 8.41, "0.55": 5.82, "0.6": 5.34, "0.65": 3.77, "0.7": 2.21, "0.75": 1.98, "0.8": 1.45, "0.85": 0.62, "0.9": 0.62, "0.95": 0.62, "average": 3.09}, "MR-R1": {"0.5": 5.59, "0.55": 4.43, "0.6": 3.96, "0.65": 2.33, "0.7": 1.17, "0.75": 0.93, "0.8": 0.93, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_15_15_34 [Epoch] 139 [Loss] loss_span 0.5758 loss_giou 0.5644 loss_label 0.9191 class_error 26.4119 loss_saliency 0.2407 loss_span_0 0.5829 loss_giou_0 0.5716 loss_label_0 0.8582 class_error_0 25.5424 loss_overall 4.3128 [Metrics] {"brief": {"MR-full-R1@0.5": 50.0, "MR-full-R1@0.7": 30.39, "MR-full-mAP": 28.28, "MR-full-mAP@0.5": 51.78, "MR-full-mAP@0.75": 26.69, "MR-long-mAP": 38.79, "MR-middle-mAP": 27.12, "MR-short-mAP": 3.03, "HL-min-Fair-mAP": 67.84, "HL-min-Fair-Hit1": 68.13, "HL-min-Good-mAP": 57.53, "HL-min-Good-Hit1": 66.19, "HL-min-VeryGood-mAP": 35.2, "HL-min-VeryGood-Hit1": 56.19}, "HL-min-Fair": {"HL-mAP": 67.84, "HL-Hit1": 68.13}, "HL-min-Good": {"HL-mAP": 57.53, "HL-Hit1": 66.19}, "HL-min-VeryGood": {"HL-mAP": 35.2, "HL-Hit1": 56.19}, "full": {"MR-mAP": {"0.5": 51.78, "0.55": 46.07, "0.6": 40.82, "0.65": 35.49, "0.7": 30.74, "0.75": 26.69, "0.8": 20.53, "0.85": 15.42, "0.9": 9.81, "0.95": 5.41, "average": 28.28}, "MR-R1": {"0.5": 50.0, "0.55": 44.84, "0.6": 39.94, "0.65": 34.77, "0.7": 30.39, "0.75": 26.32, "0.8": 20.26, "0.85": 15.42, "0.9": 9.74, "0.95": 5.29}}, "long": {"MR-mAP": {"0.5": 63.04, "0.55": 58.65, "0.6": 53.28, "0.65": 48.27, "0.7": 43.11, "0.75": 39.2, "0.8": 32.32, "0.85": 24.19, "0.9": 15.83, "0.95": 10.0, "average": 38.79}, "MR-R1": {"0.5": 55.92, "0.55": 52.26, "0.6": 46.69, "0.65": 42.51, "0.7": 38.33, "0.75": 35.19, "0.8": 29.09, "0.85": 21.95, "0.9": 14.63, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 54.05, "0.55": 47.26, "0.6": 41.04, "0.65": 34.52, "0.7": 29.18, "0.75": 24.05, "0.8": 17.17, "0.85": 12.93, "0.9": 7.69, "0.95": 3.25, "average": 27.12}, "MR-R1": {"0.5": 44.83, "0.55": 39.6, "0.6": 35.11, "0.65": 29.78, "0.7": 25.29, "0.75": 21.11, "0.8": 15.15, "0.85": 11.7, "0.9": 6.9, "0.95": 2.93}}, "short": {"MR-mAP": {"0.5": 8.39, "0.55": 5.48, "0.6": 4.79, "0.65": 3.46, "0.7": 2.51, "0.75": 2.14, "0.8": 1.35, "0.85": 0.71, "0.9": 0.71, "0.95": 0.71, "average": 3.03}, "MR-R1": {"0.5": 5.83, "0.55": 3.73, "0.6": 3.5, "0.65": 2.33, "0.7": 2.1, "0.75": 0.93, "0.8": 0.47, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_15_21_06 [Epoch] 144 [Loss] loss_span 0.5701 loss_giou 0.5510 loss_label 0.8712 class_error 24.8251 loss_saliency 0.2277 loss_span_0 0.5805 loss_giou_0 0.5574 loss_label_0 0.8076 class_error_0 25.1642 loss_overall 4.1656 [Metrics] {"brief": {"MR-full-R1@0.5": 51.94, "MR-full-R1@0.7": 30.77, "MR-full-mAP": 29.27, "MR-full-mAP@0.5": 53.35, "MR-full-mAP@0.75": 27.11, "MR-long-mAP": 40.35, "MR-middle-mAP": 27.89, "MR-short-mAP": 3.07, "HL-min-Fair-mAP": 67.97, "HL-min-Fair-Hit1": 68.52, "HL-min-Good-mAP": 57.77, "HL-min-Good-Hit1": 66.13, "HL-min-VeryGood-mAP": 35.15, "HL-min-VeryGood-Hit1": 55.29}, "HL-min-Fair": {"HL-mAP": 67.97, "HL-Hit1": 68.52}, "HL-min-Good": {"HL-mAP": 57.77, "HL-Hit1": 66.13}, "HL-min-VeryGood": {"HL-mAP": 35.15, "HL-Hit1": 55.29}, "full": {"MR-mAP": {"0.5": 53.35, "0.55": 47.19, "0.6": 42.51, "0.65": 36.52, "0.7": 31.99, "0.75": 27.11, "0.8": 21.64, "0.85": 16.3, "0.9": 10.75, "0.95": 5.31, "average": 29.27}, "MR-R1": {"0.5": 51.94, "0.55": 46.26, "0.6": 41.42, "0.65": 34.9, "0.7": 30.77, "0.75": 26.71, "0.8": 21.23, "0.85": 15.94, "0.9": 10.71, "0.95": 5.03}}, "long": {"MR-mAP": {"0.5": 63.46, "0.55": 59.22, "0.6": 53.77, "0.65": 49.13, "0.7": 44.97, "0.75": 41.04, "0.8": 33.61, "0.85": 27.49, "0.9": 19.48, "0.95": 11.31, "average": 40.35}, "MR-R1": {"0.5": 55.92, "0.55": 52.09, "0.6": 46.69, "0.65": 42.16, "0.7": 38.85, "0.75": 36.41, "0.8": 29.62, "0.85": 24.22, "0.9": 17.42, "0.95": 10.1}}, "middle": {"MR-mAP": {"0.5": 56.63, "0.55": 48.85, "0.6": 43.58, "0.65": 35.59, "0.7": 30.1, "0.75": 23.8, "0.8": 18.3, "0.85": 12.44, "0.9": 7.47, "0.95": 2.09, "average": 27.89}, "MR-R1": {"0.5": 47.44, "0.55": 41.27, "0.6": 36.99, "0.65": 30.2, "0.7": 25.91, "0.75": 20.79, "0.8": 16.2, "0.85": 11.18, "0.9": 6.79, "0.95": 1.99}}, "short": {"MR-mAP": {"0.5": 8.71, "0.55": 5.74, "0.6": 5.12, "0.65": 3.16, "0.7": 2.22, "0.75": 2.02, "0.8": 1.3, "0.85": 0.8, "0.9": 0.8, "0.95": 0.8, "average": 3.07}, "MR-R1": {"0.5": 6.99, "0.55": 5.36, "0.6": 4.66, "0.65": 2.33, "0.7": 1.4, "0.75": 1.4, "0.8": 0.93, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_15_26_35 [Epoch] 149 [Loss] loss_span 0.5704 loss_giou 0.5610 loss_label 0.8380 class_error 23.7072 loss_saliency 0.2299 loss_span_0 0.5957 loss_giou_0 0.5653 loss_label_0 0.7653 class_error_0 22.2490 loss_overall 4.1257 [Metrics] {"brief": {"MR-full-R1@0.5": 51.74, "MR-full-R1@0.7": 31.61, "MR-full-mAP": 29.41, "MR-full-mAP@0.5": 52.78, "MR-full-mAP@0.75": 27.72, "MR-long-mAP": 40.99, "MR-middle-mAP": 27.8, "MR-short-mAP": 3.07, "HL-min-Fair-mAP": 67.89, "HL-min-Fair-Hit1": 67.94, "HL-min-Good-mAP": 57.74, "HL-min-Good-Hit1": 66.0, "HL-min-VeryGood-mAP": 35.45, "HL-min-VeryGood-Hit1": 56.19}, "HL-min-Fair": {"HL-mAP": 67.89, "HL-Hit1": 67.94}, "HL-min-Good": {"HL-mAP": 57.74, "HL-Hit1": 66.0}, "HL-min-VeryGood": {"HL-mAP": 35.45, "HL-Hit1": 56.19}, "full": {"MR-mAP": {"0.5": 52.78, "0.55": 46.41, "0.6": 42.18, "0.65": 36.89, "0.7": 32.44, "0.75": 27.72, "0.8": 21.82, "0.85": 16.68, "0.9": 11.21, "0.95": 5.95, "average": 29.41}, "MR-R1": {"0.5": 51.74, "0.55": 45.74, "0.6": 41.48, "0.65": 36.0, "0.7": 31.61, "0.75": 26.9, "0.8": 20.9, "0.85": 16.39, "0.9": 10.65, "0.95": 5.48}}, "long": {"MR-mAP": {"0.5": 63.41, "0.55": 58.93, "0.6": 54.77, "0.65": 49.89, "0.7": 46.34, "0.75": 41.84, "0.8": 35.36, "0.85": 28.64, "0.9": 19.26, "0.95": 11.47, "average": 40.99}, "MR-R1": {"0.5": 56.62, "0.55": 52.26, "0.6": 48.43, "0.65": 43.9, "0.7": 40.94, "0.75": 36.93, "0.8": 30.66, "0.85": 25.09, "0.9": 16.55, "0.95": 9.93}}, "middle": {"MR-mAP": {"0.5": 55.83, "0.55": 47.98, "0.6": 42.85, "0.65": 35.89, "0.7": 29.96, "0.75": 24.08, "0.8": 17.58, "0.85": 12.64, "0.9": 8.04, "0.95": 3.13, "average": 27.8}, "MR-R1": {"0.5": 47.44, "0.55": 41.17, "0.6": 36.99, "0.65": 31.24, "0.7": 26.12, "0.75": 21.0, "0.8": 15.26, "0.85": 11.29, "0.9": 7.11, "0.95": 2.72}}, "short": {"MR-mAP": {"0.5": 8.6, "0.55": 5.76, "0.6": 4.94, "0.65": 3.98, "0.7": 2.42, "0.75": 2.11, "0.8": 1.23, "0.85": 0.55, "0.9": 0.55, "0.95": 0.55, "average": 3.07}, "MR-R1": {"0.5": 5.36, "0.55": 3.5, "0.6": 2.56, "0.65": 1.63, "0.7": 1.17, "0.75": 0.93, "0.8": 0.47, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47}}}
+2021_08_04_15_32_11 [Epoch] 154 [Loss] loss_span 0.5749 loss_giou 0.5762 loss_label 0.8996 class_error 24.6712 loss_saliency 0.2324 loss_span_0 0.5934 loss_giou_0 0.5855 loss_label_0 0.8649 class_error_0 25.4092 loss_overall 4.3268 [Metrics] {"brief": {"MR-full-R1@0.5": 51.81, "MR-full-R1@0.7": 31.68, "MR-full-mAP": 28.93, "MR-full-mAP@0.5": 52.76, "MR-full-mAP@0.75": 26.85, "MR-long-mAP": 39.38, "MR-middle-mAP": 27.46, "MR-short-mAP": 3.22, "HL-min-Fair-mAP": 67.66, "HL-min-Fair-Hit1": 66.77, "HL-min-Good-mAP": 57.4, "HL-min-Good-Hit1": 64.58, "HL-min-VeryGood-mAP": 35.11, "HL-min-VeryGood-Hit1": 55.1}, "HL-min-Fair": {"HL-mAP": 67.66, "HL-Hit1": 66.77}, "HL-min-Good": {"HL-mAP": 57.4, "HL-Hit1": 64.58}, "HL-min-VeryGood": {"HL-mAP": 35.11, "HL-Hit1": 55.1}, "full": {"MR-mAP": {"0.5": 52.76, "0.55": 46.0, "0.6": 42.03, "0.65": 37.09, "0.7": 32.33, "0.75": 26.85, "0.8": 20.71, "0.85": 15.51, "0.9": 10.12, "0.95": 5.92, "average": 28.93}, "MR-R1": {"0.5": 51.81, "0.55": 45.68, "0.6": 41.81, "0.65": 36.45, "0.7": 31.68, "0.75": 26.06, "0.8": 20.0, "0.85": 15.1, "0.9": 10.06, "0.95": 5.74}}, "long": {"MR-mAP": {"0.5": 61.65, "0.55": 56.84, "0.6": 53.27, "0.65": 48.69, "0.7": 44.71, "0.75": 39.18, "0.8": 32.87, "0.85": 26.51, "0.9": 18.18, "0.95": 11.88, "average": 39.38}, "MR-R1": {"0.5": 55.23, "0.55": 50.7, "0.6": 47.04, "0.65": 42.86, "0.7": 39.37, "0.75": 34.15, "0.8": 28.4, "0.85": 23.0, "0.9": 16.38, "0.95": 10.8}}, "middle": {"MR-mAP": {"0.5": 55.64, "0.55": 47.79, "0.6": 42.83, "0.65": 36.58, "0.7": 30.7, "0.75": 23.88, "0.8": 16.59, "0.85": 11.27, "0.9": 6.54, "0.95": 2.77, "average": 27.46}, "MR-R1": {"0.5": 47.75, "0.55": 41.69, "0.6": 37.83, "0.65": 32.39, "0.7": 27.06, "0.75": 21.21, "0.8": 14.84, "0.85": 10.34, "0.9": 6.17, "0.95": 2.51}}, "short": {"MR-mAP": {"0.5": 9.29, "0.55": 5.43, "0.6": 4.79, "0.65": 3.41, "0.7": 2.58, "0.75": 2.38, "0.8": 1.92, "0.85": 0.79, "0.9": 0.79, "0.95": 0.79, "average": 3.22}, "MR-R1": {"0.5": 6.76, "0.55": 4.2, "0.6": 3.73, "0.65": 2.1, "0.7": 1.4, "0.75": 1.17, "0.8": 1.17, "0.85": 0.7, "0.9": 0.7, "0.95": 0.7}}}
+2021_08_04_15_37_41 [Epoch] 159 [Loss] loss_span 0.5667 loss_giou 0.5632 loss_label 0.8335 class_error 21.8606 loss_saliency 0.2354 loss_span_0 0.5713 loss_giou_0 0.5630 loss_label_0 0.7501 class_error_0 21.8055 loss_overall 4.0832 [Metrics] {"brief": {"MR-full-R1@0.5": 50.65, "MR-full-R1@0.7": 31.03, "MR-full-mAP": 28.84, "MR-full-mAP@0.5": 52.79, "MR-full-mAP@0.75": 27.06, "MR-long-mAP": 39.64, "MR-middle-mAP": 27.07, "MR-short-mAP": 3.87, "HL-min-Fair-mAP": 67.55, "HL-min-Fair-Hit1": 68.26, "HL-min-Good-mAP": 57.32, "HL-min-Good-Hit1": 66.32, "HL-min-VeryGood-mAP": 35.09, "HL-min-VeryGood-Hit1": 55.55}, "HL-min-Fair": {"HL-mAP": 67.55, "HL-Hit1": 68.26}, "HL-min-Good": {"HL-mAP": 57.32, "HL-Hit1": 66.32}, "HL-min-VeryGood": {"HL-mAP": 35.09, "HL-Hit1": 55.55}, "full": {"MR-mAP": {"0.5": 52.79, "0.55": 46.67, "0.6": 42.61, "0.65": 36.85, "0.7": 31.96, "0.75": 27.06, "0.8": 20.46, "0.85": 14.63, "0.9": 9.96, "0.95": 5.45, "average": 28.84}, "MR-R1": {"0.5": 50.65, "0.55": 44.97, "0.6": 40.9, "0.65": 35.68, "0.7": 31.03, "0.75": 26.45, "0.8": 20.32, "0.85": 14.52, "0.9": 9.87, "0.95": 5.1}}, "long": {"MR-mAP": {"0.5": 63.21, "0.55": 58.99, "0.6": 53.38, "0.65": 49.18, "0.7": 44.69, "0.75": 39.69, "0.8": 33.48, "0.85": 25.43, "0.9": 18.0, "0.95": 10.36, "average": 39.64}, "MR-R1": {"0.5": 56.79, "0.55": 52.79, "0.6": 47.21, "0.65": 43.03, "0.7": 39.2, "0.75": 34.84, "0.8": 29.44, "0.85": 22.65, "0.9": 16.38, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 54.83, "0.55": 47.2, "0.6": 43.19, "0.65": 35.81, "0.7": 29.96, "0.75": 23.96, "0.8": 15.93, "0.85": 10.62, "0.9": 6.56, "0.95": 2.67, "average": 27.07}, "MR-R1": {"0.5": 45.77, "0.55": 39.5, "0.6": 36.36, "0.65": 30.72, "0.7": 25.81, "0.75": 21.21, "0.8": 14.63, "0.85": 9.72, "0.9": 5.96, "0.95": 2.51}}, "short": {"MR-mAP": {"0.5": 9.73, "0.55": 6.54, "0.6": 5.83, "0.65": 4.76, "0.7": 3.65, "0.75": 3.24, "0.8": 2.16, "0.85": 0.93, "0.9": 0.93, "0.95": 0.93, "average": 3.87}, "MR-R1": {"0.5": 4.9, "0.55": 3.73, "0.6": 3.5, "0.65": 2.8, "0.7": 2.1, "0.75": 1.63, "0.8": 1.4, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47}}}
+2021_08_04_15_43_01 [Epoch] 164 [Loss] loss_span 0.5712 loss_giou 0.5586 loss_label 1.0040 class_error 25.8786 loss_saliency 0.2340 loss_span_0 0.5832 loss_giou_0 0.5678 loss_label_0 0.9042 class_error_0 24.7946 loss_overall 4.4230 [Metrics] {"brief": {"MR-full-R1@0.5": 52.19, "MR-full-R1@0.7": 32.26, "MR-full-mAP": 29.86, "MR-full-mAP@0.5": 53.75, "MR-full-mAP@0.75": 27.79, "MR-long-mAP": 40.89, "MR-middle-mAP": 28.32, "MR-short-mAP": 3.05, "HL-min-Fair-mAP": 67.42, "HL-min-Fair-Hit1": 67.61, "HL-min-Good-mAP": 57.26, "HL-min-Good-Hit1": 65.81, "HL-min-VeryGood-mAP": 34.95, "HL-min-VeryGood-Hit1": 55.1}, "HL-min-Fair": {"HL-mAP": 67.42, "HL-Hit1": 67.61}, "HL-min-Good": {"HL-mAP": 57.26, "HL-Hit1": 65.81}, "HL-min-VeryGood": {"HL-mAP": 34.95, "HL-Hit1": 55.1}, "full": {"MR-mAP": {"0.5": 53.75, "0.55": 48.01, "0.6": 43.93, "0.65": 38.06, "0.7": 32.93, "0.75": 27.79, "0.8": 21.66, "0.85": 16.15, "0.9": 11.01, "0.95": 5.35, "average": 29.86}, "MR-R1": {"0.5": 52.19, "0.55": 47.23, "0.6": 43.35, "0.65": 37.35, "0.7": 32.26, "0.75": 27.42, "0.8": 21.16, "0.85": 16.0, "0.9": 10.9, "0.95": 4.97}}, "long": {"MR-mAP": {"0.5": 63.52, "0.55": 59.15, "0.6": 54.31, "0.65": 50.82, "0.7": 47.28, "0.75": 41.94, "0.8": 35.17, "0.85": 26.9, "0.9": 18.89, "0.95": 10.92, "average": 40.89}, "MR-R1": {"0.5": 56.62, "0.55": 52.96, "0.6": 48.61, "0.65": 45.3, "0.7": 41.99, "0.75": 37.63, "0.8": 31.18, "0.85": 24.22, "0.9": 17.42, "0.95": 10.1}}, "middle": {"MR-mAP": {"0.5": 57.17, "0.55": 49.98, "0.6": 45.29, "0.65": 36.88, "0.7": 30.1, "0.75": 24.01, "0.8": 17.12, "0.85": 12.49, "0.9": 7.89, "0.95": 2.28, "average": 28.32}, "MR-R1": {"0.5": 48.38, "0.55": 42.74, "0.6": 39.6, "0.65": 32.39, "0.7": 26.44, "0.75": 21.53, "0.8": 15.36, "0.85": 11.39, "0.9": 7.21, "0.95": 1.99}}, "short": {"MR-mAP": {"0.5": 8.33, "0.55": 6.05, "0.6": 5.58, "0.65": 3.76, "0.7": 2.42, "0.75": 1.97, "0.8": 1.15, "0.85": 0.4, "0.9": 0.4, "0.95": 0.4, "average": 3.05}, "MR-R1": {"0.5": 4.9, "0.55": 4.43, "0.6": 3.26, "0.65": 2.1, "0.7": 1.4, "0.75": 0.7, "0.8": 0.47, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_15_48_24 [Epoch] 169 [Loss] loss_span 0.5846 loss_giou 0.5582 loss_label 1.0207 class_error 24.9182 loss_saliency 0.2305 loss_span_0 0.6111 loss_giou_0 0.5561 loss_label_0 0.9035 class_error_0 25.1699 loss_overall 4.4648 [Metrics] {"brief": {"MR-full-R1@0.5": 53.23, "MR-full-R1@0.7": 34.0, "MR-full-mAP": 30.58, "MR-full-mAP@0.5": 54.8, "MR-full-mAP@0.75": 29.02, "MR-long-mAP": 41.27, "MR-middle-mAP": 29.42, "MR-short-mAP": 3.11, "HL-min-Fair-mAP": 68.29, "HL-min-Fair-Hit1": 68.32, "HL-min-Good-mAP": 57.93, "HL-min-Good-Hit1": 66.26, "HL-min-VeryGood-mAP": 35.51, "HL-min-VeryGood-Hit1": 55.87}, "HL-min-Fair": {"HL-mAP": 68.29, "HL-Hit1": 68.32}, "HL-min-Good": {"HL-mAP": 57.93, "HL-Hit1": 66.26}, "HL-min-VeryGood": {"HL-mAP": 35.51, "HL-Hit1": 55.87}, "full": {"MR-mAP": {"0.5": 54.8, "0.55": 48.28, "0.6": 43.8, "0.65": 38.62, "0.7": 34.39, "0.75": 29.02, "0.8": 23.3, "0.85": 16.75, "0.9": 11.04, "0.95": 5.8, "average": 30.58}, "MR-R1": {"0.5": 53.23, "0.55": 47.48, "0.6": 43.03, "0.65": 38.45, "0.7": 34.0, "0.75": 28.65, "0.8": 22.97, "0.85": 16.84, "0.9": 11.29, "0.95": 5.81}}, "long": {"MR-mAP": {"0.5": 64.99, "0.55": 59.64, "0.6": 55.6, "0.65": 51.5, "0.7": 47.39, "0.75": 42.77, "0.8": 34.76, "0.85": 27.53, "0.9": 18.31, "0.95": 10.26, "average": 41.27}, "MR-R1": {"0.5": 57.49, "0.55": 53.14, "0.6": 49.3, "0.65": 45.82, "0.7": 41.99, "0.75": 38.5, "0.8": 31.36, "0.85": 25.09, "0.9": 17.07, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 57.68, "0.55": 50.3, "0.6": 44.71, "0.65": 38.22, "0.7": 32.78, "0.75": 25.68, "0.8": 20.05, "0.85": 13.05, "0.9": 8.2, "0.95": 3.53, "average": 29.42}, "MR-R1": {"0.5": 48.9, "0.55": 43.16, "0.6": 38.56, "0.65": 33.54, "0.7": 29.15, "0.75": 22.88, "0.8": 18.08, "0.85": 12.02, "0.9": 7.84, "0.95": 3.66}}, "short": {"MR-mAP": {"0.5": 8.89, "0.55": 5.69, "0.6": 4.75, "0.65": 3.32, "0.7": 2.53, "0.75": 2.09, "0.8": 1.5, "0.85": 0.77, "0.9": 0.77, "0.95": 0.77, "average": 3.11}, "MR-R1": {"0.5": 6.29, "0.55": 4.2, "0.6": 3.5, "0.65": 2.8, "0.7": 1.63, "0.75": 0.93, "0.8": 0.7, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47}}}
+2021_08_04_15_53_50 [Epoch] 174 [Loss] loss_span 0.5984 loss_giou 0.5776 loss_label 1.1829 class_error 29.2745 loss_saliency 0.2393 loss_span_0 0.6046 loss_giou_0 0.5716 loss_label_0 1.0677 class_error_0 28.3503 loss_overall 4.8422 [Metrics] {"brief": {"MR-full-R1@0.5": 52.06, "MR-full-R1@0.7": 31.81, "MR-full-mAP": 29.21, "MR-full-mAP@0.5": 53.22, "MR-full-mAP@0.75": 26.87, "MR-long-mAP": 41.6, "MR-middle-mAP": 26.84, "MR-short-mAP": 2.88, "HL-min-Fair-mAP": 68.03, "HL-min-Fair-Hit1": 67.81, "HL-min-Good-mAP": 57.66, "HL-min-Good-Hit1": 65.61, "HL-min-VeryGood-mAP": 35.31, "HL-min-VeryGood-Hit1": 55.16}, "HL-min-Fair": {"HL-mAP": 68.03, "HL-Hit1": 67.81}, "HL-min-Good": {"HL-mAP": 57.66, "HL-Hit1": 65.61}, "HL-min-VeryGood": {"HL-mAP": 35.31, "HL-Hit1": 55.16}, "full": {"MR-mAP": {"0.5": 53.22, "0.55": 47.09, "0.6": 43.07, "0.65": 36.63, "0.7": 32.09, "0.75": 26.87, "0.8": 20.96, "0.85": 15.57, "0.9": 10.98, "0.95": 5.59, "average": 29.21}, "MR-R1": {"0.5": 52.06, "0.55": 46.65, "0.6": 42.71, "0.65": 35.94, "0.7": 31.81, "0.75": 26.71, "0.8": 20.52, "0.85": 15.1, "0.9": 10.52, "0.95": 5.35}}, "long": {"MR-mAP": {"0.5": 64.73, "0.55": 60.28, "0.6": 57.38, "0.65": 51.44, "0.7": 47.15, "0.75": 41.93, "0.8": 34.36, "0.85": 27.15, "0.9": 20.36, "0.95": 11.19, "average": 41.6}, "MR-R1": {"0.5": 57.14, "0.55": 53.48, "0.6": 51.05, "0.65": 45.47, "0.7": 42.33, "0.75": 37.98, "0.8": 30.84, "0.85": 24.22, "0.9": 17.94, "0.95": 10.28}}, "middle": {"MR-mAP": {"0.5": 55.23, "0.55": 47.78, "0.6": 42.0, "0.65": 34.74, "0.7": 29.11, "0.75": 22.76, "0.8": 16.39, "0.85": 10.93, "0.9": 6.81, "0.95": 2.66, "average": 26.84}, "MR-R1": {"0.5": 47.23, "0.55": 41.48, "0.6": 36.89, "0.65": 30.2, "0.7": 25.6, "0.75": 20.27, "0.8": 14.52, "0.85": 9.82, "0.9": 6.17, "0.95": 2.4}}, "short": {"MR-mAP": {"0.5": 8.31, "0.55": 5.34, "0.6": 4.85, "0.65": 3.17, "0.7": 2.22, "0.75": 1.76, "0.8": 1.21, "0.85": 0.65, "0.9": 0.65, "0.95": 0.65, "average": 2.88}, "MR-R1": {"0.5": 6.29, "0.55": 4.43, "0.6": 3.73, "0.65": 1.63, "0.7": 1.17, "0.75": 0.47, "0.8": 0.47, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_15_59_09 [Epoch] 179 [Loss] loss_span 0.5806 loss_giou 0.5669 loss_label 1.1008 class_error 27.1876 loss_saliency 0.2377 loss_span_0 0.5878 loss_giou_0 0.5664 loss_label_0 1.0045 class_error_0 27.6649 loss_overall 4.6447 [Metrics] {"brief": {"MR-full-R1@0.5": 51.48, "MR-full-R1@0.7": 30.97, "MR-full-mAP": 28.99, "MR-full-mAP@0.5": 53.17, "MR-full-mAP@0.75": 26.94, "MR-long-mAP": 39.94, "MR-middle-mAP": 27.67, "MR-short-mAP": 3.0, "HL-min-Fair-mAP": 68.15, "HL-min-Fair-Hit1": 68.52, "HL-min-Good-mAP": 57.8, "HL-min-Good-Hit1": 66.13, "HL-min-VeryGood-mAP": 35.5, "HL-min-VeryGood-Hit1": 55.42}, "HL-min-Fair": {"HL-mAP": 68.15, "HL-Hit1": 68.52}, "HL-min-Good": {"HL-mAP": 57.8, "HL-Hit1": 66.13}, "HL-min-VeryGood": {"HL-mAP": 35.5, "HL-Hit1": 55.42}, "full": {"MR-mAP": {"0.5": 53.17, "0.55": 47.84, "0.6": 43.02, "0.65": 36.48, "0.7": 31.5, "0.75": 26.94, "0.8": 20.65, "0.85": 14.82, "0.9": 10.42, "0.95": 5.11, "average": 28.99}, "MR-R1": {"0.5": 51.48, "0.55": 46.84, "0.6": 42.19, "0.65": 35.55, "0.7": 30.97, "0.75": 26.65, "0.8": 20.26, "0.85": 14.65, "0.9": 10.32, "0.95": 5.03}}, "long": {"MR-mAP": {"0.5": 63.94, "0.55": 59.98, "0.6": 54.74, "0.65": 48.99, "0.7": 44.19, "0.75": 39.91, "0.8": 34.01, "0.85": 25.3, "0.9": 18.7, "0.95": 9.68, "average": 39.94}, "MR-R1": {"0.5": 56.97, "0.55": 53.66, "0.6": 48.43, "0.65": 43.03, "0.7": 38.68, "0.75": 35.19, "0.8": 29.97, "0.85": 22.82, "0.9": 17.07, "0.95": 8.89}}, "middle": {"MR-mAP": {"0.5": 55.53, "0.55": 49.38, "0.6": 43.96, "0.65": 35.82, "0.7": 30.13, "0.75": 24.25, "0.8": 16.37, "0.85": 11.2, "0.9": 7.12, "0.95": 2.97, "average": 27.67}, "MR-R1": {"0.5": 46.39, "0.55": 41.69, "0.6": 37.93, "0.65": 30.93, "0.7": 26.23, "0.75": 21.53, "0.8": 14.32, "0.85": 9.93, "0.9": 6.37, "0.95": 2.72}}, "short": {"MR-mAP": {"0.5": 8.83, "0.55": 5.94, "0.6": 4.73, "0.65": 3.24, "0.7": 2.23, "0.75": 1.96, "0.8": 1.34, "0.85": 0.58, "0.9": 0.58, "0.95": 0.58, "average": 3.0}, "MR-R1": {"0.5": 6.29, "0.55": 4.43, "0.6": 3.03, "0.65": 1.86, "0.7": 1.63, "0.75": 1.17, "0.8": 1.17, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_16_04_29 [Epoch] 184 [Loss] loss_span 0.5727 loss_giou 0.5537 loss_label 1.0821 class_error 27.1106 loss_saliency 0.2421 loss_span_0 0.5915 loss_giou_0 0.5634 loss_label_0 0.9882 class_error_0 26.3640 loss_overall 4.5937 [Metrics] {"brief": {"MR-full-R1@0.5": 51.87, "MR-full-R1@0.7": 31.29, "MR-full-mAP": 29.77, "MR-full-mAP@0.5": 53.73, "MR-full-mAP@0.75": 28.4, "MR-long-mAP": 40.5, "MR-middle-mAP": 28.65, "MR-short-mAP": 2.99, "HL-min-Fair-mAP": 68.42, "HL-min-Fair-Hit1": 68.71, "HL-min-Good-mAP": 58.06, "HL-min-Good-Hit1": 66.45, "HL-min-VeryGood-mAP": 35.63, "HL-min-VeryGood-Hit1": 56.19}, "HL-min-Fair": {"HL-mAP": 68.42, "HL-Hit1": 68.71}, "HL-min-Good": {"HL-mAP": 58.06, "HL-Hit1": 66.45}, "HL-min-VeryGood": {"HL-mAP": 35.63, "HL-Hit1": 56.19}, "full": {"MR-mAP": {"0.5": 53.73, "0.55": 46.84, "0.6": 42.86, "0.65": 37.26, "0.7": 32.14, "0.75": 28.4, "0.8": 22.35, "0.85": 16.82, "0.9": 11.72, "0.95": 5.63, "average": 29.77}, "MR-R1": {"0.5": 51.87, "0.55": 45.03, "0.6": 41.16, "0.65": 35.74, "0.7": 31.29, "0.75": 27.48, "0.8": 21.87, "0.85": 16.52, "0.9": 11.55, "0.95": 5.35}}, "long": {"MR-mAP": {"0.5": 63.7, "0.55": 58.29, "0.6": 53.96, "0.65": 50.65, "0.7": 45.42, "0.75": 40.68, "0.8": 34.47, "0.85": 26.79, "0.9": 20.03, "0.95": 11.05, "average": 40.5}, "MR-R1": {"0.5": 56.45, "0.55": 51.22, "0.6": 47.21, "0.65": 44.43, "0.7": 40.42, "0.75": 35.71, "0.8": 30.49, "0.85": 24.04, "0.9": 18.29, "0.95": 10.28}}, "middle": {"MR-mAP": {"0.5": 56.82, "0.55": 48.59, "0.6": 44.01, "0.65": 36.22, "0.7": 30.51, "0.75": 26.26, "0.8": 19.05, "0.85": 13.85, "0.9": 8.43, "0.95": 2.8, "average": 28.65}, "MR-R1": {"0.5": 47.44, "0.55": 40.54, "0.6": 37.2, "0.65": 30.83, "0.7": 26.23, "0.75": 22.99, "0.8": 17.03, "0.85": 12.33, "0.9": 7.73, "0.95": 2.51}}, "short": {"MR-mAP": {"0.5": 8.88, "0.55": 5.62, "0.6": 4.85, "0.65": 3.55, "0.7": 2.28, "0.75": 1.86, "0.8": 1.36, "0.85": 0.51, "0.9": 0.51, "0.95": 0.51, "average": 2.99}, "MR-R1": {"0.5": 6.06, "0.55": 3.73, "0.6": 2.56, "0.65": 0.93, "0.7": 0.47, "0.75": 0.23, "0.8": 0.23, "0.85": 0.0, "0.9": 0.0, "0.95": 0.0}}}
+2021_08_04_16_09_51 [Epoch] 189 [Loss] loss_span 0.5774 loss_giou 0.5608 loss_label 1.0931 class_error 27.5772 loss_saliency 0.2402 loss_span_0 0.5943 loss_giou_0 0.5691 loss_label_0 1.0639 class_error_0 28.9274 loss_overall 4.6987 [Metrics] {"brief": {"MR-full-R1@0.5": 52.06, "MR-full-R1@0.7": 32.77, "MR-full-mAP": 29.81, "MR-full-mAP@0.5": 53.85, "MR-full-mAP@0.75": 28.31, "MR-long-mAP": 41.1, "MR-middle-mAP": 28.15, "MR-short-mAP": 2.82, "HL-min-Fair-mAP": 68.32, "HL-min-Fair-Hit1": 68.58, "HL-min-Good-mAP": 57.99, "HL-min-Good-Hit1": 66.77, "HL-min-VeryGood-mAP": 35.57, "HL-min-VeryGood-Hit1": 56.06}, "HL-min-Fair": {"HL-mAP": 68.32, "HL-Hit1": 68.58}, "HL-min-Good": {"HL-mAP": 57.99, "HL-Hit1": 66.77}, "HL-min-VeryGood": {"HL-mAP": 35.57, "HL-Hit1": 56.06}, "full": {"MR-mAP": {"0.5": 53.85, "0.55": 47.19, "0.6": 43.34, "0.65": 37.78, "0.7": 33.39, "0.75": 28.31, "0.8": 21.99, "0.85": 16.2, "0.9": 10.45, "0.95": 5.64, "average": 29.81}, "MR-R1": {"0.5": 52.06, "0.55": 45.94, "0.6": 42.26, "0.65": 36.65, "0.7": 32.77, "0.75": 27.81, "0.8": 21.55, "0.85": 15.74, "0.9": 10.06, "0.95": 5.42}}, "long": {"MR-mAP": {"0.5": 65.33, "0.55": 59.52, "0.6": 55.24, "0.65": 51.25, "0.7": 47.08, "0.75": 41.71, "0.8": 34.96, "0.85": 26.88, "0.9": 18.11, "0.95": 10.91, "average": 41.1}, "MR-R1": {"0.5": 57.32, "0.55": 51.74, "0.6": 48.26, "0.65": 44.43, "0.7": 41.46, "0.75": 36.76, "0.8": 30.14, "0.85": 23.17, "0.9": 15.68, "0.95": 9.76}}, "middle": {"MR-mAP": {"0.5": 55.74, "0.55": 48.37, "0.6": 43.87, "0.65": 36.49, "0.7": 31.17, "0.75": 25.21, "0.8": 17.98, "0.85": 12.43, "0.9": 7.22, "0.95": 3.05, "average": 28.15}, "MR-R1": {"0.5": 47.54, "0.55": 41.8, "0.6": 38.35, "0.65": 31.87, "0.7": 27.59, "0.75": 22.47, "0.8": 16.51, "0.85": 11.49, "0.9": 6.79, "0.95": 2.82}}, "short": {"MR-mAP": {"0.5": 8.85, "0.55": 5.71, "0.6": 4.94, "0.65": 3.21, "0.7": 2.1, "0.75": 1.61, "0.8": 0.98, "0.85": 0.28, "0.9": 0.28, "0.95": 0.28, "average": 2.82}, "MR-R1": {"0.5": 5.36, "0.55": 3.5, "0.6": 2.56, "0.65": 1.86, "0.7": 1.4, "0.75": 1.17, "0.8": 0.7, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_16_15_13 [Epoch] 194 [Loss] loss_span 0.5855 loss_giou 0.5604 loss_label 0.9803 class_error 23.7438 loss_saliency 0.2436 loss_span_0 0.5946 loss_giou_0 0.5654 loss_label_0 0.9436 class_error_0 24.4644 loss_overall 4.4732 [Metrics] {"brief": {"MR-full-R1@0.5": 51.87, "MR-full-R1@0.7": 32.45, "MR-full-mAP": 30.14, "MR-full-mAP@0.5": 53.92, "MR-full-mAP@0.75": 28.51, "MR-long-mAP": 40.44, "MR-middle-mAP": 29.4, "MR-short-mAP": 2.85, "HL-min-Fair-mAP": 68.66, "HL-min-Fair-Hit1": 69.29, "HL-min-Good-mAP": 58.29, "HL-min-Good-Hit1": 67.29, "HL-min-VeryGood-mAP": 35.77, "HL-min-VeryGood-Hit1": 56.39}, "HL-min-Fair": {"HL-mAP": 68.66, "HL-Hit1": 69.29}, "HL-min-Good": {"HL-mAP": 58.29, "HL-Hit1": 67.29}, "HL-min-VeryGood": {"HL-mAP": 35.77, "HL-Hit1": 56.39}, "full": {"MR-mAP": {"0.5": 53.92, "0.55": 48.0, "0.6": 43.3, "0.65": 37.39, "0.7": 32.98, "0.75": 28.51, "0.8": 22.84, "0.85": 16.99, "0.9": 11.8, "0.95": 5.69, "average": 30.14}, "MR-R1": {"0.5": 51.87, "0.55": 46.32, "0.6": 41.94, "0.65": 36.52, "0.7": 32.45, "0.75": 27.74, "0.8": 22.39, "0.85": 16.65, "0.9": 11.48, "0.95": 5.42}}, "long": {"MR-mAP": {"0.5": 63.62, "0.55": 58.74, "0.6": 53.42, "0.65": 48.78, "0.7": 44.4, "0.75": 40.48, "0.8": 34.18, "0.85": 28.76, "0.9": 20.68, "0.95": 11.31, "average": 40.44}, "MR-R1": {"0.5": 55.75, "0.55": 51.22, "0.6": 46.52, "0.65": 42.51, "0.7": 39.2, "0.75": 35.71, "0.8": 30.14, "0.85": 25.44, "0.9": 18.64, "0.95": 10.45}}, "middle": {"MR-mAP": {"0.5": 57.32, "0.55": 50.55, "0.6": 45.46, "0.65": 37.61, "0.7": 32.44, "0.75": 26.43, "0.8": 19.94, "0.85": 13.18, "0.9": 8.27, "0.95": 2.77, "average": 29.4}, "MR-R1": {"0.5": 48.17, "0.55": 42.84, "0.6": 38.77, "0.65": 32.92, "0.7": 28.53, "0.75": 23.2, "0.8": 17.87, "0.85": 11.6, "0.9": 7.31, "0.95": 2.4}}, "short": {"MR-mAP": {"0.5": 8.63, "0.55": 5.42, "0.6": 4.82, "0.65": 3.23, "0.7": 1.98, "0.75": 1.67, "0.8": 1.34, "0.85": 0.47, "0.9": 0.47, "0.95": 0.47, "average": 2.85}, "MR-R1": {"0.5": 5.36, "0.55": 3.26, "0.6": 2.8, "0.65": 1.63, "0.7": 1.17, "0.75": 0.7, "0.8": 0.7, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
+2021_08_04_16_20_32 [Epoch] 199 [Loss] loss_span 0.5908 loss_giou 0.5637 loss_label 0.9894 class_error 23.9894 loss_saliency 0.2397 loss_span_0 0.5944 loss_giou_0 0.5636 loss_label_0 0.9127 class_error_0 22.9809 loss_overall 4.4542 [Metrics] {"brief": {"MR-full-R1@0.5": 50.77, "MR-full-R1@0.7": 31.35, "MR-full-mAP": 29.18, "MR-full-mAP@0.5": 53.22, "MR-full-mAP@0.75": 27.3, "MR-long-mAP": 39.55, "MR-middle-mAP": 28.55, "MR-short-mAP": 2.51, "HL-min-Fair-mAP": 68.11, "HL-min-Fair-Hit1": 67.94, "HL-min-Good-mAP": 57.66, "HL-min-Good-Hit1": 65.48, "HL-min-VeryGood-mAP": 35.35, "HL-min-VeryGood-Hit1": 54.71}, "HL-min-Fair": {"HL-mAP": 68.11, "HL-Hit1": 67.94}, "HL-min-Good": {"HL-mAP": 57.66, "HL-Hit1": 65.48}, "HL-min-VeryGood": {"HL-mAP": 35.35, "HL-Hit1": 54.71}, "full": {"MR-mAP": {"0.5": 53.22, "0.55": 47.59, "0.6": 43.14, "0.65": 37.08, "0.7": 32.34, "0.75": 27.3, "0.8": 21.21, "0.85": 14.95, "0.9": 9.77, "0.95": 5.17, "average": 29.18}, "MR-R1": {"0.5": 50.77, "0.55": 45.94, "0.6": 41.55, "0.65": 35.55, "0.7": 31.35, "0.75": 26.26, "0.8": 20.39, "0.85": 14.32, "0.9": 9.29, "0.95": 4.9}}, "long": {"MR-mAP": {"0.5": 62.95, "0.55": 58.21, "0.6": 54.71, "0.65": 49.05, "0.7": 44.33, "0.75": 39.84, "0.8": 31.77, "0.85": 25.91, "0.9": 18.28, "0.95": 10.44, "average": 39.55}, "MR-R1": {"0.5": 53.83, "0.55": 49.83, "0.6": 46.86, "0.65": 41.46, "0.7": 37.8, "0.75": 34.15, "0.8": 26.83, "0.85": 22.3, "0.9": 15.51, "0.95": 9.23}}, "middle": {"MR-mAP": {"0.5": 56.94, "0.55": 50.54, "0.6": 44.5, "0.65": 37.48, "0.7": 31.71, "0.75": 25.02, "0.8": 19.08, "0.85": 11.34, "0.9": 6.43, "0.95": 2.46, "average": 28.55}, "MR-R1": {"0.5": 47.96, "0.55": 42.74, "0.6": 37.83, "0.65": 31.97, "0.7": 27.59, "0.75": 21.63, "0.8": 16.72, "0.85": 9.72, "0.9": 5.64, "0.95": 2.3}}, "short": {"MR-mAP": {"0.5": 7.08, "0.55": 5.08, "0.6": 4.28, "0.65": 2.88, "0.7": 1.93, "0.75": 1.78, "0.8": 1.11, "0.85": 0.31, "0.9": 0.31, "0.95": 0.31, "average": 2.51}, "MR-R1": {"0.5": 4.43, "0.55": 3.96, "0.6": 3.03, "0.65": 1.63, "0.7": 1.17, "0.75": 0.93, "0.8": 0.47, "0.85": 0.23, "0.9": 0.23, "0.95": 0.23}}}
diff --git a/run_on_video/moment_detr_ckpt/inference_hl_val_test_code_preds.jsonl b/run_on_video/moment_detr_ckpt/inference_hl_val_test_code_preds.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..b95f9b7e2db6eded5c9df3a17eb8dff38f586794
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/inference_hl_val_test_code_preds.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b38328e316be63a2e7ca667566a76223ffe7b349a0b2edce66f028673f393ab
+size 2443100
diff --git a/run_on_video/moment_detr_ckpt/inference_hl_val_test_code_preds_metrics.json b/run_on_video/moment_detr_ckpt/inference_hl_val_test_code_preds_metrics.json
new file mode 100644
index 0000000000000000000000000000000000000000..fcd542429d8af9ad150bd57c9ce011f74c3d8b4c
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/inference_hl_val_test_code_preds_metrics.json
@@ -0,0 +1,138 @@
+{
+ "brief": {
+ "MR-full-R1@0.5": 53.23,
+ "MR-full-R1@0.7": 34.0,
+ "MR-full-mAP": 30.58,
+ "MR-full-mAP@0.5": 54.8,
+ "MR-full-mAP@0.75": 29.02,
+ "MR-long-mAP": 41.27,
+ "MR-middle-mAP": 29.42,
+ "MR-short-mAP": 3.11,
+ "HL-min-Fair-mAP": 68.29,
+ "HL-min-Fair-Hit1": 68.32,
+ "HL-min-Good-mAP": 57.93,
+ "HL-min-Good-Hit1": 66.26,
+ "HL-min-VeryGood-mAP": 35.51,
+ "HL-min-VeryGood-Hit1": 55.87
+ },
+ "HL-min-Fair": {
+ "HL-mAP": 68.29,
+ "HL-Hit1": 68.32
+ },
+ "HL-min-Good": {
+ "HL-mAP": 57.93,
+ "HL-Hit1": 66.26
+ },
+ "HL-min-VeryGood": {
+ "HL-mAP": 35.51,
+ "HL-Hit1": 55.87
+ },
+ "full": {
+ "MR-mAP": {
+ "0.5": 54.8,
+ "0.55": 48.28,
+ "0.6": 43.8,
+ "0.65": 38.62,
+ "0.7": 34.39,
+ "0.75": 29.02,
+ "0.8": 23.3,
+ "0.85": 16.75,
+ "0.9": 11.04,
+ "0.95": 5.8,
+ "average": 30.58
+ },
+ "MR-R1": {
+ "0.5": 53.23,
+ "0.55": 47.48,
+ "0.6": 43.03,
+ "0.65": 38.45,
+ "0.7": 34.0,
+ "0.75": 28.65,
+ "0.8": 22.97,
+ "0.85": 16.84,
+ "0.9": 11.29,
+ "0.95": 5.81
+ }
+ },
+ "long": {
+ "MR-mAP": {
+ "0.5": 64.99,
+ "0.55": 59.64,
+ "0.6": 55.6,
+ "0.65": 51.5,
+ "0.7": 47.39,
+ "0.75": 42.77,
+ "0.8": 34.76,
+ "0.85": 27.53,
+ "0.9": 18.31,
+ "0.95": 10.26,
+ "average": 41.27
+ },
+ "MR-R1": {
+ "0.5": 57.49,
+ "0.55": 53.14,
+ "0.6": 49.3,
+ "0.65": 45.82,
+ "0.7": 41.99,
+ "0.75": 38.5,
+ "0.8": 31.36,
+ "0.85": 25.09,
+ "0.9": 17.07,
+ "0.95": 9.23
+ }
+ },
+ "middle": {
+ "MR-mAP": {
+ "0.5": 57.68,
+ "0.55": 50.3,
+ "0.6": 44.71,
+ "0.65": 38.22,
+ "0.7": 32.78,
+ "0.75": 25.68,
+ "0.8": 20.05,
+ "0.85": 13.05,
+ "0.9": 8.2,
+ "0.95": 3.53,
+ "average": 29.42
+ },
+ "MR-R1": {
+ "0.5": 48.9,
+ "0.55": 43.16,
+ "0.6": 38.56,
+ "0.65": 33.54,
+ "0.7": 29.15,
+ "0.75": 22.88,
+ "0.8": 18.08,
+ "0.85": 12.02,
+ "0.9": 7.84,
+ "0.95": 3.66
+ }
+ },
+ "short": {
+ "MR-mAP": {
+ "0.5": 8.89,
+ "0.55": 5.69,
+ "0.6": 4.75,
+ "0.65": 3.32,
+ "0.7": 2.53,
+ "0.75": 2.09,
+ "0.8": 1.5,
+ "0.85": 0.77,
+ "0.9": 0.77,
+ "0.95": 0.77,
+ "average": 3.11
+ },
+ "MR-R1": {
+ "0.5": 6.29,
+ "0.55": 4.2,
+ "0.6": 3.5,
+ "0.65": 2.8,
+ "0.7": 1.63,
+ "0.75": 0.93,
+ "0.8": 0.7,
+ "0.85": 0.47,
+ "0.9": 0.47,
+ "0.95": 0.47
+ }
+ }
+}
\ No newline at end of file
diff --git a/run_on_video/moment_detr_ckpt/model_best.ckpt b/run_on_video/moment_detr_ckpt/model_best.ckpt
new file mode 100644
index 0000000000000000000000000000000000000000..f25999f951848d98a6f640af1d128639322d4db4
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/model_best.ckpt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad5facec03008800536466c7d33f9232c3db2b71a1bd445073957bd6f4d0edbf
+size 16952721
diff --git a/run_on_video/moment_detr_ckpt/opt.json b/run_on_video/moment_detr_ckpt/opt.json
new file mode 100644
index 0000000000000000000000000000000000000000..a909362c99b89c2ae98e62e3a129e8da2ce715d1
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/opt.json
@@ -0,0 +1,75 @@
+{
+ "dset_name": "hl",
+ "eval_split_name": "val",
+ "debug": false,
+ "data_ratio": 1.0,
+ "results_root": "baselines/detr/results",
+ "exp_id": "final_base_clip_clip_mq10_noalign",
+ "seed": 2018,
+ "device": 0,
+ "num_workers": 4,
+ "no_pin_memory": false,
+ "lr": 0.0001,
+ "lr_drop": 400,
+ "wd": 0.0001,
+ "n_epoch": 200,
+ "max_es_cnt": 200,
+ "bsz": 32,
+ "eval_bsz": 100,
+ "grad_clip": 0.1,
+ "eval_untrained": false,
+ "resume": null,
+ "resume_all": false,
+ "start_epoch": null,
+ "max_q_l": 32,
+ "max_v_l": 75,
+ "clip_length": 2,
+ "max_windows": 5,
+ "train_path": "annotations/highlight_train.jsonl",
+ "eval_path": "annotations/highlight_val.jsonl",
+ "no_norm_vfeat": false,
+ "no_norm_tfeat": false,
+ "v_feat_dirs": [
+ "features/clip_features"
+ ],
+ "t_feat_dir": "features/clip_text_features/",
+ "v_feat_dim": 512,
+ "t_feat_dim": 512,
+ "ctx_mode": "video_tef",
+ "no_hard_neg": false,
+ "no_easy_neg": false,
+ "position_embedding": "sine",
+ "enc_layers": 2,
+ "dec_layers": 2,
+ "dim_feedforward": 1024,
+ "hidden_dim": 256,
+ "input_dropout": 0.5,
+ "dropout": 0.1,
+ "txt_drop_ratio": 0,
+ "use_txt_pos": false,
+ "nheads": 8,
+ "num_queries": 10,
+ "pre_norm": false,
+ "n_input_proj": 2,
+ "contrastive_hdim": 64,
+ "temperature": 0.07,
+ "lw_saliency": 1.0,
+ "saliency_margin": 0.2,
+ "aux_loss": true,
+ "span_loss_type": "l1",
+ "contrastive_align_loss": false,
+ "set_cost_span": 10,
+ "set_cost_giou": 1,
+ "set_cost_class": 4,
+ "span_loss_coef": 10,
+ "giou_loss_coef": 1,
+ "label_loss_coef": 4,
+ "eos_coef": 0.1,
+ "contrastive_align_loss_coef": 0.02,
+ "no_sort_results": false,
+ "max_before_nms": 10,
+ "max_after_nms": 10,
+ "conf_thd": 0.0,
+ "nms_thd": -1.0,
+ "results_dir": "moment_detr/tmp/clip_only_non_pt/"
+}
diff --git a/run_on_video/moment_detr_ckpt/train.log.txt b/run_on_video/moment_detr_ckpt/train.log.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6a8107d98c93b5cf7c93771da1cd8682ffa9863e
--- /dev/null
+++ b/run_on_video/moment_detr_ckpt/train.log.txt
@@ -0,0 +1,200 @@
+2021_08_04_12_44_04 [Epoch] 001 [Loss] loss_span 1.2972 loss_giou 0.8287 loss_label 0.5786 class_error 10.6986 loss_saliency 0.4536 loss_span_0 1.3355 loss_giou_0 0.8531 loss_label_0 0.5712 class_error_0 9.1542 loss_overall 5.9179
+2021_08_04_12_45_03 [Epoch] 002 [Loss] loss_span 0.9610 loss_giou 0.6673 loss_label 0.6130 class_error 7.9499 loss_saliency 0.4074 loss_span_0 1.0072 loss_giou_0 0.6896 loss_label_0 0.6068 class_error_0 8.7446 loss_overall 4.9524
+2021_08_04_12_46_00 [Epoch] 003 [Loss] loss_span 0.9251 loss_giou 0.6528 loss_label 0.6185 class_error 6.9243 loss_saliency 0.3960 loss_span_0 0.9675 loss_giou_0 0.6762 loss_label_0 0.6124 class_error_0 7.5102 loss_overall 4.8485
+2021_08_04_12_46_58 [Epoch] 004 [Loss] loss_span 0.9149 loss_giou 0.6477 loss_label 0.6126 class_error 4.7459 loss_saliency 0.3836 loss_span_0 0.9530 loss_giou_0 0.6704 loss_label_0 0.6115 class_error_0 6.5554 loss_overall 4.7936
+2021_08_04_12_47_56 [Epoch] 005 [Loss] loss_span 0.8941 loss_giou 0.6431 loss_label 0.6141 class_error 5.4848 loss_saliency 0.3736 loss_span_0 0.9455 loss_giou_0 0.6690 loss_label_0 0.6125 class_error_0 6.7207 loss_overall 4.7520
+2021_08_04_12_49_22 [Epoch] 006 [Loss] loss_span 0.8818 loss_giou 0.6366 loss_label 0.6116 class_error 6.2694 loss_saliency 0.3688 loss_span_0 0.9255 loss_giou_0 0.6651 loss_label_0 0.6167 class_error_0 6.4318 loss_overall 4.7061
+2021_08_04_12_50_20 [Epoch] 007 [Loss] loss_span 0.8644 loss_giou 0.6341 loss_label 0.6105 class_error 5.5753 loss_saliency 0.3654 loss_span_0 0.8842 loss_giou_0 0.6523 loss_label_0 0.6145 class_error_0 4.9088 loss_overall 4.6253
+2021_08_04_12_51_18 [Epoch] 008 [Loss] loss_span 0.7577 loss_giou 0.6302 loss_label 0.5960 class_error 8.4945 loss_saliency 0.3634 loss_span_0 0.7785 loss_giou_0 0.6370 loss_label_0 0.6186 class_error_0 5.8819 loss_overall 4.3814
+2021_08_04_12_52_17 [Epoch] 009 [Loss] loss_span 0.6922 loss_giou 0.5997 loss_label 0.5183 class_error 7.5825 loss_saliency 0.3628 loss_span_0 0.6874 loss_giou_0 0.5978 loss_label_0 0.5542 class_error_0 6.7843 loss_overall 4.0124
+2021_08_04_12_53_14 [Epoch] 010 [Loss] loss_span 0.6576 loss_giou 0.5844 loss_label 0.5190 class_error 6.6664 loss_saliency 0.3569 loss_span_0 0.6487 loss_giou_0 0.5885 loss_label_0 0.5435 class_error_0 6.7383 loss_overall 3.8986
+2021_08_04_12_54_39 [Epoch] 011 [Loss] loss_span 0.6314 loss_giou 0.5711 loss_label 0.5253 class_error 6.8657 loss_saliency 0.3481 loss_span_0 0.6423 loss_giou_0 0.5881 loss_label_0 0.5335 class_error_0 7.8473 loss_overall 3.8398
+2021_08_04_12_55_39 [Epoch] 012 [Loss] loss_span 0.6399 loss_giou 0.5757 loss_label 0.5142 class_error 6.2857 loss_saliency 0.3484 loss_span_0 0.6313 loss_giou_0 0.5815 loss_label_0 0.5255 class_error_0 7.3446 loss_overall 3.8164
+2021_08_04_12_56_37 [Epoch] 013 [Loss] loss_span 0.6239 loss_giou 0.5736 loss_label 0.5145 class_error 6.2704 loss_saliency 0.3437 loss_span_0 0.6118 loss_giou_0 0.5712 loss_label_0 0.5236 class_error_0 8.4098 loss_overall 3.7622
+2021_08_04_12_57_37 [Epoch] 014 [Loss] loss_span 0.6114 loss_giou 0.5666 loss_label 0.5164 class_error 6.1901 loss_saliency 0.3382 loss_span_0 0.6112 loss_giou_0 0.5720 loss_label_0 0.5182 class_error_0 8.6985 loss_overall 3.7339
+2021_08_04_12_58_37 [Epoch] 015 [Loss] loss_span 0.6074 loss_giou 0.5624 loss_label 0.5100 class_error 5.9347 loss_saliency 0.3337 loss_span_0 0.6097 loss_giou_0 0.5708 loss_label_0 0.5157 class_error_0 7.8520 loss_overall 3.7096
+2021_08_04_13_00_03 [Epoch] 016 [Loss] loss_span 0.6034 loss_giou 0.5615 loss_label 0.5122 class_error 5.9122 loss_saliency 0.3219 loss_span_0 0.6121 loss_giou_0 0.5734 loss_label_0 0.5075 class_error_0 7.5621 loss_overall 3.6920
+2021_08_04_13_01_02 [Epoch] 017 [Loss] loss_span 0.6016 loss_giou 0.5636 loss_label 0.5023 class_error 5.5376 loss_saliency 0.3225 loss_span_0 0.6104 loss_giou_0 0.5772 loss_label_0 0.4986 class_error_0 7.7640 loss_overall 3.6763
+2021_08_04_13_02_01 [Epoch] 018 [Loss] loss_span 0.5969 loss_giou 0.5614 loss_label 0.5034 class_error 5.6997 loss_saliency 0.3156 loss_span_0 0.6057 loss_giou_0 0.5715 loss_label_0 0.4994 class_error_0 7.5964 loss_overall 3.6538
+2021_08_04_13_02_59 [Epoch] 019 [Loss] loss_span 0.5892 loss_giou 0.5573 loss_label 0.4988 class_error 5.6585 loss_saliency 0.3128 loss_span_0 0.5997 loss_giou_0 0.5685 loss_label_0 0.4981 class_error_0 7.2079 loss_overall 3.6244
+2021_08_04_13_03_58 [Epoch] 020 [Loss] loss_span 0.5884 loss_giou 0.5578 loss_label 0.4902 class_error 5.8618 loss_saliency 0.3110 loss_span_0 0.5992 loss_giou_0 0.5707 loss_label_0 0.4906 class_error_0 7.3296 loss_overall 3.6078
+2021_08_04_13_05_22 [Epoch] 021 [Loss] loss_span 0.5829 loss_giou 0.5562 loss_label 0.4887 class_error 5.6675 loss_saliency 0.3124 loss_span_0 0.5917 loss_giou_0 0.5644 loss_label_0 0.4863 class_error_0 6.6632 loss_overall 3.5827
+2021_08_04_13_06_23 [Epoch] 022 [Loss] loss_span 0.5783 loss_giou 0.5540 loss_label 0.4751 class_error 5.9591 loss_saliency 0.3065 loss_span_0 0.5943 loss_giou_0 0.5680 loss_label_0 0.4740 class_error_0 6.9004 loss_overall 3.5501
+2021_08_04_13_07_22 [Epoch] 023 [Loss] loss_span 0.5708 loss_giou 0.5444 loss_label 0.4778 class_error 6.6102 loss_saliency 0.3018 loss_span_0 0.6004 loss_giou_0 0.5724 loss_label_0 0.4691 class_error_0 7.0948 loss_overall 3.5367
+2021_08_04_13_08_20 [Epoch] 024 [Loss] loss_span 0.5645 loss_giou 0.5465 loss_label 0.4679 class_error 6.7828 loss_saliency 0.2953 loss_span_0 0.5845 loss_giou_0 0.5605 loss_label_0 0.4649 class_error_0 6.2931 loss_overall 3.4841
+2021_08_04_13_09_19 [Epoch] 025 [Loss] loss_span 0.5594 loss_giou 0.5455 loss_label 0.4611 class_error 7.0777 loss_saliency 0.2925 loss_span_0 0.5800 loss_giou_0 0.5607 loss_label_0 0.4588 class_error_0 6.9143 loss_overall 3.4581
+2021_08_04_13_10_46 [Epoch] 026 [Loss] loss_span 0.5527 loss_giou 0.5408 loss_label 0.4534 class_error 6.8014 loss_saliency 0.2903 loss_span_0 0.5754 loss_giou_0 0.5579 loss_label_0 0.4483 class_error_0 7.3016 loss_overall 3.4187
+2021_08_04_13_11_46 [Epoch] 027 [Loss] loss_span 0.5468 loss_giou 0.5384 loss_label 0.4506 class_error 7.0799 loss_saliency 0.2870 loss_span_0 0.5706 loss_giou_0 0.5533 loss_label_0 0.4467 class_error_0 6.9152 loss_overall 3.3934
+2021_08_04_13_12_43 [Epoch] 028 [Loss] loss_span 0.5433 loss_giou 0.5374 loss_label 0.4438 class_error 6.9976 loss_saliency 0.2839 loss_span_0 0.5648 loss_giou_0 0.5523 loss_label_0 0.4374 class_error_0 6.8193 loss_overall 3.3630
+2021_08_04_13_13_42 [Epoch] 029 [Loss] loss_span 0.5325 loss_giou 0.5299 loss_label 0.4421 class_error 7.2091 loss_saliency 0.2729 loss_span_0 0.5544 loss_giou_0 0.5488 loss_label_0 0.4356 class_error_0 7.3670 loss_overall 3.3163
+2021_08_04_13_14_41 [Epoch] 030 [Loss] loss_span 0.5283 loss_giou 0.5291 loss_label 0.4322 class_error 6.9750 loss_saliency 0.2716 loss_span_0 0.5492 loss_giou_0 0.5473 loss_label_0 0.4246 class_error_0 7.1994 loss_overall 3.2821
+2021_08_04_13_16_07 [Epoch] 031 [Loss] loss_span 0.5161 loss_giou 0.5213 loss_label 0.4340 class_error 7.0319 loss_saliency 0.2668 loss_span_0 0.5406 loss_giou_0 0.5436 loss_label_0 0.4221 class_error_0 7.3913 loss_overall 3.2444
+2021_08_04_13_17_04 [Epoch] 032 [Loss] loss_span 0.5198 loss_giou 0.5210 loss_label 0.4234 class_error 6.9696 loss_saliency 0.2629 loss_span_0 0.5406 loss_giou_0 0.5406 loss_label_0 0.4178 class_error_0 7.1132 loss_overall 3.2260
+2021_08_04_13_18_03 [Epoch] 033 [Loss] loss_span 0.5140 loss_giou 0.5201 loss_label 0.4201 class_error 6.8230 loss_saliency 0.2590 loss_span_0 0.5333 loss_giou_0 0.5358 loss_label_0 0.4185 class_error_0 7.2699 loss_overall 3.2007
+2021_08_04_13_19_02 [Epoch] 034 [Loss] loss_span 0.5061 loss_giou 0.5157 loss_label 0.4125 class_error 6.9812 loss_saliency 0.2595 loss_span_0 0.5260 loss_giou_0 0.5346 loss_label_0 0.4056 class_error_0 7.2665 loss_overall 3.1600
+2021_08_04_13_20_00 [Epoch] 035 [Loss] loss_span 0.4975 loss_giou 0.5137 loss_label 0.4085 class_error 6.9593 loss_saliency 0.2511 loss_span_0 0.5240 loss_giou_0 0.5349 loss_label_0 0.4010 class_error_0 6.8786 loss_overall 3.1308
+2021_08_04_13_21_26 [Epoch] 036 [Loss] loss_span 0.5017 loss_giou 0.5112 loss_label 0.4048 class_error 6.7255 loss_saliency 0.2520 loss_span_0 0.5180 loss_giou_0 0.5296 loss_label_0 0.3997 class_error_0 7.2124 loss_overall 3.1170
+2021_08_04_13_22_25 [Epoch] 037 [Loss] loss_span 0.4918 loss_giou 0.5101 loss_label 0.3992 class_error 7.2220 loss_saliency 0.2512 loss_span_0 0.5116 loss_giou_0 0.5270 loss_label_0 0.3950 class_error_0 7.0849 loss_overall 3.0860
+2021_08_04_13_23_23 [Epoch] 038 [Loss] loss_span 0.4910 loss_giou 0.5071 loss_label 0.3942 class_error 6.5723 loss_saliency 0.2470 loss_span_0 0.5080 loss_giou_0 0.5266 loss_label_0 0.3895 class_error_0 7.0217 loss_overall 3.0634
+2021_08_04_13_24_21 [Epoch] 039 [Loss] loss_span 0.4821 loss_giou 0.5035 loss_label 0.3983 class_error 7.2142 loss_saliency 0.2394 loss_span_0 0.5060 loss_giou_0 0.5250 loss_label_0 0.3883 class_error_0 7.4257 loss_overall 3.0426
+2021_08_04_13_25_21 [Epoch] 040 [Loss] loss_span 0.4816 loss_giou 0.5014 loss_label 0.3897 class_error 7.1229 loss_saliency 0.2400 loss_span_0 0.5029 loss_giou_0 0.5211 loss_label_0 0.3821 class_error_0 7.2083 loss_overall 3.0188
+2021_08_04_13_26_47 [Epoch] 041 [Loss] loss_span 0.4743 loss_giou 0.4964 loss_label 0.3887 class_error 6.7816 loss_saliency 0.2378 loss_span_0 0.4972 loss_giou_0 0.5199 loss_label_0 0.3796 class_error_0 6.9007 loss_overall 2.9940
+2021_08_04_13_27_46 [Epoch] 042 [Loss] loss_span 0.4729 loss_giou 0.4981 loss_label 0.3814 class_error 6.9122 loss_saliency 0.2365 loss_span_0 0.4916 loss_giou_0 0.5176 loss_label_0 0.3774 class_error_0 6.9401 loss_overall 2.9755
+2021_08_04_13_28_44 [Epoch] 043 [Loss] loss_span 0.4760 loss_giou 0.5008 loss_label 0.3761 class_error 6.8140 loss_saliency 0.2345 loss_span_0 0.4944 loss_giou_0 0.5168 loss_label_0 0.3692 class_error_0 6.8928 loss_overall 2.9677
+2021_08_04_13_29_43 [Epoch] 044 [Loss] loss_span 0.4689 loss_giou 0.4950 loss_label 0.3718 class_error 6.6328 loss_saliency 0.2323 loss_span_0 0.4886 loss_giou_0 0.5173 loss_label_0 0.3673 class_error_0 6.9695 loss_overall 2.9411
+2021_08_04_13_30_41 [Epoch] 045 [Loss] loss_span 0.4676 loss_giou 0.4951 loss_label 0.3716 class_error 6.8737 loss_saliency 0.2308 loss_span_0 0.4819 loss_giou_0 0.5093 loss_label_0 0.3685 class_error_0 7.0836 loss_overall 2.9247
+2021_08_04_13_32_04 [Epoch] 046 [Loss] loss_span 0.4680 loss_giou 0.4940 loss_label 0.3724 class_error 6.8449 loss_saliency 0.2297 loss_span_0 0.4816 loss_giou_0 0.5096 loss_label_0 0.3703 class_error_0 7.3820 loss_overall 2.9256
+2021_08_04_13_33_03 [Epoch] 047 [Loss] loss_span 0.4584 loss_giou 0.4873 loss_label 0.3702 class_error 6.8917 loss_saliency 0.2289 loss_span_0 0.4762 loss_giou_0 0.5106 loss_label_0 0.3636 class_error_0 7.0993 loss_overall 2.8952
+2021_08_04_13_34_03 [Epoch] 048 [Loss] loss_span 0.4518 loss_giou 0.4885 loss_label 0.3648 class_error 6.8557 loss_saliency 0.2215 loss_span_0 0.4730 loss_giou_0 0.5079 loss_label_0 0.3573 class_error_0 6.8289 loss_overall 2.8648
+2021_08_04_13_35_03 [Epoch] 049 [Loss] loss_span 0.4525 loss_giou 0.4881 loss_label 0.3608 class_error 6.8747 loss_saliency 0.2221 loss_span_0 0.4729 loss_giou_0 0.5091 loss_label_0 0.3528 class_error_0 7.0079 loss_overall 2.8584
+2021_08_04_13_36_02 [Epoch] 050 [Loss] loss_span 0.4490 loss_giou 0.4813 loss_label 0.3529 class_error 6.6830 loss_saliency 0.2212 loss_span_0 0.4658 loss_giou_0 0.5020 loss_label_0 0.3505 class_error_0 6.9031 loss_overall 2.8227
+2021_08_04_13_37_27 [Epoch] 051 [Loss] loss_span 0.4450 loss_giou 0.4835 loss_label 0.3503 class_error 6.7983 loss_saliency 0.2217 loss_span_0 0.4633 loss_giou_0 0.5041 loss_label_0 0.3497 class_error_0 7.1129 loss_overall 2.8176
+2021_08_04_13_38_30 [Epoch] 052 [Loss] loss_span 0.4493 loss_giou 0.4859 loss_label 0.3480 class_error 6.6603 loss_saliency 0.2218 loss_span_0 0.4655 loss_giou_0 0.5036 loss_label_0 0.3478 class_error_0 6.9336 loss_overall 2.8218
+2021_08_04_13_39_31 [Epoch] 053 [Loss] loss_span 0.4418 loss_giou 0.4806 loss_label 0.3438 class_error 6.4108 loss_saliency 0.2210 loss_span_0 0.4604 loss_giou_0 0.5025 loss_label_0 0.3439 class_error_0 6.6419 loss_overall 2.7939
+2021_08_04_13_40_31 [Epoch] 054 [Loss] loss_span 0.4310 loss_giou 0.4725 loss_label 0.3482 class_error 6.4051 loss_saliency 0.2156 loss_span_0 0.4570 loss_giou_0 0.4971 loss_label_0 0.3425 class_error_0 6.4731 loss_overall 2.7639
+2021_08_04_13_41_33 [Epoch] 055 [Loss] loss_span 0.4310 loss_giou 0.4765 loss_label 0.3466 class_error 6.5199 loss_saliency 0.2149 loss_span_0 0.4577 loss_giou_0 0.4984 loss_label_0 0.3398 class_error_0 6.6712 loss_overall 2.7649
+2021_08_04_13_43_02 [Epoch] 056 [Loss] loss_span 0.4335 loss_giou 0.4762 loss_label 0.3370 class_error 6.0320 loss_saliency 0.2155 loss_span_0 0.4517 loss_giou_0 0.4965 loss_label_0 0.3356 class_error_0 6.6508 loss_overall 2.7461
+2021_08_04_13_44_03 [Epoch] 057 [Loss] loss_span 0.4299 loss_giou 0.4749 loss_label 0.3384 class_error 6.6863 loss_saliency 0.2090 loss_span_0 0.4465 loss_giou_0 0.4941 loss_label_0 0.3370 class_error_0 6.7015 loss_overall 2.7297
+2021_08_04_13_45_03 [Epoch] 058 [Loss] loss_span 0.4312 loss_giou 0.4767 loss_label 0.3319 class_error 6.0844 loss_saliency 0.2078 loss_span_0 0.4500 loss_giou_0 0.4961 loss_label_0 0.3322 class_error_0 6.7411 loss_overall 2.7257
+2021_08_04_13_46_04 [Epoch] 059 [Loss] loss_span 0.4247 loss_giou 0.4720 loss_label 0.3302 class_error 6.0152 loss_saliency 0.2104 loss_span_0 0.4444 loss_giou_0 0.4904 loss_label_0 0.3294 class_error_0 6.5831 loss_overall 2.7015
+2021_08_04_13_47_06 [Epoch] 060 [Loss] loss_span 0.4228 loss_giou 0.4703 loss_label 0.3273 class_error 6.0433 loss_saliency 0.2078 loss_span_0 0.4458 loss_giou_0 0.4902 loss_label_0 0.3256 class_error_0 6.4944 loss_overall 2.6898
+2021_08_04_13_48_35 [Epoch] 061 [Loss] loss_span 0.4211 loss_giou 0.4714 loss_label 0.3224 class_error 5.8920 loss_saliency 0.2078 loss_span_0 0.4388 loss_giou_0 0.4885 loss_label_0 0.3245 class_error_0 6.6426 loss_overall 2.6746
+2021_08_04_13_49_36 [Epoch] 062 [Loss] loss_span 0.4209 loss_giou 0.4683 loss_label 0.3220 class_error 6.0979 loss_saliency 0.2089 loss_span_0 0.4378 loss_giou_0 0.4859 loss_label_0 0.3214 class_error_0 6.3339 loss_overall 2.6653
+2021_08_04_13_50_38 [Epoch] 063 [Loss] loss_span 0.4216 loss_giou 0.4685 loss_label 0.3175 class_error 5.8250 loss_saliency 0.2070 loss_span_0 0.4403 loss_giou_0 0.4871 loss_label_0 0.3164 class_error_0 6.1904 loss_overall 2.6584
+2021_08_04_13_51_39 [Epoch] 064 [Loss] loss_span 0.4177 loss_giou 0.4668 loss_label 0.3157 class_error 5.9412 loss_saliency 0.2045 loss_span_0 0.4325 loss_giou_0 0.4836 loss_label_0 0.3196 class_error_0 6.5120 loss_overall 2.6404
+2021_08_04_13_52_40 [Epoch] 065 [Loss] loss_span 0.4161 loss_giou 0.4669 loss_label 0.3127 class_error 6.0815 loss_saliency 0.2030 loss_span_0 0.4323 loss_giou_0 0.4814 loss_label_0 0.3152 class_error_0 6.3298 loss_overall 2.6277
+2021_08_04_13_54_10 [Epoch] 066 [Loss] loss_span 0.4093 loss_giou 0.4645 loss_label 0.3142 class_error 6.0262 loss_saliency 0.2025 loss_span_0 0.4244 loss_giou_0 0.4768 loss_label_0 0.3144 class_error_0 6.4693 loss_overall 2.6061
+2021_08_04_13_55_11 [Epoch] 067 [Loss] loss_span 0.4081 loss_giou 0.4608 loss_label 0.3066 class_error 5.8625 loss_saliency 0.1987 loss_span_0 0.4279 loss_giou_0 0.4810 loss_label_0 0.3057 class_error_0 6.4179 loss_overall 2.5888
+2021_08_04_13_56_11 [Epoch] 068 [Loss] loss_span 0.4110 loss_giou 0.4628 loss_label 0.3056 class_error 5.9852 loss_saliency 0.1992 loss_span_0 0.4297 loss_giou_0 0.4827 loss_label_0 0.3058 class_error_0 5.9720 loss_overall 2.5968
+2021_08_04_13_57_13 [Epoch] 069 [Loss] loss_span 0.4008 loss_giou 0.4582 loss_label 0.2997 class_error 5.5085 loss_saliency 0.1991 loss_span_0 0.4238 loss_giou_0 0.4773 loss_label_0 0.3017 class_error_0 6.0937 loss_overall 2.5607
+2021_08_04_13_58_15 [Epoch] 070 [Loss] loss_span 0.4027 loss_giou 0.4587 loss_label 0.3016 class_error 5.7656 loss_saliency 0.1975 loss_span_0 0.4223 loss_giou_0 0.4753 loss_label_0 0.3032 class_error_0 5.9609 loss_overall 2.5612
+2021_08_04_13_59_44 [Epoch] 071 [Loss] loss_span 0.3948 loss_giou 0.4546 loss_label 0.2983 class_error 5.6519 loss_saliency 0.1940 loss_span_0 0.4141 loss_giou_0 0.4756 loss_label_0 0.2978 class_error_0 5.9183 loss_overall 2.5293
+2021_08_04_14_00_45 [Epoch] 072 [Loss] loss_span 0.3996 loss_giou 0.4558 loss_label 0.2962 class_error 5.8896 loss_saliency 0.1974 loss_span_0 0.4155 loss_giou_0 0.4735 loss_label_0 0.3004 class_error_0 6.0945 loss_overall 2.5382
+2021_08_04_14_01_45 [Epoch] 073 [Loss] loss_span 0.3899 loss_giou 0.4529 loss_label 0.2941 class_error 5.5762 loss_saliency 0.1930 loss_span_0 0.4113 loss_giou_0 0.4709 loss_label_0 0.2958 class_error_0 5.9347 loss_overall 2.5079
+2021_08_04_14_02_46 [Epoch] 074 [Loss] loss_span 0.3896 loss_giou 0.4518 loss_label 0.2886 class_error 5.5352 loss_saliency 0.1977 loss_span_0 0.4076 loss_giou_0 0.4675 loss_label_0 0.2927 class_error_0 5.8779 loss_overall 2.4955
+2021_08_04_14_03_47 [Epoch] 075 [Loss] loss_span 0.3874 loss_giou 0.4532 loss_label 0.2875 class_error 5.1885 loss_saliency 0.1924 loss_span_0 0.4072 loss_giou_0 0.4712 loss_label_0 0.2926 class_error_0 6.1507 loss_overall 2.4915
+2021_08_04_14_05_16 [Epoch] 076 [Loss] loss_span 0.3894 loss_giou 0.4502 loss_label 0.2842 class_error 5.2805 loss_saliency 0.1932 loss_span_0 0.4073 loss_giou_0 0.4690 loss_label_0 0.2901 class_error_0 5.8795 loss_overall 2.4833
+2021_08_04_14_06_18 [Epoch] 077 [Loss] loss_span 0.3831 loss_giou 0.4476 loss_label 0.2816 class_error 5.2822 loss_saliency 0.1920 loss_span_0 0.4032 loss_giou_0 0.4646 loss_label_0 0.2838 class_error_0 5.7896 loss_overall 2.4561
+2021_08_04_14_07_19 [Epoch] 078 [Loss] loss_span 0.3833 loss_giou 0.4483 loss_label 0.2794 class_error 5.2491 loss_saliency 0.1876 loss_span_0 0.3996 loss_giou_0 0.4650 loss_label_0 0.2869 class_error_0 5.9313 loss_overall 2.4501
+2021_08_04_14_08_20 [Epoch] 079 [Loss] loss_span 0.3811 loss_giou 0.4457 loss_label 0.2779 class_error 5.0539 loss_saliency 0.1905 loss_span_0 0.4008 loss_giou_0 0.4654 loss_label_0 0.2812 class_error_0 5.6117 loss_overall 2.4426
+2021_08_04_14_09_21 [Epoch] 080 [Loss] loss_span 0.3841 loss_giou 0.4474 loss_label 0.2726 class_error 5.0343 loss_saliency 0.1888 loss_span_0 0.4020 loss_giou_0 0.4662 loss_label_0 0.2794 class_error_0 5.4786 loss_overall 2.4404
+2021_08_04_14_10_49 [Epoch] 081 [Loss] loss_span 0.3766 loss_giou 0.4462 loss_label 0.2717 class_error 5.1204 loss_saliency 0.1866 loss_span_0 0.3936 loss_giou_0 0.4613 loss_label_0 0.2779 class_error_0 5.5739 loss_overall 2.4140
+2021_08_04_14_11_50 [Epoch] 082 [Loss] loss_span 0.3781 loss_giou 0.4463 loss_label 0.2702 class_error 4.9730 loss_saliency 0.1859 loss_span_0 0.3968 loss_giou_0 0.4641 loss_label_0 0.2732 class_error_0 5.5585 loss_overall 2.4146
+2021_08_04_14_12_50 [Epoch] 083 [Loss] loss_span 0.3785 loss_giou 0.4477 loss_label 0.2664 class_error 5.0058 loss_saliency 0.1882 loss_span_0 0.3953 loss_giou_0 0.4619 loss_label_0 0.2696 class_error_0 5.4664 loss_overall 2.4077
+2021_08_04_14_13_50 [Epoch] 084 [Loss] loss_span 0.3754 loss_giou 0.4435 loss_label 0.2619 class_error 4.5683 loss_saliency 0.1842 loss_span_0 0.3895 loss_giou_0 0.4592 loss_label_0 0.2717 class_error_0 5.3395 loss_overall 2.3854
+2021_08_04_14_14_50 [Epoch] 085 [Loss] loss_span 0.3713 loss_giou 0.4403 loss_label 0.2623 class_error 5.0401 loss_saliency 0.1829 loss_span_0 0.3867 loss_giou_0 0.4570 loss_label_0 0.2707 class_error_0 5.4510 loss_overall 2.3712
+2021_08_04_14_16_20 [Epoch] 086 [Loss] loss_span 0.3702 loss_giou 0.4399 loss_label 0.2640 class_error 4.8080 loss_saliency 0.1857 loss_span_0 0.3878 loss_giou_0 0.4560 loss_label_0 0.2712 class_error_0 5.6187 loss_overall 2.3749
+2021_08_04_14_17_21 [Epoch] 087 [Loss] loss_span 0.3726 loss_giou 0.4406 loss_label 0.2574 class_error 4.8079 loss_saliency 0.1846 loss_span_0 0.3883 loss_giou_0 0.4553 loss_label_0 0.2676 class_error_0 5.4877 loss_overall 2.3665
+2021_08_04_14_18_22 [Epoch] 088 [Loss] loss_span 0.3709 loss_giou 0.4391 loss_label 0.2587 class_error 5.1116 loss_saliency 0.1834 loss_span_0 0.3849 loss_giou_0 0.4572 loss_label_0 0.2646 class_error_0 5.4610 loss_overall 2.3588
+2021_08_04_14_19_25 [Epoch] 089 [Loss] loss_span 0.3674 loss_giou 0.4394 loss_label 0.2525 class_error 4.7836 loss_saliency 0.1853 loss_span_0 0.3806 loss_giou_0 0.4529 loss_label_0 0.2613 class_error_0 5.2184 loss_overall 2.3394
+2021_08_04_14_20_26 [Epoch] 090 [Loss] loss_span 0.3613 loss_giou 0.4349 loss_label 0.2519 class_error 4.7219 loss_saliency 0.1813 loss_span_0 0.3788 loss_giou_0 0.4523 loss_label_0 0.2597 class_error_0 5.2277 loss_overall 2.3202
+2021_08_04_14_21_55 [Epoch] 091 [Loss] loss_span 0.3648 loss_giou 0.4344 loss_label 0.2473 class_error 4.5663 loss_saliency 0.1824 loss_span_0 0.3802 loss_giou_0 0.4528 loss_label_0 0.2542 class_error_0 5.3019 loss_overall 2.3160
+2021_08_04_14_22_56 [Epoch] 092 [Loss] loss_span 0.3583 loss_giou 0.4323 loss_label 0.2471 class_error 4.5833 loss_saliency 0.1808 loss_span_0 0.3769 loss_giou_0 0.4504 loss_label_0 0.2536 class_error_0 4.9397 loss_overall 2.2994
+2021_08_04_14_23_57 [Epoch] 093 [Loss] loss_span 0.3589 loss_giou 0.4329 loss_label 0.2480 class_error 4.5618 loss_saliency 0.1801 loss_span_0 0.3758 loss_giou_0 0.4511 loss_label_0 0.2563 class_error_0 5.1501 loss_overall 2.3031
+2021_08_04_14_24_58 [Epoch] 094 [Loss] loss_span 0.3561 loss_giou 0.4321 loss_label 0.2440 class_error 4.6280 loss_saliency 0.1777 loss_span_0 0.3728 loss_giou_0 0.4485 loss_label_0 0.2530 class_error_0 4.9064 loss_overall 2.2841
+2021_08_04_14_26_02 [Epoch] 095 [Loss] loss_span 0.3564 loss_giou 0.4284 loss_label 0.2470 class_error 4.7366 loss_saliency 0.1802 loss_span_0 0.3729 loss_giou_0 0.4450 loss_label_0 0.2539 class_error_0 5.3536 loss_overall 2.2837
+2021_08_04_14_27_29 [Epoch] 096 [Loss] loss_span 0.3531 loss_giou 0.4298 loss_label 0.2438 class_error 4.4720 loss_saliency 0.1795 loss_span_0 0.3669 loss_giou_0 0.4424 loss_label_0 0.2531 class_error_0 4.9912 loss_overall 2.2687
+2021_08_04_14_28_30 [Epoch] 097 [Loss] loss_span 0.3545 loss_giou 0.4292 loss_label 0.2389 class_error 4.2460 loss_saliency 0.1794 loss_span_0 0.3696 loss_giou_0 0.4451 loss_label_0 0.2472 class_error_0 4.7524 loss_overall 2.2638
+2021_08_04_14_29_31 [Epoch] 098 [Loss] loss_span 0.3556 loss_giou 0.4323 loss_label 0.2386 class_error 4.3526 loss_saliency 0.1791 loss_span_0 0.3690 loss_giou_0 0.4455 loss_label_0 0.2473 class_error_0 4.7186 loss_overall 2.2674
+2021_08_04_14_30_32 [Epoch] 099 [Loss] loss_span 0.3482 loss_giou 0.4251 loss_label 0.2386 class_error 4.2954 loss_saliency 0.1760 loss_span_0 0.3673 loss_giou_0 0.4429 loss_label_0 0.2463 class_error_0 5.0847 loss_overall 2.2443
+2021_08_04_14_31_32 [Epoch] 100 [Loss] loss_span 0.3505 loss_giou 0.4303 loss_label 0.2325 class_error 4.2448 loss_saliency 0.1743 loss_span_0 0.3644 loss_giou_0 0.4437 loss_label_0 0.2423 class_error_0 4.8983 loss_overall 2.2379
+2021_08_04_14_33_04 [Epoch] 101 [Loss] loss_span 0.3472 loss_giou 0.4252 loss_label 0.2339 class_error 4.2455 loss_saliency 0.1739 loss_span_0 0.3675 loss_giou_0 0.4419 loss_label_0 0.2423 class_error_0 4.8791 loss_overall 2.2320
+2021_08_04_14_34_05 [Epoch] 102 [Loss] loss_span 0.3399 loss_giou 0.4203 loss_label 0.2257 class_error 4.2149 loss_saliency 0.1741 loss_span_0 0.3583 loss_giou_0 0.4377 loss_label_0 0.2368 class_error_0 4.4555 loss_overall 2.1928
+2021_08_04_14_35_06 [Epoch] 103 [Loss] loss_span 0.3417 loss_giou 0.4222 loss_label 0.2268 class_error 4.1977 loss_saliency 0.1731 loss_span_0 0.3598 loss_giou_0 0.4390 loss_label_0 0.2336 class_error_0 4.6748 loss_overall 2.1963
+2021_08_04_14_36_07 [Epoch] 104 [Loss] loss_span 0.3407 loss_giou 0.4249 loss_label 0.2237 class_error 4.1355 loss_saliency 0.1742 loss_span_0 0.3543 loss_giou_0 0.4388 loss_label_0 0.2338 class_error_0 4.5341 loss_overall 2.1902
+2021_08_04_14_37_10 [Epoch] 105 [Loss] loss_span 0.3385 loss_giou 0.4188 loss_label 0.2260 class_error 4.1884 loss_saliency 0.1718 loss_span_0 0.3562 loss_giou_0 0.4361 loss_label_0 0.2364 class_error_0 4.3986 loss_overall 2.1838
+2021_08_04_14_38_37 [Epoch] 106 [Loss] loss_span 0.3373 loss_giou 0.4203 loss_label 0.2230 class_error 3.9524 loss_saliency 0.1707 loss_span_0 0.3528 loss_giou_0 0.4353 loss_label_0 0.2355 class_error_0 4.5857 loss_overall 2.1748
+2021_08_04_14_39_38 [Epoch] 107 [Loss] loss_span 0.3342 loss_giou 0.4198 loss_label 0.2201 class_error 4.0337 loss_saliency 0.1731 loss_span_0 0.3535 loss_giou_0 0.4373 loss_label_0 0.2294 class_error_0 4.3019 loss_overall 2.1676
+2021_08_04_14_40_40 [Epoch] 108 [Loss] loss_span 0.3349 loss_giou 0.4191 loss_label 0.2205 class_error 3.9333 loss_saliency 0.1698 loss_span_0 0.3482 loss_giou_0 0.4340 loss_label_0 0.2282 class_error_0 4.2782 loss_overall 2.1546
+2021_08_04_14_41_40 [Epoch] 109 [Loss] loss_span 0.3323 loss_giou 0.4152 loss_label 0.2180 class_error 3.8188 loss_saliency 0.1700 loss_span_0 0.3513 loss_giou_0 0.4353 loss_label_0 0.2268 class_error_0 4.2483 loss_overall 2.1490
+2021_08_04_14_42_40 [Epoch] 110 [Loss] loss_span 0.3321 loss_giou 0.4166 loss_label 0.2193 class_error 4.0816 loss_saliency 0.1726 loss_span_0 0.3494 loss_giou_0 0.4338 loss_label_0 0.2296 class_error_0 4.5403 loss_overall 2.1534
+2021_08_04_14_44_06 [Epoch] 111 [Loss] loss_span 0.3297 loss_giou 0.4124 loss_label 0.2172 class_error 3.9085 loss_saliency 0.1687 loss_span_0 0.3438 loss_giou_0 0.4292 loss_label_0 0.2270 class_error_0 4.2952 loss_overall 2.1280
+2021_08_04_14_45_05 [Epoch] 112 [Loss] loss_span 0.3296 loss_giou 0.4162 loss_label 0.2161 class_error 3.8610 loss_saliency 0.1672 loss_span_0 0.3436 loss_giou_0 0.4299 loss_label_0 0.2259 class_error_0 4.3925 loss_overall 2.1285
+2021_08_04_14_46_05 [Epoch] 113 [Loss] loss_span 0.3297 loss_giou 0.4140 loss_label 0.2120 class_error 3.6386 loss_saliency 0.1698 loss_span_0 0.3428 loss_giou_0 0.4283 loss_label_0 0.2234 class_error_0 4.2161 loss_overall 2.1200
+2021_08_04_14_47_04 [Epoch] 114 [Loss] loss_span 0.3266 loss_giou 0.4133 loss_label 0.2090 class_error 3.8373 loss_saliency 0.1685 loss_span_0 0.3435 loss_giou_0 0.4303 loss_label_0 0.2172 class_error_0 3.9232 loss_overall 2.1084
+2021_08_04_14_48_04 [Epoch] 115 [Loss] loss_span 0.3281 loss_giou 0.4099 loss_label 0.2098 class_error 3.7227 loss_saliency 0.1700 loss_span_0 0.3419 loss_giou_0 0.4275 loss_label_0 0.2191 class_error_0 4.1054 loss_overall 2.1063
+2021_08_04_14_49_32 [Epoch] 116 [Loss] loss_span 0.3246 loss_giou 0.4120 loss_label 0.2050 class_error 3.5181 loss_saliency 0.1710 loss_span_0 0.3398 loss_giou_0 0.4276 loss_label_0 0.2116 class_error_0 4.0029 loss_overall 2.0917
+2021_08_04_14_50_32 [Epoch] 117 [Loss] loss_span 0.3213 loss_giou 0.4076 loss_label 0.2016 class_error 3.6359 loss_saliency 0.1649 loss_span_0 0.3360 loss_giou_0 0.4234 loss_label_0 0.2145 class_error_0 4.0928 loss_overall 2.0694
+2021_08_04_14_51_33 [Epoch] 118 [Loss] loss_span 0.3169 loss_giou 0.4056 loss_label 0.1990 class_error 3.4006 loss_saliency 0.1650 loss_span_0 0.3365 loss_giou_0 0.4250 loss_label_0 0.2085 class_error_0 3.7403 loss_overall 2.0565
+2021_08_04_14_52_32 [Epoch] 119 [Loss] loss_span 0.3194 loss_giou 0.4075 loss_label 0.1977 class_error 3.4690 loss_saliency 0.1655 loss_span_0 0.3357 loss_giou_0 0.4240 loss_label_0 0.2108 class_error_0 3.9946 loss_overall 2.0607
+2021_08_04_14_53_32 [Epoch] 120 [Loss] loss_span 0.3203 loss_giou 0.4067 loss_label 0.1982 class_error 3.4715 loss_saliency 0.1626 loss_span_0 0.3334 loss_giou_0 0.4223 loss_label_0 0.2123 class_error_0 3.9178 loss_overall 2.0559
+2021_08_04_14_54_59 [Epoch] 121 [Loss] loss_span 0.3183 loss_giou 0.4033 loss_label 0.2039 class_error 3.6320 loss_saliency 0.1638 loss_span_0 0.3301 loss_giou_0 0.4205 loss_label_0 0.2127 class_error_0 4.1402 loss_overall 2.0526
+2021_08_04_14_55_58 [Epoch] 122 [Loss] loss_span 0.3152 loss_giou 0.4043 loss_label 0.1946 class_error 3.3929 loss_saliency 0.1650 loss_span_0 0.3304 loss_giou_0 0.4212 loss_label_0 0.2051 class_error_0 3.7802 loss_overall 2.0359
+2021_08_04_14_56_58 [Epoch] 123 [Loss] loss_span 0.3145 loss_giou 0.4035 loss_label 0.1983 class_error 3.5264 loss_saliency 0.1625 loss_span_0 0.3308 loss_giou_0 0.4204 loss_label_0 0.2087 class_error_0 3.6907 loss_overall 2.0388
+2021_08_04_14_57_58 [Epoch] 124 [Loss] loss_span 0.3074 loss_giou 0.4005 loss_label 0.1914 class_error 3.4976 loss_saliency 0.1627 loss_span_0 0.3229 loss_giou_0 0.4164 loss_label_0 0.2072 class_error_0 3.9096 loss_overall 2.0085
+2021_08_04_14_58_57 [Epoch] 125 [Loss] loss_span 0.3106 loss_giou 0.4016 loss_label 0.1908 class_error 3.2744 loss_saliency 0.1642 loss_span_0 0.3256 loss_giou_0 0.4154 loss_label_0 0.2049 class_error_0 3.6893 loss_overall 2.0130
+2021_08_04_15_00_24 [Epoch] 126 [Loss] loss_span 0.3061 loss_giou 0.3994 loss_label 0.1932 class_error 3.4730 loss_saliency 0.1612 loss_span_0 0.3228 loss_giou_0 0.4159 loss_label_0 0.2029 class_error_0 3.7799 loss_overall 2.0015
+2021_08_04_15_01_24 [Epoch] 127 [Loss] loss_span 0.3080 loss_giou 0.3997 loss_label 0.1892 class_error 3.2892 loss_saliency 0.1582 loss_span_0 0.3272 loss_giou_0 0.4172 loss_label_0 0.1969 class_error_0 3.6708 loss_overall 1.9963
+2021_08_04_15_02_23 [Epoch] 128 [Loss] loss_span 0.3080 loss_giou 0.3981 loss_label 0.1890 class_error 3.1337 loss_saliency 0.1610 loss_span_0 0.3237 loss_giou_0 0.4152 loss_label_0 0.1993 class_error_0 3.4701 loss_overall 1.9944
+2021_08_04_15_03_22 [Epoch] 129 [Loss] loss_span 0.3038 loss_giou 0.3994 loss_label 0.1867 class_error 3.2974 loss_saliency 0.1581 loss_span_0 0.3187 loss_giou_0 0.4142 loss_label_0 0.2006 class_error_0 3.6422 loss_overall 1.9816
+2021_08_04_15_04_22 [Epoch] 130 [Loss] loss_span 0.3062 loss_giou 0.3989 loss_label 0.1820 class_error 3.1423 loss_saliency 0.1602 loss_span_0 0.3214 loss_giou_0 0.4183 loss_label_0 0.1958 class_error_0 3.6474 loss_overall 1.9827
+2021_08_04_15_05_47 [Epoch] 131 [Loss] loss_span 0.3047 loss_giou 0.3991 loss_label 0.1813 class_error 3.1361 loss_saliency 0.1613 loss_span_0 0.3172 loss_giou_0 0.4116 loss_label_0 0.1967 class_error_0 3.6585 loss_overall 1.9719
+2021_08_04_15_06_45 [Epoch] 132 [Loss] loss_span 0.3034 loss_giou 0.3960 loss_label 0.1797 class_error 2.9289 loss_saliency 0.1570 loss_span_0 0.3177 loss_giou_0 0.4116 loss_label_0 0.1979 class_error_0 3.4351 loss_overall 1.9634
+2021_08_04_15_07_45 [Epoch] 133 [Loss] loss_span 0.3017 loss_giou 0.3943 loss_label 0.1805 class_error 3.1074 loss_saliency 0.1583 loss_span_0 0.3166 loss_giou_0 0.4090 loss_label_0 0.1936 class_error_0 3.4266 loss_overall 1.9540
+2021_08_04_15_08_45 [Epoch] 134 [Loss] loss_span 0.3030 loss_giou 0.3977 loss_label 0.1814 class_error 3.1439 loss_saliency 0.1587 loss_span_0 0.3150 loss_giou_0 0.4117 loss_label_0 0.1943 class_error_0 3.5817 loss_overall 1.9619
+2021_08_04_15_09_44 [Epoch] 135 [Loss] loss_span 0.2985 loss_giou 0.3913 loss_label 0.1801 class_error 3.2043 loss_saliency 0.1579 loss_span_0 0.3135 loss_giou_0 0.4096 loss_label_0 0.1895 class_error_0 3.4347 loss_overall 1.9404
+2021_08_04_15_11_11 [Epoch] 136 [Loss] loss_span 0.2979 loss_giou 0.3922 loss_label 0.1785 class_error 3.0857 loss_saliency 0.1583 loss_span_0 0.3095 loss_giou_0 0.4057 loss_label_0 0.1914 class_error_0 3.6656 loss_overall 1.9335
+2021_08_04_15_12_10 [Epoch] 137 [Loss] loss_span 0.2949 loss_giou 0.3933 loss_label 0.1798 class_error 2.9996 loss_saliency 0.1574 loss_span_0 0.3117 loss_giou_0 0.4099 loss_label_0 0.1904 class_error_0 3.4402 loss_overall 1.9374
+2021_08_04_15_13_09 [Epoch] 138 [Loss] loss_span 0.2912 loss_giou 0.3885 loss_label 0.1765 class_error 3.0080 loss_saliency 0.1564 loss_span_0 0.3071 loss_giou_0 0.4056 loss_label_0 0.1888 class_error_0 3.4500 loss_overall 1.9141
+2021_08_04_15_14_08 [Epoch] 139 [Loss] loss_span 0.2887 loss_giou 0.3840 loss_label 0.1755 class_error 3.1152 loss_saliency 0.1582 loss_span_0 0.3036 loss_giou_0 0.4008 loss_label_0 0.1887 class_error_0 3.2850 loss_overall 1.8995
+2021_08_04_15_15_08 [Epoch] 140 [Loss] loss_span 0.2917 loss_giou 0.3904 loss_label 0.1673 class_error 2.8572 loss_saliency 0.1552 loss_span_0 0.3069 loss_giou_0 0.4090 loss_label_0 0.1784 class_error_0 3.0141 loss_overall 1.8990
+2021_08_04_15_16_37 [Epoch] 141 [Loss] loss_span 0.2918 loss_giou 0.3902 loss_label 0.1703 class_error 2.8095 loss_saliency 0.1520 loss_span_0 0.3079 loss_giou_0 0.4089 loss_label_0 0.1834 class_error_0 3.4561 loss_overall 1.9044
+2021_08_04_15_17_38 [Epoch] 142 [Loss] loss_span 0.2913 loss_giou 0.3894 loss_label 0.1724 class_error 2.8330 loss_saliency 0.1518 loss_span_0 0.3067 loss_giou_0 0.4060 loss_label_0 0.1859 class_error_0 3.2875 loss_overall 1.9036
+2021_08_04_15_18_39 [Epoch] 143 [Loss] loss_span 0.2888 loss_giou 0.3850 loss_label 0.1710 class_error 2.8611 loss_saliency 0.1503 loss_span_0 0.3045 loss_giou_0 0.4014 loss_label_0 0.1813 class_error_0 3.1913 loss_overall 1.8823
+2021_08_04_15_19_40 [Epoch] 144 [Loss] loss_span 0.2899 loss_giou 0.3863 loss_label 0.1676 class_error 2.8735 loss_saliency 0.1524 loss_span_0 0.3057 loss_giou_0 0.4037 loss_label_0 0.1793 class_error_0 3.1955 loss_overall 1.8848
+2021_08_04_15_20_41 [Epoch] 145 [Loss] loss_span 0.2859 loss_giou 0.3850 loss_label 0.1679 class_error 2.7106 loss_saliency 0.1520 loss_span_0 0.3018 loss_giou_0 0.4005 loss_label_0 0.1797 class_error_0 3.1690 loss_overall 1.8728
+2021_08_04_15_22_08 [Epoch] 146 [Loss] loss_span 0.2863 loss_giou 0.3833 loss_label 0.1676 class_error 2.8849 loss_saliency 0.1556 loss_span_0 0.3006 loss_giou_0 0.4001 loss_label_0 0.1826 class_error_0 3.3652 loss_overall 1.8761
+2021_08_04_15_23_08 [Epoch] 147 [Loss] loss_span 0.2848 loss_giou 0.3848 loss_label 0.1639 class_error 2.7943 loss_saliency 0.1520 loss_span_0 0.3000 loss_giou_0 0.3996 loss_label_0 0.1745 class_error_0 3.1314 loss_overall 1.8597
+2021_08_04_15_24_09 [Epoch] 148 [Loss] loss_span 0.2830 loss_giou 0.3831 loss_label 0.1680 class_error 2.8535 loss_saliency 0.1538 loss_span_0 0.2982 loss_giou_0 0.3977 loss_label_0 0.1787 class_error_0 3.0520 loss_overall 1.8625
+2021_08_04_15_25_09 [Epoch] 149 [Loss] loss_span 0.2811 loss_giou 0.3792 loss_label 0.1639 class_error 2.9072 loss_saliency 0.1512 loss_span_0 0.2963 loss_giou_0 0.3958 loss_label_0 0.1755 class_error_0 3.2742 loss_overall 1.8430
+2021_08_04_15_26_09 [Epoch] 150 [Loss] loss_span 0.2806 loss_giou 0.3779 loss_label 0.1594 class_error 2.7003 loss_saliency 0.1513 loss_span_0 0.2956 loss_giou_0 0.3953 loss_label_0 0.1728 class_error_0 3.1146 loss_overall 1.8330
+2021_08_04_15_27_39 [Epoch] 151 [Loss] loss_span 0.2807 loss_giou 0.3786 loss_label 0.1600 class_error 2.7823 loss_saliency 0.1503 loss_span_0 0.2958 loss_giou_0 0.3970 loss_label_0 0.1716 class_error_0 3.0476 loss_overall 1.8341
+2021_08_04_15_28_41 [Epoch] 152 [Loss] loss_span 0.2789 loss_giou 0.3780 loss_label 0.1588 class_error 2.5760 loss_saliency 0.1488 loss_span_0 0.2956 loss_giou_0 0.3953 loss_label_0 0.1709 class_error_0 2.8832 loss_overall 1.8263
+2021_08_04_15_29_42 [Epoch] 153 [Loss] loss_span 0.2737 loss_giou 0.3772 loss_label 0.1584 class_error 2.5702 loss_saliency 0.1505 loss_span_0 0.2923 loss_giou_0 0.3955 loss_label_0 0.1707 class_error_0 2.8542 loss_overall 1.8182
+2021_08_04_15_30_43 [Epoch] 154 [Loss] loss_span 0.2757 loss_giou 0.3765 loss_label 0.1571 class_error 2.6065 loss_saliency 0.1497 loss_span_0 0.2934 loss_giou_0 0.3948 loss_label_0 0.1695 class_error_0 3.1185 loss_overall 1.8168
+2021_08_04_15_31_45 [Epoch] 155 [Loss] loss_span 0.2750 loss_giou 0.3761 loss_label 0.1526 class_error 2.6418 loss_saliency 0.1508 loss_span_0 0.2890 loss_giou_0 0.3933 loss_label_0 0.1675 class_error_0 2.8686 loss_overall 1.8044
+2021_08_04_15_33_14 [Epoch] 156 [Loss] loss_span 0.2739 loss_giou 0.3754 loss_label 0.1519 class_error 2.4531 loss_saliency 0.1473 loss_span_0 0.2887 loss_giou_0 0.3934 loss_label_0 0.1644 class_error_0 2.7928 loss_overall 1.7950
+2021_08_04_15_34_16 [Epoch] 157 [Loss] loss_span 0.2744 loss_giou 0.3753 loss_label 0.1513 class_error 2.3596 loss_saliency 0.1512 loss_span_0 0.2907 loss_giou_0 0.3908 loss_label_0 0.1650 class_error_0 2.8359 loss_overall 1.7987
+2021_08_04_15_35_17 [Epoch] 158 [Loss] loss_span 0.2696 loss_giou 0.3727 loss_label 0.1477 class_error 2.3840 loss_saliency 0.1478 loss_span_0 0.2870 loss_giou_0 0.3908 loss_label_0 0.1605 class_error_0 2.5061 loss_overall 1.7761
+2021_08_04_15_36_17 [Epoch] 159 [Loss] loss_span 0.2662 loss_giou 0.3712 loss_label 0.1516 class_error 2.4106 loss_saliency 0.1447 loss_span_0 0.2860 loss_giou_0 0.3905 loss_label_0 0.1654 class_error_0 3.0264 loss_overall 1.7756
+2021_08_04_15_37_17 [Epoch] 160 [Loss] loss_span 0.2664 loss_giou 0.3718 loss_label 0.1473 class_error 2.3810 loss_saliency 0.1451 loss_span_0 0.2833 loss_giou_0 0.3868 loss_label_0 0.1598 class_error_0 2.6970 loss_overall 1.7605
+2021_08_04_15_38_41 [Epoch] 161 [Loss] loss_span 0.2688 loss_giou 0.3715 loss_label 0.1496 class_error 2.5530 loss_saliency 0.1439 loss_span_0 0.2842 loss_giou_0 0.3874 loss_label_0 0.1631 class_error_0 2.8635 loss_overall 1.7685
+2021_08_04_15_39_39 [Epoch] 162 [Loss] loss_span 0.2675 loss_giou 0.3712 loss_label 0.1438 class_error 2.2517 loss_saliency 0.1449 loss_span_0 0.2876 loss_giou_0 0.3899 loss_label_0 0.1577 class_error_0 2.7662 loss_overall 1.7625
+2021_08_04_15_40_38 [Epoch] 163 [Loss] loss_span 0.2660 loss_giou 0.3724 loss_label 0.1476 class_error 2.4018 loss_saliency 0.1463 loss_span_0 0.2821 loss_giou_0 0.3878 loss_label_0 0.1593 class_error_0 2.6189 loss_overall 1.7615
+2021_08_04_15_41_38 [Epoch] 164 [Loss] loss_span 0.2688 loss_giou 0.3718 loss_label 0.1424 class_error 2.2719 loss_saliency 0.1486 loss_span_0 0.2860 loss_giou_0 0.3911 loss_label_0 0.1572 class_error_0 2.7330 loss_overall 1.7659
+2021_08_04_15_42_38 [Epoch] 165 [Loss] loss_span 0.2634 loss_giou 0.3700 loss_label 0.1412 class_error 2.3051 loss_saliency 0.1467 loss_span_0 0.2812 loss_giou_0 0.3880 loss_label_0 0.1542 class_error_0 2.5147 loss_overall 1.7448
+2021_08_04_15_44_01 [Epoch] 166 [Loss] loss_span 0.2606 loss_giou 0.3658 loss_label 0.1437 class_error 2.4155 loss_saliency 0.1465 loss_span_0 0.2801 loss_giou_0 0.3855 loss_label_0 0.1571 class_error_0 2.6513 loss_overall 1.7392
+2021_08_04_15_45_01 [Epoch] 167 [Loss] loss_span 0.2611 loss_giou 0.3637 loss_label 0.1434 class_error 2.3979 loss_saliency 0.1429 loss_span_0 0.2788 loss_giou_0 0.3807 loss_label_0 0.1546 class_error_0 2.7567 loss_overall 1.7252
+2021_08_04_15_46_01 [Epoch] 168 [Loss] loss_span 0.2609 loss_giou 0.3650 loss_label 0.1399 class_error 2.1873 loss_saliency 0.1438 loss_span_0 0.2759 loss_giou_0 0.3819 loss_label_0 0.1530 class_error_0 2.5145 loss_overall 1.7205
+2021_08_04_15_47_00 [Epoch] 169 [Loss] loss_span 0.2606 loss_giou 0.3657 loss_label 0.1389 class_error 2.2921 loss_saliency 0.1444 loss_span_0 0.2750 loss_giou_0 0.3819 loss_label_0 0.1548 class_error_0 2.8847 loss_overall 1.7213
+2021_08_04_15_48_00 [Epoch] 170 [Loss] loss_span 0.2598 loss_giou 0.3652 loss_label 0.1345 class_error 2.2562 loss_saliency 0.1402 loss_span_0 0.2746 loss_giou_0 0.3824 loss_label_0 0.1505 class_error_0 2.6511 loss_overall 1.7073
+2021_08_04_15_49_26 [Epoch] 171 [Loss] loss_span 0.2555 loss_giou 0.3606 loss_label 0.1372 class_error 2.1929 loss_saliency 0.1400 loss_span_0 0.2734 loss_giou_0 0.3782 loss_label_0 0.1488 class_error_0 2.7173 loss_overall 1.6937
+2021_08_04_15_50_25 [Epoch] 172 [Loss] loss_span 0.2582 loss_giou 0.3630 loss_label 0.1384 class_error 2.2284 loss_saliency 0.1419 loss_span_0 0.2737 loss_giou_0 0.3791 loss_label_0 0.1505 class_error_0 2.6517 loss_overall 1.7048
+2021_08_04_15_51_24 [Epoch] 173 [Loss] loss_span 0.2560 loss_giou 0.3611 loss_label 0.1356 class_error 2.2456 loss_saliency 0.1453 loss_span_0 0.2706 loss_giou_0 0.3772 loss_label_0 0.1512 class_error_0 2.6532 loss_overall 1.6970
+2021_08_04_15_52_25 [Epoch] 174 [Loss] loss_span 0.2559 loss_giou 0.3609 loss_label 0.1359 class_error 2.1697 loss_saliency 0.1411 loss_span_0 0.2729 loss_giou_0 0.3787 loss_label_0 0.1469 class_error_0 2.5194 loss_overall 1.6922
+2021_08_04_15_53_26 [Epoch] 175 [Loss] loss_span 0.2525 loss_giou 0.3594 loss_label 0.1386 class_error 2.2496 loss_saliency 0.1391 loss_span_0 0.2696 loss_giou_0 0.3768 loss_label_0 0.1550 class_error_0 2.6409 loss_overall 1.6911
+2021_08_04_15_54_51 [Epoch] 176 [Loss] loss_span 0.2535 loss_giou 0.3596 loss_label 0.1364 class_error 2.1629 loss_saliency 0.1403 loss_span_0 0.2705 loss_giou_0 0.3761 loss_label_0 0.1501 class_error_0 2.7246 loss_overall 1.6865
+2021_08_04_15_55_49 [Epoch] 177 [Loss] loss_span 0.2514 loss_giou 0.3579 loss_label 0.1326 class_error 2.1063 loss_saliency 0.1383 loss_span_0 0.2667 loss_giou_0 0.3764 loss_label_0 0.1433 class_error_0 2.5593 loss_overall 1.6667
+2021_08_04_15_56_48 [Epoch] 178 [Loss] loss_span 0.2497 loss_giou 0.3568 loss_label 0.1334 class_error 2.2729 loss_saliency 0.1385 loss_span_0 0.2642 loss_giou_0 0.3715 loss_label_0 0.1454 class_error_0 2.4777 loss_overall 1.6596
+2021_08_04_15_57_46 [Epoch] 179 [Loss] loss_span 0.2501 loss_giou 0.3578 loss_label 0.1323 class_error 2.1815 loss_saliency 0.1335 loss_span_0 0.2665 loss_giou_0 0.3753 loss_label_0 0.1464 class_error_0 2.6389 loss_overall 1.6619
+2021_08_04_15_58_46 [Epoch] 180 [Loss] loss_span 0.2475 loss_giou 0.3558 loss_label 0.1287 class_error 2.1187 loss_saliency 0.1384 loss_span_0 0.2646 loss_giou_0 0.3722 loss_label_0 0.1425 class_error_0 2.4137 loss_overall 1.6497
+2021_08_04_16_00_09 [Epoch] 181 [Loss] loss_span 0.2501 loss_giou 0.3564 loss_label 0.1284 class_error 2.0690 loss_saliency 0.1423 loss_span_0 0.2671 loss_giou_0 0.3764 loss_label_0 0.1422 class_error_0 2.4760 loss_overall 1.6629
+2021_08_04_16_01_07 [Epoch] 182 [Loss] loss_span 0.2506 loss_giou 0.3559 loss_label 0.1246 class_error 2.0133 loss_saliency 0.1367 loss_span_0 0.2653 loss_giou_0 0.3738 loss_label_0 0.1417 class_error_0 2.5186 loss_overall 1.6486
+2021_08_04_16_02_08 [Epoch] 183 [Loss] loss_span 0.2435 loss_giou 0.3531 loss_label 0.1296 class_error 2.0768 loss_saliency 0.1342 loss_span_0 0.2591 loss_giou_0 0.3703 loss_label_0 0.1409 class_error_0 2.4239 loss_overall 1.6307
+2021_08_04_16_03_06 [Epoch] 184 [Loss] loss_span 0.2470 loss_giou 0.3551 loss_label 0.1282 class_error 2.1464 loss_saliency 0.1386 loss_span_0 0.2641 loss_giou_0 0.3735 loss_label_0 0.1410 class_error_0 2.4869 loss_overall 1.6476
+2021_08_04_16_04_05 [Epoch] 185 [Loss] loss_span 0.2466 loss_giou 0.3528 loss_label 0.1251 class_error 2.0378 loss_saliency 0.1356 loss_span_0 0.2629 loss_giou_0 0.3728 loss_label_0 0.1375 class_error_0 2.3513 loss_overall 1.6333
+2021_08_04_16_05_29 [Epoch] 186 [Loss] loss_span 0.2416 loss_giou 0.3495 loss_label 0.1217 class_error 1.9513 loss_saliency 0.1360 loss_span_0 0.2583 loss_giou_0 0.3696 loss_label_0 0.1372 class_error_0 2.3763 loss_overall 1.6139
+2021_08_04_16_06_29 [Epoch] 187 [Loss] loss_span 0.2411 loss_giou 0.3523 loss_label 0.1226 class_error 2.1811 loss_saliency 0.1379 loss_span_0 0.2572 loss_giou_0 0.3693 loss_label_0 0.1364 class_error_0 2.3852 loss_overall 1.6167
+2021_08_04_16_07_29 [Epoch] 188 [Loss] loss_span 0.2419 loss_giou 0.3516 loss_label 0.1280 class_error 1.9720 loss_saliency 0.1370 loss_span_0 0.2597 loss_giou_0 0.3687 loss_label_0 0.1414 class_error_0 2.3192 loss_overall 1.6283
+2021_08_04_16_08_28 [Epoch] 189 [Loss] loss_span 0.2417 loss_giou 0.3496 loss_label 0.1240 class_error 2.0014 loss_saliency 0.1338 loss_span_0 0.2560 loss_giou_0 0.3657 loss_label_0 0.1363 class_error_0 2.3553 loss_overall 1.6072
+2021_08_04_16_09_27 [Epoch] 190 [Loss] loss_span 0.2386 loss_giou 0.3476 loss_label 0.1223 class_error 2.1402 loss_saliency 0.1341 loss_span_0 0.2528 loss_giou_0 0.3650 loss_label_0 0.1351 class_error_0 2.3068 loss_overall 1.5955
+2021_08_04_16_10_52 [Epoch] 191 [Loss] loss_span 0.2395 loss_giou 0.3490 loss_label 0.1203 class_error 1.9214 loss_saliency 0.1356 loss_span_0 0.2567 loss_giou_0 0.3671 loss_label_0 0.1314 class_error_0 2.2649 loss_overall 1.5996
+2021_08_04_16_11_51 [Epoch] 192 [Loss] loss_span 0.2368 loss_giou 0.3471 loss_label 0.1209 class_error 1.9345 loss_saliency 0.1344 loss_span_0 0.2538 loss_giou_0 0.3637 loss_label_0 0.1353 class_error_0 2.3957 loss_overall 1.5920
+2021_08_04_16_12_51 [Epoch] 193 [Loss] loss_span 0.2368 loss_giou 0.3458 loss_label 0.1153 class_error 1.7987 loss_saliency 0.1315 loss_span_0 0.2554 loss_giou_0 0.3643 loss_label_0 0.1292 class_error_0 2.2018 loss_overall 1.5782
+2021_08_04_16_13_51 [Epoch] 194 [Loss] loss_span 0.2371 loss_giou 0.3442 loss_label 0.1164 class_error 2.0287 loss_saliency 0.1295 loss_span_0 0.2521 loss_giou_0 0.3606 loss_label_0 0.1326 class_error_0 2.2596 loss_overall 1.5724
+2021_08_04_16_14_49 [Epoch] 195 [Loss] loss_span 0.2348 loss_giou 0.3436 loss_label 0.1158 class_error 1.7287 loss_saliency 0.1301 loss_span_0 0.2504 loss_giou_0 0.3614 loss_label_0 0.1289 class_error_0 2.1681 loss_overall 1.5650
+2021_08_04_16_16_12 [Epoch] 196 [Loss] loss_span 0.2329 loss_giou 0.3434 loss_label 0.1161 class_error 1.9459 loss_saliency 0.1332 loss_span_0 0.2512 loss_giou_0 0.3626 loss_label_0 0.1299 class_error_0 2.1898 loss_overall 1.5693
+2021_08_04_16_17_11 [Epoch] 197 [Loss] loss_span 0.2323 loss_giou 0.3426 loss_label 0.1182 class_error 1.8828 loss_saliency 0.1350 loss_span_0 0.2498 loss_giou_0 0.3624 loss_label_0 0.1320 class_error_0 2.3939 loss_overall 1.5722
+2021_08_04_16_18_10 [Epoch] 198 [Loss] loss_span 0.2352 loss_giou 0.3439 loss_label 0.1172 class_error 1.8767 loss_saliency 0.1297 loss_span_0 0.2504 loss_giou_0 0.3604 loss_label_0 0.1290 class_error_0 2.2872 loss_overall 1.5657
+2021_08_04_16_19_08 [Epoch] 199 [Loss] loss_span 0.2321 loss_giou 0.3425 loss_label 0.1177 class_error 1.7712 loss_saliency 0.1314 loss_span_0 0.2482 loss_giou_0 0.3588 loss_label_0 0.1269 class_error_0 2.0950 loss_overall 1.5576
+2021_08_04_16_20_07 [Epoch] 200 [Loss] loss_span 0.2293 loss_giou 0.3389 loss_label 0.1141 class_error 1.9119 loss_saliency 0.1327 loss_span_0 0.2455 loss_giou_0 0.3562 loss_label_0 0.1293 class_error_0 2.2245 loss_overall 1.5462
diff --git a/run_on_video/run.py b/run_on_video/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce8da1cdde2ac1dab8309aef3c646dcbd8c36659
--- /dev/null
+++ b/run_on_video/run.py
@@ -0,0 +1,170 @@
+import torch
+
+from data_utils import ClipFeatureExtractor
+from model_utils import build_inference_model
+from utils.tensor_utils import pad_sequences_1d
+from moment_detr.span_utils import span_cxw_to_xx
+from utils.basic_utils import l2_normalize_np_array
+import torch.nn.functional as F
+import numpy as np
+import os
+from PIL import Image
+
+from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
+from moviepy.video.io.VideoFileClip import VideoFileClip
+
+
+class MomentDETRPredictor:
+ def __init__(self, ckpt_path, clip_model_name_or_path="ViT-B/32", device="cuda"):
+ self.clip_len = 2 # seconds
+ self.device = device
+ print("Loading feature extractors...")
+ self.feature_extractor = ClipFeatureExtractor(
+ framerate=1/self.clip_len, size=224, centercrop=True,
+ model_name_or_path=clip_model_name_or_path, device=device
+ )
+ print("Loading trained Moment-DETR model...")
+ self.model = build_inference_model(ckpt_path).to(self.device)
+
+ @torch.no_grad()
+ def localize_moment(self, video_path, query_list):
+ """
+ Args:
+ video_path: str, path to the video file
+ query_list: List[str], each str is a query for this video
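+        Returns:
+            predictions: List[dict], one entry per query, with keys
+                `query`, `vid`, `pred_relevant_windows` and `pred_saliency_scores`
+            video_frames: frames extracted from the video by the feature extractor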
+ """
+ # construct model inputs
+ n_query = len(query_list)
+ video_feats, video_frames = self.feature_extractor.encode_video(video_path)
+ video_feats = F.normalize(video_feats, dim=-1, eps=1e-5)
+ n_frames = len(video_feats)
+ # add tef
+ tef_st = torch.arange(0, n_frames, 1.0) / n_frames
+ tef_ed = tef_st + 1.0 / n_frames
+ tef = torch.stack([tef_st, tef_ed], dim=1).to(self.device) # (n_frames, 2)
+ video_feats = torch.cat([video_feats, tef], dim=1)
+
+        assert n_frames <= 75, "The positional embedding of this pretrained MomentDETR only supports videos up " \
+                               "to 150 secs (i.e., 75 2-sec clips) in length"
+ video_feats = video_feats.unsqueeze(0).repeat(n_query, 1, 1) # (#text, T, d)
+ video_mask = torch.ones(n_query, n_frames).to(self.device)
+ query_feats = self.feature_extractor.encode_text(query_list) # #text * (L, d)
+ query_feats, query_mask = pad_sequences_1d(
+ query_feats, dtype=torch.float32, device=self.device, fixed_length=None)
+ query_feats = F.normalize(query_feats, dim=-1, eps=1e-5)
+ model_inputs = dict(
+ src_vid=video_feats,
+ src_vid_mask=video_mask,
+ src_txt=query_feats,
+ src_txt_mask=query_mask
+ )
+
+ # decode outputs
+ outputs = self.model(**model_inputs)
+ # #moment_queries refers to the positional embeddings in MomentDETR's decoder, not the input text query
+ prob = F.softmax(outputs["pred_logits"], -1) # (batch_size, #moment_queries=10, #classes=2)
+        scores = prob[..., 0]  # (batch_size, #moment_queries); the foreground label is 0, so we take its probability directly
+ pred_spans = outputs["pred_spans"] # (bsz, #moment_queries, 2)
+ print(pred_spans)
+ _saliency_scores = outputs["saliency_scores"].half() # (bsz, L)
+ saliency_scores = []
+ valid_vid_lengths = model_inputs["src_vid_mask"].sum(1).cpu().tolist()
+ for j in range(len(valid_vid_lengths)):
+ _score = _saliency_scores[j, :int(valid_vid_lengths[j])].tolist()
+ _score = [round(e, 4) for e in _score]
+ saliency_scores.append(_score)
+
+ # compose predictions
+ predictions = []
+ video_duration = n_frames * self.clip_len
+ for idx, (spans, score) in enumerate(zip(pred_spans.cpu(), scores.cpu())):
+ spans = span_cxw_to_xx(spans) * video_duration
+ # # (#queries, 3), [st(float), ed(float), score(float)]
+ cur_ranked_preds = torch.cat([spans, score[:, None]], dim=1).tolist()
+ cur_ranked_preds = sorted(cur_ranked_preds, key=lambda x: x[2], reverse=True)
+ cur_ranked_preds = [[float(f"{e:.4f}") for e in row] for row in cur_ranked_preds]
+ cur_query_pred = dict(
+ query=query_list[idx], # str
+ vid=video_path,
+ pred_relevant_windows=cur_ranked_preds, # List([st(float), ed(float), score(float)])
+ pred_saliency_scores=saliency_scores[idx] # List(float), len==n_frames, scores for each frame
+ )
+ predictions.append(cur_query_pred)
+
+ return predictions, video_frames
+
+
+def run_example():
+ # load example data
+ from utils.basic_utils import load_jsonl
+ video_dir = "run_on_video/example/testing_videos/dogs"
+
+ #video_path = "run_on_video/example/testing_videos/"
+ query_path = "run_on_video/example/queries_highlight.jsonl"
+ queries = load_jsonl(query_path)
+ query_text_list = [e["query"] for e in queries]
+ ckpt_path = "run_on_video/moment_detr_ckpt/model_best.ckpt"
+
+ # run predictions
+ print("Build models...")
+ clip_model_name_or_path = "ViT-B/32"
+ # clip_model_name_or_path = "tmp/ViT-B-32.pt"
+ moment_detr_predictor = MomentDETRPredictor(
+ ckpt_path=ckpt_path,
+ clip_model_name_or_path=clip_model_name_or_path,
+ device="cuda"
+ )
+ print("Run prediction...")
+ video_paths = [os.path.join(video_dir, e) for e in os.listdir(video_dir)]
+ #video_paths = ["run_on_video/example/testing_videos/celebration_18s.mov"]
+
+ for video_path in video_paths:
+ output_dir = os.path.join("run_on_video/example/output/dog/empty_str", os.path.basename(video_path))
+ predictions, video_frames = moment_detr_predictor.localize_moment(
+ video_path=video_path, query_list=query_text_list)
+        # create the output directory if it does not exist
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+
+ # print data
+ for idx, query_data in enumerate(queries):
+ print("-"*30 + f"idx{idx}")
+ print(f">> query: {query_data['query']}")
+ print(f">> video_path: {video_path}")
+ #print(f">> GT moments: {query_data['relevant_windows']}")
+ print(f">> Predicted moments ([start_in_seconds, end_in_seconds, score]): "
+ f"{predictions[idx]['pred_relevant_windows']}")
+ #print(f">> GT saliency scores (only localized 2-sec clips): {query_data['saliency_scores']}")
+ print(f">> Predicted saliency scores (for all 2-sec clip): "
+ f"{predictions[idx]['pred_saliency_scores']}")
+            # output the retrieved moments
+            # sort the moments by score (the third element in each window)
+ predictions[idx]['pred_relevant_windows'] = sorted(predictions[idx]['pred_relevant_windows'], key=lambda x: x[2], reverse=True)
+ for i, (start_time, end_time, score) in enumerate(predictions[idx]['pred_relevant_windows']):
+ print(start_time, end_time, score)
+ ffmpeg_extract_subclip(video_path, start_time, end_time, targetname=os.path.join(output_dir, f'moment_{i}.mp4'))
+            # store the sorted moment windows (start, end, score)
+ with open(os.path.join(output_dir, 'moment_scores.txt'), 'w') as f:
+ for i, (start_time, end_time, score) in enumerate(predictions[idx]['pred_relevant_windows']):
+ f.write(str(i)+'. '+str(start_time)+' '+str(end_time)+' '+str(score) + '\n')
+            # save the video frames sorted by pred_saliency_scores
+ sorted_frames = [frame for _, frame in sorted(zip(predictions[idx]['pred_saliency_scores'], video_frames), reverse=True)]
+            # sort the saliency scores in descending order (written to scores.txt below)
+ sorted_scores = sorted(predictions[idx]['pred_saliency_scores'], reverse=True)
+ print(sorted_scores)
+ #save frames to output directory
+ for i, frame in enumerate(sorted_frames):
+                # convert the frame tensor to a PIL image
+ frame = frame.permute(1, 2, 0).cpu().numpy()
+ frame = frame.astype(np.uint8)
+ frame = Image.fromarray(frame)
+ frame.save(os.path.join(output_dir, str(i) + '.jpg'))
+ #save scores to output directory
+ with open(os.path.join(output_dir, 'scores.txt'), 'w') as f:
+ for i, score in enumerate(sorted_scores):
+ f.write(str(i)+'. '+str(score) + '\n')
+
+
+
+if __name__ == "__main__":
+ run_example()
diff --git a/standalone_eval/README.md b/standalone_eval/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d1fe1c3db9d7a66a744d3b1cd343aff7cd6181f1
--- /dev/null
+++ b/standalone_eval/README.md
@@ -0,0 +1,54 @@
+QVHighlights Evaluation and Codalab Submission
+==================
+
+### Task Definition
+Given a video and a natural language query, our task requires a system to retrieve the most relevant moments in the video and to detect the highlight-worthiness of the clips in the video.
+
+### Evaluation
+At project root, run
+```
+bash standalone_eval/eval_sample.sh
+```
+This command uses [eval.py](eval.py) to evaluate the provided prediction file [sample_val_preds.jsonl](sample_val_preds.jsonl);
+the output is written to `sample_val_preds_metrics.json`.
+The content of this generated file should be similar, if not identical, to the [sample_val_preds_metrics_raw.json](sample_val_preds_metrics_raw.json) file.
+
+### Format
+
+The prediction file [sample_val_preds.jsonl](sample_val_preds.jsonl) is in [JSON Line](https://jsonlines.org/) format; each row of the file can be loaded as a single `dict` in Python. Below is an example of a single line in the prediction file:
+```
+{
+ "qid": 2579,
+ "query": "A girl and her mother cooked while talking with each other on facetime.",
+ "vid": "NUsG9BgSes0_210.0_360.0",
+ "pred_relevant_windows": [
+ [0, 70, 0.9986],
+ [78, 146, 0.4138],
+ [0, 146, 0.0444],
+ ...
+ ],
+ "pred_saliency_scores": [-0.2452, -0.3779, -0.4746, ...]
+}
+
+```
+
+
+
+| entry | description |
+| --- | ----|
+| `qid` | `int`, unique query id |
+| `query` | `str`, natural language query, not used by the evaluation script |
+| `vid` | `str`, unique video id |
+| `pred_relevant_windows` | `list(list)`, moment retrieval predictions. Each sublist contains 3 elements, `[start (seconds), end (seconds), score]`|
+| `pred_saliency_scores` | `list(float)`, highlight prediction scores; higher is better. This list should contain one score for each 2-second clip in the video, in temporal order. |
+
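+For reference, a file in this format can be written with a few lines of Python (a minimal sketch; the values are truncated from the example above, and the output filename is illustrative):
+```
+import json
+
+# hypothetical predictions for a single query; real submissions contain one line per query
+predictions = [
+    {
+        "qid": 2579,
+        "query": "A girl and her mother cooked while talking with each other on facetime.",
+        "vid": "NUsG9BgSes0_210.0_360.0",
+        "pred_relevant_windows": [[0, 70, 0.9986], [78, 146, 0.4138]],
+        "pred_saliency_scores": [-0.2452, -0.3779, -0.4746],
+    },
+]
+
+# write one JSON object per line to a hypothetical output file
+with open("my_val_preds.jsonl", "w") as f:
+    f.write("\n".join(json.dumps(p) for p in predictions))
+```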
+
+### Codalab Submission
+To test your model's performance on `test` split,
+please submit both `val` and `test` predictions to our
+[Codalab evaluation server](https://codalab.lisn.upsaclay.fr/competitions/6937).
+The submission should be a single `.zip` file (no enclosing folder)
+that contains the two prediction files
+`hl_val_submission.jsonl` and `hl_test_submission.jsonl`; each `*submission.jsonl` file
+should be formatted as described above.
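+
+For example, assuming both files are in the current directory, the archive can be created with Python's standard `zipfile` module (a minimal sketch; the archive name is arbitrary):
+```
+import zipfile
+
+# put both prediction files at the root of the archive (no enclosing folder)
+with zipfile.ZipFile("submission.zip", "w") as zf:
+    zf.write("hl_val_submission.jsonl", arcname="hl_val_submission.jsonl")
+    zf.write("hl_test_submission.jsonl", arcname="hl_test_submission.jsonl")
+```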
+
diff --git a/standalone_eval/eval.py b/standalone_eval/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..786f8262cb36817f20225c1eaba244386028ce9f
--- /dev/null
+++ b/standalone_eval/eval.py
@@ -0,0 +1,344 @@
+import numpy as np
+from collections import OrderedDict, defaultdict
+import json
+import time
+import copy
+import multiprocessing as mp
+from standalone_eval.utils import compute_average_precision_detection, \
+ compute_temporal_iou_batch_cross, compute_temporal_iou_batch_paired, load_jsonl, get_ap
+
+
+def compute_average_precision_detection_wrapper(
+ input_triple, tiou_thresholds=np.linspace(0.5, 0.95, 10)):
+ qid, ground_truth, prediction = input_triple
+ scores = compute_average_precision_detection(
+ ground_truth, prediction, tiou_thresholds=tiou_thresholds)
+ return qid, scores
+
+
+def compute_mr_ap(submission, ground_truth, iou_thds=np.linspace(0.5, 0.95, 10),
+ max_gt_windows=None, max_pred_windows=10, num_workers=8, chunksize=50):
+ iou_thds = [float(f"{e:.2f}") for e in iou_thds]
+ pred_qid2data = defaultdict(list)
+ for d in submission:
+ pred_windows = d["pred_relevant_windows"][:max_pred_windows] \
+ if max_pred_windows is not None else d["pred_relevant_windows"]
+ qid = d["qid"]
+ for w in pred_windows:
+ pred_qid2data[qid].append({
+ "video-id": d["qid"], # in order to use the API
+ "t-start": w[0],
+ "t-end": w[1],
+ "score": w[2]
+ })
+
+ gt_qid2data = defaultdict(list)
+ for d in ground_truth:
+ gt_windows = d["relevant_windows"][:max_gt_windows] \
+ if max_gt_windows is not None else d["relevant_windows"]
+ qid = d["qid"]
+ for w in gt_windows:
+ gt_qid2data[qid].append({
+ "video-id": d["qid"],
+ "t-start": w[0],
+ "t-end": w[1]
+ })
+ qid2ap_list = {}
+ # start_time = time.time()
+ data_triples = [[qid, gt_qid2data[qid], pred_qid2data[qid]] for qid in pred_qid2data]
+ from functools import partial
+ compute_ap_from_triple = partial(
+ compute_average_precision_detection_wrapper, tiou_thresholds=iou_thds)
+
+ if num_workers > 1:
+ with mp.Pool(num_workers) as pool:
+ for qid, scores in pool.imap_unordered(compute_ap_from_triple, data_triples, chunksize=chunksize):
+ qid2ap_list[qid] = scores
+ else:
+ for data_triple in data_triples:
+ qid, scores = compute_ap_from_triple(data_triple)
+ qid2ap_list[qid] = scores
+
+ # print(f"compute_average_precision_detection {time.time() - start_time:.2f} seconds.")
+ ap_array = np.array(list(qid2ap_list.values())) # (#queries, #thd)
+ ap_thds = ap_array.mean(0) # mAP at different IoU thresholds.
+ iou_thd2ap = dict(zip([str(e) for e in iou_thds], ap_thds))
+ iou_thd2ap["average"] = np.mean(ap_thds)
+ # formatting
+ iou_thd2ap = {k: float(f"{100 * v:.2f}") for k, v in iou_thd2ap.items()}
+ return iou_thd2ap
+
+
+def compute_mr_r1(submission, ground_truth, iou_thds=np.linspace(0.5, 0.95, 10)):
+ """If a predicted segment has IoU >= iou_thd with one of the 1st GT segment, we define it positive"""
+ iou_thds = [float(f"{e:.2f}") for e in iou_thds]
+ pred_qid2window = {d["qid"]: d["pred_relevant_windows"][0][:2] for d in submission} # :2 rm scores
+ # gt_qid2window = {d["qid"]: d["relevant_windows"][0] for d in ground_truth}
+ gt_qid2window = {}
+ for d in ground_truth:
+ cur_gt_windows = d["relevant_windows"]
+ cur_qid = d["qid"]
+ cur_max_iou_idx = 0
+ if len(cur_gt_windows) > 0: # select the GT window that has the highest IoU
+ cur_ious = compute_temporal_iou_batch_cross(
+ np.array([pred_qid2window[cur_qid]]), np.array(d["relevant_windows"])
+ )[0]
+ cur_max_iou_idx = np.argmax(cur_ious)
+ gt_qid2window[cur_qid] = cur_gt_windows[cur_max_iou_idx]
+
+ qids = list(pred_qid2window.keys())
+ pred_windows = np.array([pred_qid2window[k] for k in qids]).astype(float)
+ gt_windows = np.array([gt_qid2window[k] for k in qids]).astype(float)
+ pred_gt_iou = compute_temporal_iou_batch_paired(pred_windows, gt_windows)
+ iou_thd2recall_at_one = {}
+ for thd in iou_thds:
+ iou_thd2recall_at_one[str(thd)] = float(f"{np.mean(pred_gt_iou >= thd) * 100:.2f}")
+ return iou_thd2recall_at_one
+
+
+def get_window_len(window):
+ return window[1] - window[0]
+
+
+def get_data_by_range(submission, ground_truth, len_range):
+ """ keep queries with ground truth window length in the specified length range.
+ Args:
+ submission:
+ ground_truth:
+ len_range: [min_l (int), max_l (int)]. the range is (min_l, max_l], i.e., min_l < l <= max_l
+ """
+ min_l, max_l = len_range
+ if min_l == 0 and max_l == 150: # min and max l in dataset
+ return submission, ground_truth
+
+ # only keep ground truth with windows in the specified length range
+    # if multiple GT windows exist, we only keep the ones in the range
+ ground_truth_in_range = []
+ gt_qids_in_range = set()
+ for d in ground_truth:
+ rel_windows_in_range = [
+ w for w in d["relevant_windows"] if min_l < get_window_len(w) <= max_l]
+ if len(rel_windows_in_range) > 0:
+ d = copy.deepcopy(d)
+ d["relevant_windows"] = rel_windows_in_range
+ ground_truth_in_range.append(d)
+ gt_qids_in_range.add(d["qid"])
+
+ # keep only submissions for ground_truth_in_range
+ submission_in_range = []
+ for d in submission:
+ if d["qid"] in gt_qids_in_range:
+ submission_in_range.append(copy.deepcopy(d))
+
+ return submission_in_range, ground_truth_in_range
+
+
+def eval_moment_retrieval(submission, ground_truth, verbose=True):
+ length_ranges = [[0, 10], [10, 30], [30, 150], [0, 150], ] #
+ range_names = ["short", "middle", "long", "full"]
+
+ ret_metrics = {}
+ for l_range, name in zip(length_ranges, range_names):
+ if verbose:
+ start_time = time.time()
+ _submission, _ground_truth = get_data_by_range(submission, ground_truth, l_range)
+ print(f"{name}: {l_range}, {len(_ground_truth)}/{len(ground_truth)}="
+ f"{100*len(_ground_truth)/len(ground_truth):.2f} examples.")
+ iou_thd2average_precision = compute_mr_ap(_submission, _ground_truth, num_workers=8, chunksize=50)
+ iou_thd2recall_at_one = compute_mr_r1(_submission, _ground_truth)
+ ret_metrics[name] = {"MR-mAP": iou_thd2average_precision, "MR-R1": iou_thd2recall_at_one}
+ if verbose:
+ print(f"[eval_moment_retrieval] [{name}] {time.time() - start_time:.2f} seconds")
+ return ret_metrics
+
+
+def compute_hl_hit1(qid2preds, qid2gt_scores_binary):
+ qid2max_scored_clip_idx = {k: np.argmax(v["pred_saliency_scores"]) for k, v in qid2preds.items()}
+ hit_scores = np.zeros((len(qid2preds), 3))
+ qids = list(qid2preds.keys())
+ for idx, qid in enumerate(qids):
+ pred_clip_idx = qid2max_scored_clip_idx[qid]
+ gt_scores_binary = qid2gt_scores_binary[qid] # (#clips, 3)
+ if pred_clip_idx < len(gt_scores_binary):
+ hit_scores[idx] = gt_scores_binary[pred_clip_idx]
+ # aggregate scores from 3 separate annotations (3 workers) by taking the max.
+ # then average scores from all queries.
+ hit_at_one = float(f"{100 * np.mean(np.max(hit_scores, 1)):.2f}")
+ return hit_at_one
+
+
+def compute_hl_ap(qid2preds, qid2gt_scores_binary, num_workers=8, chunksize=50):
+ qid2pred_scores = {k: v["pred_saliency_scores"] for k, v in qid2preds.items()}
+ ap_scores = np.zeros((len(qid2preds), 3)) # (#preds, 3)
+ qids = list(qid2preds.keys())
+ input_tuples = []
+ for idx, qid in enumerate(qids):
+ for w_idx in range(3): # annotation score idx
+ y_true = qid2gt_scores_binary[qid][:, w_idx]
+ y_predict = np.array(qid2pred_scores[qid])
+ input_tuples.append((idx, w_idx, y_true, y_predict))
+
+ if num_workers > 1:
+ with mp.Pool(num_workers) as pool:
+ for idx, w_idx, score in pool.imap_unordered(
+ compute_ap_from_tuple, input_tuples, chunksize=chunksize):
+ ap_scores[idx, w_idx] = score
+ else:
+ for input_tuple in input_tuples:
+ idx, w_idx, score = compute_ap_from_tuple(input_tuple)
+ ap_scores[idx, w_idx] = score
+
+ # it's the same if we first average across different annotations, then average across queries
+ # since all queries have the same #annotations.
+ mean_ap = float(f"{100 * np.mean(ap_scores):.2f}")
+ return mean_ap
+
+
+def compute_ap_from_tuple(input_tuple):
+ idx, w_idx, y_true, y_predict = input_tuple
+ if len(y_true) < len(y_predict):
+ # print(f"len(y_true) < len(y_predict) {len(y_true), len(y_predict)}")
+ y_predict = y_predict[:len(y_true)]
+ elif len(y_true) > len(y_predict):
+ # print(f"len(y_true) > len(y_predict) {len(y_true), len(y_predict)}")
+ _y_predict = np.zeros(len(y_true))
+ _y_predict[:len(y_predict)] = y_predict
+ y_predict = _y_predict
+
+ score = get_ap(y_true, y_predict)
+ return idx, w_idx, score
+
+
+def mk_gt_scores(gt_data, clip_length=2):
+ """gt_data, dict, """
+ num_clips = int(gt_data["duration"] / clip_length)
+ saliency_scores_full_video = np.zeros((num_clips, 3))
+ relevant_clip_ids = np.array(gt_data["relevant_clip_ids"]) # (#relevant_clip_ids, )
+ saliency_scores_relevant_clips = np.array(gt_data["saliency_scores"]) # (#relevant_clip_ids, 3)
+ saliency_scores_full_video[relevant_clip_ids] = saliency_scores_relevant_clips
+ return saliency_scores_full_video # (#clips_in_video, 3) the scores are in range [0, 4]
+
+
+def eval_highlight(submission, ground_truth, verbose=True):
+ """
+ Args:
+ submission:
+ ground_truth:
+ verbose:
+ """
+ qid2preds = {d["qid"]: d for d in submission}
+ qid2gt_scores_full_range = {d["qid"]: mk_gt_scores(d) for d in ground_truth} # scores in range [0, 4]
+ # gt_saliency_score_min: int, in [0, 1, 2, 3, 4]. The minimum score for a positive clip.
+ gt_saliency_score_min_list = [2, 3, 4]
+ saliency_score_names = ["Fair", "Good", "VeryGood"]
+ highlight_det_metrics = {}
+ for gt_saliency_score_min, score_name in zip(gt_saliency_score_min_list, saliency_score_names):
+ start_time = time.time()
+ qid2gt_scores_binary = {
+ k: (v >= gt_saliency_score_min).astype(float)
+ for k, v in qid2gt_scores_full_range.items()} # scores in [0, 1]
+ hit_at_one = compute_hl_hit1(qid2preds, qid2gt_scores_binary)
+ mean_ap = compute_hl_ap(qid2preds, qid2gt_scores_binary)
+ highlight_det_metrics[f"HL-min-{score_name}"] = {"HL-mAP": mean_ap, "HL-Hit1": hit_at_one}
+ if verbose:
+ print(f"Calculating highlight scores with min score {gt_saliency_score_min} ({score_name})")
+ print(f"Time cost {time.time() - start_time:.2f} seconds")
+ return highlight_det_metrics
+
+
+def eval_submission(submission, ground_truth, verbose=True, match_number=True):
+ """
+ Args:
+ submission: list(dict), each dict is {
+ qid: str,
+ query: str,
+ vid: str,
+            pred_relevant_windows: list([st, ed, score]),
+ pred_saliency_scores: list(float), len == #clips in video.
+ i.e., each clip in the video will have a saliency score.
+ }
+ ground_truth: list(dict), each dict is {
+ "qid": 7803,
+ "query": "Man in gray top walks from outside to inside.",
+ "duration": 150,
+ "vid": "RoripwjYFp8_360.0_510.0",
+ "relevant_clip_ids": [13, 14, 15, 16, 17]
+ "saliency_scores": [[4, 4, 2], [3, 4, 2], [2, 2, 3], [2, 2, 2], [0, 1, 3]]
+ each sublist corresponds to one clip in relevant_clip_ids.
+ The 3 elements in the sublist are scores from 3 different workers. The
+ scores are in [0, 1, 2, 3, 4], meaning [Very Bad, ..., Good, Very Good]
+ }
+ verbose:
+ match_number:
+
+ Returns:
+
+ """
+ pred_qids = set([e["qid"] for e in submission])
+ gt_qids = set([e["qid"] for e in ground_truth])
+ if match_number:
+ assert pred_qids == gt_qids, \
+ f"qids in ground_truth and submission must match. " \
+ f"use `match_number=False` if you wish to disable this check"
+    else:  # only keep the items that exist in both submission and ground_truth
+ shared_qids = pred_qids.intersection(gt_qids)
+ submission = [e for e in submission if e["qid"] in shared_qids]
+ ground_truth = [e for e in ground_truth if e["qid"] in shared_qids]
+
+ eval_metrics = {}
+ eval_metrics_brief = OrderedDict()
+ if "pred_relevant_windows" in submission[0]:
+ moment_ret_scores = eval_moment_retrieval(
+ submission, ground_truth, verbose=verbose)
+ eval_metrics.update(moment_ret_scores)
+ moment_ret_scores_brief = {
+ "MR-full-mAP": moment_ret_scores["full"]["MR-mAP"]["average"],
+ "MR-full-mAP@0.5": moment_ret_scores["full"]["MR-mAP"]["0.5"],
+ "MR-full-mAP@0.75": moment_ret_scores["full"]["MR-mAP"]["0.75"],
+ "MR-short-mAP": moment_ret_scores["short"]["MR-mAP"]["average"],
+ "MR-middle-mAP": moment_ret_scores["middle"]["MR-mAP"]["average"],
+ "MR-long-mAP": moment_ret_scores["long"]["MR-mAP"]["average"],
+ "MR-full-R1@0.5": moment_ret_scores["full"]["MR-R1"]["0.5"],
+ "MR-full-R1@0.7": moment_ret_scores["full"]["MR-R1"]["0.7"],
+ }
+ eval_metrics_brief.update(
+ sorted([(k, v) for k, v in moment_ret_scores_brief.items()], key=lambda x: x[0]))
+
+ if "pred_saliency_scores" in submission[0]:
+ highlight_det_scores = eval_highlight(
+ submission, ground_truth, verbose=verbose)
+ eval_metrics.update(highlight_det_scores)
+ highlight_det_scores_brief = dict([
+ (f"{k}-{sub_k.split('-')[1]}", v[sub_k])
+ for k, v in highlight_det_scores.items() for sub_k in v])
+ eval_metrics_brief.update(highlight_det_scores_brief)
+
+ # sort by keys
+ final_eval_metrics = OrderedDict()
+ final_eval_metrics["brief"] = eval_metrics_brief
+ final_eval_metrics.update(sorted([(k, v) for k, v in eval_metrics.items()], key=lambda x: x[0]))
+ return final_eval_metrics
+
+
+def eval_main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Moments and Highlights Evaluation Script")
+ parser.add_argument("--submission_path", type=str, help="path to generated prediction file")
+ parser.add_argument("--gt_path", type=str, help="path to GT file")
+ parser.add_argument("--save_path", type=str, help="path to save the results")
+ parser.add_argument("--not_verbose", action="store_true")
+ args = parser.parse_args()
+
+ verbose = not args.not_verbose
+ submission = load_jsonl(args.submission_path)
+ gt = load_jsonl(args.gt_path)
+ results = eval_submission(submission, gt, verbose=verbose)
+ if verbose:
+ print(json.dumps(results, indent=4))
+
+ with open(args.save_path, "w") as f:
+ f.write(json.dumps(results, indent=4))
+
+
+if __name__ == '__main__':
+ eval_main()
diff --git a/standalone_eval/eval_sample.sh b/standalone_eval/eval_sample.sh
new file mode 100644
index 0000000000000000000000000000000000000000..58f61f28f4cfe804ad4443dbfcea68247bef88df
--- /dev/null
+++ b/standalone_eval/eval_sample.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+# Usage: bash standalone_eval/eval_sample.sh
+submission_path=standalone_eval/sample_val_preds.jsonl
+gt_path=data/highlight_val_release.jsonl
+save_path=standalone_eval/sample_val_preds_metrics.json
+
+PYTHONPATH=$PYTHONPATH:. python standalone_eval/eval.py \
+--submission_path ${submission_path} \
+--gt_path ${gt_path} \
+--save_path ${save_path}
diff --git a/standalone_eval/sample_val_preds.jsonl b/standalone_eval/sample_val_preds.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..993ae42a444e6a03af466b4c95942d580c7c168e
--- /dev/null
+++ b/standalone_eval/sample_val_preds.jsonl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df3227ef84985db846f102e75cda43b5f9f962d12b95b990028eed88f1901b40
+size 2399408
diff --git a/standalone_eval/sample_val_preds_metrics_raw.json b/standalone_eval/sample_val_preds_metrics_raw.json
new file mode 100644
index 0000000000000000000000000000000000000000..66b6b5626c6fcd648fe3ef713b55d214d6fa263b
--- /dev/null
+++ b/standalone_eval/sample_val_preds_metrics_raw.json
@@ -0,0 +1,138 @@
+{
+ "brief": {
+ "MR-full-R1@0.5": 53.94,
+ "MR-full-R1@0.7": 34.84,
+ "MR-full-mAP": 32.2,
+ "MR-full-mAP@0.5": 54.96,
+ "MR-full-mAP@0.75": 31.01,
+ "MR-long-mAP": 41.11,
+ "MR-middle-mAP": 32.3,
+ "MR-short-mAP": 3.28,
+ "HL-min-Fair-mAP": 67.77,
+ "HL-min-Fair-Hit1": 66.45,
+ "HL-min-Good-mAP": 58.09,
+ "HL-min-Good-Hit1": 64.45,
+ "HL-min-VeryGood-mAP": 35.65,
+ "HL-min-VeryGood-Hit1": 55.55
+ },
+ "HL-min-Fair": {
+ "HL-mAP": 67.77,
+ "HL-Hit1": 66.45
+ },
+ "HL-min-Good": {
+ "HL-mAP": 58.09,
+ "HL-Hit1": 64.45
+ },
+ "HL-min-VeryGood": {
+ "HL-mAP": 35.65,
+ "HL-Hit1": 55.55
+ },
+ "full": {
+ "MR-mAP": {
+ "0.5": 54.96,
+ "0.55": 49.88,
+ "0.6": 46.62,
+ "0.65": 40.2,
+ "0.7": 35.49,
+ "0.75": 31.01,
+ "0.8": 24.79,
+ "0.85": 18.72,
+ "0.9": 13.21,
+ "0.95": 7.16,
+ "average": 32.2
+ },
+ "MR-R1": {
+ "0.5": 53.94,
+ "0.55": 48.97,
+ "0.6": 46.06,
+ "0.65": 39.42,
+ "0.7": 34.84,
+ "0.75": 30.71,
+ "0.8": 24.97,
+ "0.85": 18.9,
+ "0.9": 13.35,
+ "0.95": 7.23
+ }
+ },
+ "long": {
+ "MR-mAP": {
+ "0.5": 64.08,
+ "0.55": 60.3,
+ "0.6": 56.52,
+ "0.65": 49.24,
+ "0.7": 44.73,
+ "0.75": 40.56,
+ "0.8": 34.59,
+ "0.85": 28.53,
+ "0.9": 20.42,
+ "0.95": 12.12,
+ "average": 41.11
+ },
+ "MR-R1": {
+ "0.5": 56.1,
+ "0.55": 53.66,
+ "0.6": 50.52,
+ "0.65": 43.55,
+ "0.7": 40.24,
+ "0.75": 37.11,
+ "0.8": 32.06,
+ "0.85": 26.83,
+ "0.9": 19.51,
+ "0.95": 11.67
+ }
+ },
+ "middle": {
+ "MR-mAP": {
+ "0.5": 58.81,
+ "0.55": 52.43,
+ "0.6": 48.77,
+ "0.65": 41.68,
+ "0.7": 36.02,
+ "0.75": 30.51,
+ "0.8": 23.09,
+ "0.85": 16.04,
+ "0.9": 10.79,
+ "0.95": 4.84,
+ "average": 32.3
+ },
+ "MR-R1": {
+ "0.5": 50.26,
+ "0.55": 44.83,
+ "0.6": 42.22,
+ "0.65": 36.26,
+ "0.7": 31.24,
+ "0.75": 27.06,
+ "0.8": 20.9,
+ "0.85": 14.52,
+ "0.9": 9.93,
+ "0.95": 4.7
+ }
+ },
+ "short": {
+ "MR-mAP": {
+ "0.5": 9.38,
+ "0.55": 6.27,
+ "0.6": 5.74,
+ "0.65": 3.95,
+ "0.7": 2.83,
+ "0.75": 1.99,
+ "0.8": 1.15,
+ "0.85": 0.49,
+ "0.9": 0.49,
+ "0.95": 0.49,
+ "average": 3.28
+ },
+ "MR-R1": {
+ "0.5": 7.69,
+ "0.55": 5.13,
+ "0.6": 4.66,
+ "0.65": 3.26,
+ "0.7": 2.33,
+ "0.75": 0.93,
+ "0.8": 0.7,
+ "0.85": 0.0,
+ "0.9": 0.0,
+ "0.95": 0.0
+ }
+ }
+}
\ No newline at end of file
diff --git a/standalone_eval/utils.py b/standalone_eval/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec1a1a39121c5c1618aefbd37d3af81ae7d9220f
--- /dev/null
+++ b/standalone_eval/utils.py
@@ -0,0 +1,209 @@
+"""
+Copied from MMAction2
+https://github.com/open-mmlab/mmaction2/blob/master/mmaction/core/evaluation/eval_detection.py
+"""
+import json
+import numpy as np
+from sklearn.metrics import precision_recall_curve
+
+
+def load_jsonl(filename):
+ with open(filename, "r") as f:
+ return [json.loads(l.strip("\n")) for l in f.readlines()]
+
+
+def compute_temporal_iou_batch_paired(pred_windows, gt_windows):
+ """ compute intersection-over-union along temporal axis for each pair of windows in pred_windows and gt_windows.
+ Args:
+ pred_windows: np.ndarray, (N, 2), [st (float), ed (float)] * N
+ gt_windows: np.ndarray, (N, 2), [st (float), ed (float)] * N
+ Returns:
+ iou (float): np.ndarray, (N, )
+
+ References:
+ for np.divide with zeros, see https://stackoverflow.com/a/37977222
+ """
+ intersection = np.maximum(
+ 0, np.minimum(pred_windows[:, 1], gt_windows[:, 1]) - np.maximum(pred_windows[:, 0], gt_windows[:, 0])
+ )
+ union = np.maximum(pred_windows[:, 1], gt_windows[:, 1]) \
+ - np.minimum(pred_windows[:, 0], gt_windows[:, 0]) # not the correct union though
+ return np.divide(intersection, union, out=np.zeros_like(intersection), where=union != 0)
+
+
+def compute_temporal_iou_batch_cross(spans1, spans2):
+ """
+ Args:
+ spans1: (N, 2) np.ndarray, each row defines a span [st, ed]
+ spans2: (M, 2) np.ndarray, ...
+
+ Returns:
+ iou: (N, M) np.ndarray
+ union: (N, M) np.ndarray
+    >>> spans1 = np.array([[0, 0.2], [0.5, 1.0]])
+    >>> spans2 = np.array([[0, 0.3], [0., 1.0]])
+    >>> compute_temporal_iou_batch_cross(spans1, spans2)
+    (array([[0.6667, 0.2000],
+            [0.0000, 0.5000]]),
+     array([[0.3000, 1.0000],
+            [0.8000, 1.0000]]))
+ """
+ areas1 = spans1[:, 1] - spans1[:, 0] # (N, )
+ areas2 = spans2[:, 1] - spans2[:, 0] # (M, )
+
+ left = np.maximum(spans1[:, None, 0], spans2[None, :, 0]) # (N, M)
+ right = np.minimum(spans1[:, None, 1], spans2[None, :, 1]) # (N, M)
+
+ inter = np.clip(right - left, 0, None) # (N, M)
+ union = areas1[:, None] + areas2[None, :] - inter # (N, M)
+
+ iou = inter / union
+ return iou, union
+
+
+def interpolated_precision_recall(precision, recall):
+ """Interpolated AP - VOCdevkit from VOC 2011.
+
+ Args:
+ precision (np.ndarray): The precision of different thresholds.
+ recall (np.ndarray): The recall of different thresholds.
+
+ Returns:
+ float: Average precision score.
+ """
+ mprecision = np.hstack([[0], precision, [0]])
+ mrecall = np.hstack([[0], recall, [1]])
+ for i in range(len(mprecision) - 1)[::-1]:
+ mprecision[i] = max(mprecision[i], mprecision[i + 1])
+ idx = np.where(mrecall[1::] != mrecall[0:-1])[0] + 1
+ ap = np.sum((mrecall[idx] - mrecall[idx - 1]) * mprecision[idx])
+ return ap
+
+
+def compute_average_precision_detection(ground_truth,
+ prediction,
+ tiou_thresholds=np.linspace(
+ 0.5, 0.95, 10)):
+ """Compute average precision (detection task) between ground truth and
+    prediction data frames. If multiple predictions match the same
+    ground truth segment, only the one with the highest score is counted as a true
+    positive. This code is greatly inspired by the Pascal VOC devkit.
+
+ Args:
+ ground_truth (list[dict]): List containing the ground truth instances
+ (dictionaries). Required keys are 'video-id', 't-start' and
+ 't-end'.
+ prediction (list[dict]): List containing the prediction instances
+ (dictionaries). Required keys are: 'video-id', 't-start', 't-end'
+ and 'score'.
+ tiou_thresholds (np.ndarray): A 1darray indicates the temporal
+ intersection over union threshold, which is optional.
+ Default: ``np.linspace(0.5, 0.95, 10)``.
+
+ Returns:
+ Float: ap, Average precision score.
+ """
+ num_thresholds = len(tiou_thresholds)
+ num_gts = len(ground_truth)
+ num_preds = len(prediction)
+ ap = np.zeros(num_thresholds)
+ if len(prediction) == 0:
+ return ap
+
+ num_positive = float(num_gts)
+ lock_gt = np.ones((num_thresholds, num_gts)) * -1
+ # Sort predictions by decreasing score order.
+ prediction.sort(key=lambda x: -x['score'])
+ # Initialize true positive and false positive vectors.
+ tp = np.zeros((num_thresholds, num_preds))
+ fp = np.zeros((num_thresholds, num_preds))
+
+ # Adaptation to query faster
+ ground_truth_by_videoid = {}
+ for i, item in enumerate(ground_truth):
+ item['index'] = i
+ ground_truth_by_videoid.setdefault(item['video-id'], []).append(item)
+
+    # Assign true positives to ground truth instances.
+ for idx, pred in enumerate(prediction):
+ if pred['video-id'] in ground_truth_by_videoid:
+ gts = ground_truth_by_videoid[pred['video-id']]
+ else:
+ fp[:, idx] = 1
+ continue
+
+ _pred = np.array([[pred['t-start'], pred['t-end']], ])
+ _gt = np.array([[gt['t-start'], gt['t-end']] for gt in gts])
+ tiou_arr = compute_temporal_iou_batch_cross(_pred, _gt)[0]
+
+ tiou_arr = tiou_arr.reshape(-1)
+ # We would like to retrieve the predictions with highest tiou score.
+ tiou_sorted_idx = tiou_arr.argsort()[::-1]
+ for t_idx, tiou_threshold in enumerate(tiou_thresholds):
+ for j_idx in tiou_sorted_idx:
+ if tiou_arr[j_idx] < tiou_threshold:
+ fp[t_idx, idx] = 1
+ break
+ if lock_gt[t_idx, gts[j_idx]['index']] >= 0:
+ continue
+ # Assign as true positive after the filters above.
+ tp[t_idx, idx] = 1
+ lock_gt[t_idx, gts[j_idx]['index']] = idx
+ break
+
+ if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
+ fp[t_idx, idx] = 1
+
+ tp_cumsum = np.cumsum(tp, axis=1).astype(float)
+ fp_cumsum = np.cumsum(fp, axis=1).astype(float)
+ recall_cumsum = tp_cumsum / num_positive
+
+ precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
+
+ for t_idx in range(len(tiou_thresholds)):
+ ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
+ recall_cumsum[t_idx, :])
+ return ap
+
+
+def get_ap(y_true, y_predict, interpolate=True, point_11=False):
+ """
+ Average precision in different formats: (non-) interpolated and/or 11-point approximated
+ point_11=True and interpolate=True corresponds to the 11-point interpolated AP used in
+    the PASCAL VOC challenge up to the 2008 edition and has been verified against the vlfeat implementation
+ The exact average precision (interpolate=False, point_11=False) corresponds to the one of vl_feat
+
+ :param y_true: list/ numpy vector of true labels in {0,1} for each element
+ :param y_predict: predicted score for each element
+ :param interpolate: Use interpolation?
+ :param point_11: Use 11-point approximation to average precision?
+ :return: average precision
+
+ ref: https://github.com/gyglim/video2gif_dataset/blob/master/v2g_evaluation/__init__.py
+
+ """
+ # Check inputs
+ assert len(y_true) == len(y_predict), "Prediction and ground truth need to be of the same length"
+ if len(set(y_true)) == 1:
+ if y_true[0] == 0:
+ return 0 # True labels are all zeros
+ # raise ValueError('True labels cannot all be zero')
+ else:
+ return 1
+ else:
+ assert sorted(set(y_true)) == [0, 1], "Ground truth can only contain elements {0,1}"
+
+ # Compute precision and recall
+ precision, recall, _ = precision_recall_curve(y_true, y_predict)
+ recall = recall.astype(np.float32)
+
+ if interpolate: # Compute the interpolated precision
+ for i in range(1, len(precision)):
+ precision[i] = max(precision[i - 1], precision[i])
+
+ if point_11: # Compute the 11-point approximated AP
+ precision_11 = [precision[np.where(recall >= t)[0][-1]] for t in np.arange(0, 1.01, 0.1)]
+ return np.mean(precision_11)
+ else: # Compute the AP using precision at every additionally recalled sample
+ indices = np.where(np.diff(recall))
+ return np.mean(precision[indices])
diff --git a/utils/basic_utils.py b/utils/basic_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..62a55d743a3b01d5b54b43412c616418be9e3ee8
--- /dev/null
+++ b/utils/basic_utils.py
@@ -0,0 +1,221 @@
+import os
+import json
+import zipfile
+import numpy as np
+import pickle
+from collections import OrderedDict, Counter
+import pandas as pd
+
+
+def load_pickle(filename):
+ with open(filename, "rb") as f:
+ return pickle.load(f)
+
+
+def save_pickle(data, filename):
+ with open(filename, "wb") as f:
+ pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
+
+
+def load_json(filename):
+ with open(filename, "r") as f:
+ return json.load(f)
+
+
+def save_json(data, filename, save_pretty=False, sort_keys=False):
+ with open(filename, "w") as f:
+ if save_pretty:
+ f.write(json.dumps(data, indent=4, sort_keys=sort_keys))
+ else:
+ json.dump(data, f)
+
+
+def load_jsonl(filename):
+ with open(filename, "r") as f:
+ return [json.loads(l.strip("\n")) for l in f.readlines()]
+
+
+def save_jsonl(data, filename):
+ """data is a list"""
+ with open(filename, "w") as f:
+ f.write("\n".join([json.dumps(e) for e in data]))
+
+
+def save_lines(list_of_str, filepath):
+ with open(filepath, "w") as f:
+ f.write("\n".join(list_of_str))
+
+
+def read_lines(filepath):
+ with open(filepath, "r") as f:
+ return [e.strip("\n") for e in f.readlines()]
+
+
+def mkdirp(p):
+ if not os.path.exists(p):
+ os.makedirs(p)
+
+
+def flat_list_of_lists(l):
+ """flatten a list of lists [[1,2], [3,4]] to [1,2,3,4]"""
+ return [item for sublist in l for item in sublist]
+
+
+def convert_to_seconds(hms_time):
+ """ convert '00:01:12' to 72 seconds.
+    :hms_time (str): time as a colon separated string, e.g. '00:01:12'
+ :return (int): time in seconds, e.g. 72
+ """
+ times = [float(t) for t in hms_time.split(":")]
+ return times[0] * 3600 + times[1] * 60 + times[2]
+
+
+def get_video_name_from_url(url):
+ return url.split("/")[-1][:-4]
+
+
+def merge_dicts(list_dicts):
+ merged_dict = list_dicts[0].copy()
+ for i in range(1, len(list_dicts)):
+ merged_dict.update(list_dicts[i])
+ return merged_dict
+
+
+def l2_normalize_np_array(np_array, eps=1e-5):
+ """np_array: np.ndarray, (*, D), where the last dim will be normalized"""
+ return np_array / (np.linalg.norm(np_array, axis=-1, keepdims=True) + eps)
+
+
+def make_zipfile(src_dir, save_path, enclosing_dir="", exclude_dirs=None, exclude_extensions=None,
+ exclude_dirs_substring=None):
+ """make a zip file of root_dir, save it to save_path.
+ exclude_paths will be excluded if it is a subdir of root_dir.
+ An enclosing_dir is added is specified.
+ """
+ abs_src = os.path.abspath(src_dir)
+ with zipfile.ZipFile(save_path, "w") as zf:
+ for dirname, subdirs, files in os.walk(src_dir):
+ if exclude_dirs is not None:
+ for e_p in exclude_dirs:
+ if e_p in subdirs:
+ subdirs.remove(e_p)
+ if exclude_dirs_substring is not None:
+ to_rm = []
+ for d in subdirs:
+ if exclude_dirs_substring in d:
+ to_rm.append(d)
+ for e in to_rm:
+ subdirs.remove(e)
+ arcname = os.path.join(enclosing_dir, dirname[len(abs_src) + 1:])
+ zf.write(dirname, arcname)
+ for filename in files:
+ if exclude_extensions is not None:
+ if os.path.splitext(filename)[1] in exclude_extensions:
+ continue # do not zip it
+ absname = os.path.join(dirname, filename)
+ arcname = os.path.join(enclosing_dir, absname[len(abs_src) + 1:])
+ zf.write(absname, arcname)
+
+
+class AverageMeter(object):
+ """Computes and stores the average and current/max/min value"""
+ def __init__(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+ self.max = -1e10
+ self.min = 1e10
+ self.reset()
+
+ def reset(self):
+ self.val = 0
+ self.avg = 0
+ self.sum = 0
+ self.count = 0
+ self.max = -1e10
+ self.min = 1e10
+
+ def update(self, val, n=1):
+ self.max = max(val, self.max)
+ self.min = min(val, self.min)
+ self.val = val
+ self.sum += val * n
+ self.count += n
+ self.avg = self.sum / self.count
+
+
+def dissect_by_lengths(np_array, lengths, dim=0, assert_equal=True):
+ """Dissect an array (N, D) into a list a sub-array,
+ np_array.shape[0] == sum(lengths), Output is a list of nd arrays, singlton dimention is kept"""
+ if assert_equal:
+ assert len(np_array) == sum(lengths)
+ length_indices = [0, ]
+ for i in range(len(lengths)):
+ length_indices.append(length_indices[i] + lengths[i])
+ if dim == 0:
+ array_list = [np_array[length_indices[i]:length_indices[i+1]] for i in range(len(lengths))]
+ elif dim == 1:
+ array_list = [np_array[:, length_indices[i]:length_indices[i + 1]] for i in range(len(lengths))]
+ elif dim == 2:
+ array_list = [np_array[:, :, length_indices[i]:length_indices[i + 1]] for i in range(len(lengths))]
+ else:
+ raise NotImplementedError
+ return array_list
+
+
+def get_ratio_from_counter(counter_obj, threshold=200):
+ keys = counter_obj.keys()
+ values = counter_obj.values()
+ filtered_values = [counter_obj[k] for k in keys if k > threshold]
+ return float(sum(filtered_values)) / sum(values)
+
+
+def get_counter_dist(counter_object, sort_type="none"):
+ _sum = sum(counter_object.values())
+ dist = {k: float(f"{100 * v / _sum:.2f}") for k, v in counter_object.items()}
+ if sort_type == "value":
+ dist = OrderedDict(sorted(dist.items(), reverse=True))
+ return dist
+
+
+def get_show_name(vid_name):
+ """
+ get tvshow name from vid_name
+ :param vid_name: video clip name
+ :return: tvshow name
+ """
+ show_list = ["friends", "met", "castle", "house", "grey"]
+ vid_name_prefix = vid_name.split("_")[0]
+ show_name = vid_name_prefix if vid_name_prefix in show_list else "bbt"
+ return show_name
+
+
+def get_abspaths_by_ext(dir_path, ext=(".jpg",)):
+ """Get absolute paths to files in dir_path with extensions specified by ext.
+ Note this function does work recursively.
+ """
+ if isinstance(ext, list):
+ ext = tuple(ext)
+ if isinstance(ext, str):
+ ext = tuple([ext, ])
+ filepaths = [os.path.join(root, name)
+ for root, dirs, files in os.walk(dir_path)
+ for name in files
+ if name.endswith(tuple(ext))]
+ return filepaths
+
+
+def get_basename_no_ext(path):
+ """ '/data/movienet/240p_keyframe_feats/tt7672188.npz' --> 'tt7672188' """
+ return os.path.splitext(os.path.split(path)[1])[0]
+
+
+def dict_to_markdown(d, max_str_len=120):
+ # convert list into its str representation
+ d = {k: v.__repr__() if isinstance(v, list) else v for k, v in d.items()}
+ # truncate string that is longer than max_str_len
+ if max_str_len is not None:
+ d = {k: v[-max_str_len:] if isinstance(v, str) else v for k, v in d.items()}
+ return pd.DataFrame(d, index=[0]).transpose().to_markdown()
+
diff --git a/utils/model_utils.py b/utils/model_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..06eed4751ad15e78692e64926dfd2741664949ce
--- /dev/null
+++ b/utils/model_utils.py
@@ -0,0 +1,15 @@
+def count_parameters(model, verbose=True):
+ """Count number of parameters in PyTorch model,
+ References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.
+
+    from utils.model_utils import count_parameters
+ count_parameters(model)
+ import sys
+ sys.exit(1)
+ """
+ n_all = sum(p.numel() for p in model.parameters())
+ n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
+ if verbose:
+ print("Parameter Count: all {:,d}; trainable {:,d}".format(n_all, n_trainable))
+ return n_all, n_trainable
+
diff --git a/utils/temporal_nms.py b/utils/temporal_nms.py
new file mode 100644
index 0000000000000000000000000000000000000000..2844f5d4c1ac71760cd82c7aaf82c6b2daa9a207
--- /dev/null
+++ b/utils/temporal_nms.py
@@ -0,0 +1,74 @@
+"""
+Non-Maximum Suppression for video proposals.
+"""
+
+
+def compute_temporal_iou(pred, gt):
+ """ deprecated due to performance concerns
+ compute intersection-over-union along temporal axis
+ Args:
+ pred: [st (float), ed (float)]
+ gt: [st (float), ed (float)]
+ Returns:
+ iou (float):
+
+ Ref: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py
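+
+    Example:
+    >>> compute_temporal_iou([0, 4], [2, 6])  # intersection 2, union 6
+    0.3333333333333333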
+ """
+ intersection = max(0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
+    union = max(pred[1], gt[1]) - min(pred[0], gt[0])  # span of the union; overestimates only when the segments are disjoint, where intersection is 0 anyway
+ if union == 0:
+ return 0
+ else:
+ return 1.0 * intersection / union
+
+
+def temporal_nms(predictions, nms_thd, max_after_nms=100):
+ """
+ Args:
+ predictions: list(sublist), each sublist is [st (float), ed(float), score (float)],
+            note larger scores are better and are preserved. For metrics that are better when smaller,
+            negate them first, e.g., convert a distance to a negative distance.
+        nms_thd: float in [0, 1], IoU threshold above which overlapping predictions are suppressed
+        max_after_nms: int, maximum number of predictions to keep
+ Returns:
+ predictions_after_nms: list(sublist), each sublist is [st (float), ed(float), score (float)]
+ References:
+ https://github.com/wzmsltw/BSN-boundary-sensitive-network/blob/7b101fc5978802aa3c95ba5779eb54151c6173c6/Post_processing.py#L42
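+
+    Example (illustrative):
+    >>> temporal_nms([[0, 10, 0.9], [1, 11, 0.8], [20, 30, 0.7]], nms_thd=0.5)
+    [[0, 10, 0.9], [20, 30, 0.7]]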
+ """
+ if len(predictions) == 1: # only has one prediction, no need for nms
+ return predictions
+
+ predictions = sorted(predictions, key=lambda x: x[2], reverse=True) # descending order
+
+ tstart = [e[0] for e in predictions]
+ tend = [e[1] for e in predictions]
+ tscore = [e[2] for e in predictions]
+ rstart = []
+ rend = []
+ rscore = []
+    while len(tstart) > 1 and len(rscore) < max_after_nms:  # keep at most max_after_nms predictions
+ idx = 1
+ while idx < len(tstart): # compare with every prediction in the list.
+ if compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]) > nms_thd:
+ # rm highly overlapped lower score entries.
+ tstart.pop(idx)
+ tend.pop(idx)
+ tscore.pop(idx)
+ # print("--------------------------------")
+ # print(compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]))
+ # print([tstart[0], tend[0]], [tstart[idx], tend[idx]])
+ # print(tstart.pop(idx), tend.pop(idx), tscore.pop(idx))
+ else:
+ # move to next
+ idx += 1
+ rstart.append(tstart.pop(0))
+ rend.append(tend.pop(0))
+ rscore.append(tscore.pop(0))
+
+    if len(rscore) < max_after_nms and len(tstart) >= 1:  # add the last remaining prediction if capacity allows
+ rstart.append(tstart.pop(0))
+ rend.append(tend.pop(0))
+ rscore.append(tscore.pop(0))
+
+ predictions_after_nms = [[st, ed, s] for s, st, ed in zip(rscore, rstart, rend)]
+ return predictions_after_nms
diff --git a/utils/tensor_utils.py b/utils/tensor_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c2c25a83b66092b1ce8731b4d9fae1523438b29
--- /dev/null
+++ b/utils/tensor_utils.py
@@ -0,0 +1,93 @@
+import numpy as np
+import torch
+
+
+def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device("cpu"), fixed_length=None):
+ """ Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
+ into a (n+1)-d array, only allow the first dim has variable lengths.
+ Args:
+ sequences: list(n-d tensor or list)
+ dtype: np.dtype or torch.dtype
+ device:
+        fixed_length: pad all sequences to this fixed length; every sequence must have a length <= fixed_length.
+            The return will be of shape [len(sequences), fixed_length, ...]
+ Returns:
+ padded_seqs: ((n+1)-d tensor) padded with zeros
+ mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
+            1 indicates a valid position, 0 otherwise
+ Examples:
+ >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
+ >>> pad_sequences_1d(test_data_list, dtype=torch.long)
+ >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
+ >>> pad_sequences_1d(test_data_3d, dtype=torch.float)
+ >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
+ >>> pad_sequences_1d(test_data_list, dtype=np.float32)
+ >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
+ >>> pad_sequences_1d(test_data_3d, dtype=np.float32)
+ """
+ if isinstance(sequences[0], list):
+ if "torch" in str(dtype):
+ sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]
+ else:
+ sequences = [np.asarray(s, dtype=dtype) for s in sequences]
+
+ extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements
+ lengths = [len(seq) for seq in sequences]
+ if fixed_length is not None:
+ max_length = fixed_length
+ else:
+ max_length = max(lengths)
+ if isinstance(sequences[0], torch.Tensor):
+ assert "torch" in str(dtype), "dtype and input type does not match"
+ padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)
+ mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)
+ else: # np
+ assert "numpy" in str(dtype), "dtype and input type does not match"
+ padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)
+ mask = np.zeros((len(sequences), max_length), dtype=np.float32)
+
+ for idx, seq in enumerate(sequences):
+ end = lengths[idx]
+ padded_seqs[idx, :end] = seq
+ mask[idx, :end] = 1
+ return padded_seqs, mask # , lengths
+
+
+def pad_sequences_2d(sequences, dtype=torch.long):
+ """ Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,
+ only allow the first two dims has variable lengths
+ Args:
+ sequences: list(n-d tensor or list)
+ dtype: torch.long for word indices / torch.float (float32) for other cases
+    Returns:
+        padded_seqs: (n+1)-d tensor padded with zeros
+        mask: (bsz, max_para_len, max_sen_len) float tensor, 1 indicates a valid position
+ Examples:
+ >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
+ >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])
+ >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]
+        >>> pad_sequences_2d(test_data_3d, dtype=torch.float)  # torch.Size([3, 4, 5, 4])
+ >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]
+        >>> pad_sequences_2d(test_data_3d2, dtype=torch.float)  # torch.Size([2, 2, 5, 4])
+ # TODO add support for numpy array
+ """
+ bsz = len(sequences)
+ para_lengths = [len(seq) for seq in sequences]
+ max_para_len = max(para_lengths)
+ sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
+ max_sen_len = max([max(e) for e in sen_lengths])
+
+ if isinstance(sequences[0], torch.Tensor):
+ extra_dims = sequences[0].shape[2:]
+ elif isinstance(sequences[0][0], torch.Tensor):
+ extra_dims = sequences[0][0].shape[1:]
+ else:
+        sequences = [[torch.tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]
+ extra_dims = ()
+
+ padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)
+ mask = torch.zeros(bsz, max_para_len, max_sen_len).float()
+
+ for b_i in range(bsz):
+ for sen_i, sen_l in enumerate(sen_lengths[b_i]):
+ padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
+ mask[b_i, sen_i, :sen_l] = 1
+ return padded_seqs, mask # , sen_lengths
diff --git a/utils/windows_utils.py b/utils/windows_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3527cdfd7107db5d7eb57afe47f3e8b3bbbc15d
--- /dev/null
+++ b/utils/windows_utils.py
@@ -0,0 +1,59 @@
+"""
+Find windows from a video with clip_ids.
+
+A window is defined by a [start_clip_idx, end_clip_idx] pair:
+For example, assuming clip_len = 2 seconds
+[0, 0] meaning a single-clip window covering [0, 2] (seconds)
+[10, 19] meaning a 10-clip window covering [20, 40] (seconds)
+
+"""
+
+
+def convert_clip_ids_to_windows(clip_ids):
+ """ Inverse function of convert_windows_to_clip_ids
+ Args:
+        clip_ids: list(int), each is the index of a clip, starting from 0
+
+ Returns:
+ list(list(int)), each sublist contains two integers which are clip indices.
+        [10, 19] meaning a 10-clip window covering [20, 40] (seconds), if each clip is 2 seconds.
+
+ >>> test_clip_ids = [56, 57, 58, 59, 60, 61, 62] + [64, ] + [67, 68, 69, 70, 71]
+ >>> convert_clip_ids_to_windows(test_clip_ids)
+ [[56, 62], [64, 64], [67, 71]]
+ """
+ windows = []
+ _window = [clip_ids[0], None]
+ last_clip_id = clip_ids[0]
+ for clip_id in clip_ids:
+ if clip_id - last_clip_id > 1: # find gap
+ _window[1] = last_clip_id
+ windows.append(_window)
+ _window = [clip_id, None]
+ last_clip_id = clip_id
+ _window[1] = last_clip_id
+ windows.append(_window)
+ return windows
+
+
+def convert_windows_to_clip_ids(windows):
+ """ Inverse function of convert_clip_ids_to_windows
+ Args:
+ windows: list(list(int)), each sublist contains two integers which are clip indices.
+        [10, 19] meaning a 10-clip window covering [20, 40] (seconds), if each clip is 2 seconds.
+
+ Returns:
+ clip_ids: list(int)
+
+ >>> test_windows =[[56, 62], [64, 64], [67, 71]]
+ >>> convert_windows_to_clip_ids(test_windows)
+    [56, 57, 58, 59, 60, 61, 62, 64, 67, 68, 69, 70, 71]
+ """
+ clip_ids = []
+ for w in windows:
+ clip_ids += list(range(w[0], w[1]+1))
+ return clip_ids
+
+
+def convert_clip_window_to_seconds(window, clip_len=2):
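+    """Convert a [start_clip_idx, end_clip_idx] window to [start_sec, end_sec],
+    e.g., [10, 19] -> [20, 40] with the default clip_len=2."""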
+ return [window[0] * clip_len, (window[1] + 1) * clip_len]