Etienne-David committed
Commit 9d9182c · verified · 1 Parent(s): 0893ee2

Update README.md

Files changed (1):
  1. README.md (+15 -14)
README.md CHANGED
@@ -76,20 +76,21 @@ Please refer to the specific licensing agreements of the contributing institutions
  If you use the VegAnn dataset in your research, please cite the following:
 
 
- ```@article{madec_vegann_2023,
- title = {{VegAnn}, {Vegetation} {Annotation} of multi-crop {RGB} images acquired under diverse conditions for segmentation},
- volume = {10},
- issn = {2052-4463},
- url = {https://doi.org/10.1038/s41597-023-02098-y},
- doi = {10.1038/s41597-023-02098-y},
- abstract = {Applying deep learning to images of cropping systems provides new knowledge and insights in research and commercial applications. Semantic segmentation or pixel-wise classification, of RGB images acquired at the ground level, into vegetation and background is a critical step in the estimation of several canopy traits. Current state of the art methodologies based on convolutional neural networks (CNNs) are trained on datasets acquired under controlled or indoor environments. These models are unable to generalize to real-world images and hence need to be fine-tuned using new labelled datasets. This motivated the creation of the VegAnn - Vegetation Annotation - dataset, a collection of 3775 multi-crop RGB images acquired for different phenological stages using different systems and platforms in diverse illumination conditions. We anticipate that VegAnn will help improving segmentation algorithm performances, facilitate benchmarking and promote large-scale crop vegetation segmentation research.},
- number = {1},
- journal = {Scientific Data},
- author = {Madec, Simon and Irfan, Kamran and Velumani, Kaaviya and Baret, Frederic and David, Etienne and Daubige, Gaetan and Samatan, Lucas Bernigaud and Serouart, Mario and Smith, Daniel and James, Chrisbin and Camacho, Fernando and Guo, Wei and De Solan, Benoit and Chapman, Scott C. and Weiss, Marie},
- month = may,
- year = {2023},
- pages = {302},
- }
+ ```
+ @article{madec_vegann_2023,
+ title = {{VegAnn}, {Vegetation} {Annotation} of multi-crop {RGB} images acquired under diverse conditions for segmentation},
+ volume = {10},
+ issn = {2052-4463},
+ url = {https://doi.org/10.1038/s41597-023-02098-y},
+ doi = {10.1038/s41597-023-02098-y},
+ abstract = {Applying deep learning to images of cropping systems provides new knowledge and insights in research and commercial applications. Semantic segmentation or pixel-wise classification, of RGB images acquired at the ground level, into vegetation and background is a critical step in the estimation of several canopy traits. Current state of the art methodologies based on convolutional neural networks (CNNs) are trained on datasets acquired under controlled or indoor environments. These models are unable to generalize to real-world images and hence need to be fine-tuned using new labelled datasets. This motivated the creation of the VegAnn - Vegetation Annotation - dataset, a collection of 3775 multi-crop RGB images acquired for different phenological stages using different systems and platforms in diverse illumination conditions. We anticipate that VegAnn will help improving segmentation algorithm performances, facilitate benchmarking and promote large-scale crop vegetation segmentation research.},
+ number = {1},
+ journal = {Scientific Data},
+ author = {Madec, Simon and Irfan, Kamran and Velumani, Kaaviya and Baret, Frederic and David, Etienne and Daubige, Gaetan and Samatan, Lucas Bernigaud and Serouart, Mario and Smith, Daniel and James, Chrisbin and Camacho, Fernando and Guo, Wei and De Solan, Benoit and Chapman, Scott C. and Weiss, Marie},
+ month = may,
+ year = {2023},
+ pages = {302},
+ }
  ```
 
  ## Additional Information