<!DOCTYPE html>
<html lang="en">
<head>
<title>FLLC3</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link rel="icon" type="image/x-ico" href="images/106point.jpg" />
<link href="https://fonts.googleapis.com/css?family=B612+Mono|Cabin:400,700&display=swap" rel="stylesheet">
<link rel="stylesheet" href="fonts/icomoon/style.css">
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css"
integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
<link rel="stylesheet" href="css/jquery-ui.css">
<link rel="stylesheet" href="css/owl.carousel.min.css">
<link rel="stylesheet" href="css/owl.theme.default.min.css">
<link rel="stylesheet" href="css/owl.theme.default.min.css">
<link rel="stylesheet" href="css/jquery.fancybox.min.css">
<link rel="stylesheet" href="fonts/flaticon/font/flaticon.css">
<link rel="stylesheet" href="css/aos.css">
<link href="css/jquery.mb.YTPlayer.min.css" media="all" rel="stylesheet" type="text/css">
<link rel="stylesheet" href="css/style.css">
<!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
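<!-- Standard Google Analytics (analytics.js) loader snippet -->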
<script>
(function (i, s, o, g, r, a, m) {
i['GoogleAnalyticsObject'] = r;
i[r] = i[r] || function () {
(i[r].q = i[r].q || []).push(arguments)
}, i[r].l = 1 * new Date();
a = s.createElement(o),
m = s.getElementsByTagName(o)[0];
a.async = 1;
a.src = g;
m.parentNode.insertBefore(a, m)
})(window, document, 'script', 'https://www.google-analytics.com/analytics.js', 'ga');
ga('create', 'UA-88572407-1', 'auto');
ga('send', 'pageview');
</script>
</head>
<body data-spy="scroll" data-target=".site-navbar-target" data-offset="300">
<div class="site-wrap">
<div class="site-mobile-menu site-navbar-target">
<div class="site-mobile-menu-header">
<div class="site-mobile-menu-close mt-3">
<span class="icon-close2 js-menu-toggle"></span>
</div>
</div>
<div class="site-mobile-menu-body"></div>
</div>
<div class="site-navbar py-2 js-sticky-header site-navbar-target d-none pl-0 d-lg-block" role="banner">
<div class="container">
<div class="d-flex align-items-center">
<div class="mr-auto">
<nav class="site-navigation position-relative text-right" role="navigation">
<ul class="site-menu main-menu js-clone-nav mr-auto d-none pl-0 d-lg-block">
<li class="active">
<a href="index.html" class="nav-link text-left">Home</a>
</li>
<li>
<a href="index.html#dates" class="nav-link text-left">Important dates</a>
</li>
<li>
<a href="index.html#awards" class="nav-link text-left">organizer</a>
</li>
<li>
<a href="datasets.html" class="nav-link text-left">Datasets</a>
</li>
<li>
<a href="evaluation.html" class="nav-link text-left">evaluation</a>
</li>
<li>
<a href="leaderboard.html" class="nav-link text-left">leaderboard</a>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="challenge.html" id="navbarDropdown"
role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
Previous
</a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="https://fllc-icpr2020.github.io/home/">FLLC2_ICPR2020</a>
<a class="dropdown-item" href="https://facial-landmarks-localization-challenge.github.io/#index">FLLC1_ICME2019</a>
</div>
</li>
</ul>
</nav>
</div>
</div>
</div>
</div>
</div>
<div class="site-blocks-cover overlay inner-page-cover" style="background-image: url('images/intro-background2.jpg');"
data-stellar-background-ratio="0.5">
<div class="container">
<div class="row align-items-center justify-content-center">
<div class="col-md-10 text-center" data-aos="fade-up">
<h1>The 3rd Grand Challenge of 106-Point Facial Landmark Localization</h1>
<h3> ICME 2021</h3>
<h3> July 5 2021, Shenzhen, China</h3>
</div>
</div>
</div>
</div>
<div class="site-section">
<div class="container">
<div class="row">
<div class="col-lg-12" id="schedule">
</div>
<!---------------------------- dataset ---------------------------------------->
<!-- Challenge 1-->
<div class="col-lg-12" id="datasets" style="padding-top:80px;margin-top:-80px;">
<div class="section-title">
<h2>Dataset Description</h2>
<br><br><br>
<div class="trend-entry d-flex">
<p>
The structure of the files provided to competitors is shown below. The <i>picture_mask</i> directory contains face images covered with virtual masks; the mask-adding algorithm is publicly available in <a href="https://github.com/JDAI-CV/FaceX-Zoo">FaceX-Zoo<sup>[3]</sup></a>.
Besides, we use a face parsing model trained on the <a href="https://github.com/JDAI-CV/lapa-dataset">LaPa<sup>[2]</sup></a> dataset to handle facial occlusion.
The <i>bbox</i> file contains the face bounding boxes produced by our detector for the training/validation sets. You are also allowed to employ your own face detector.
<br>
Participants may likewise use their own mask-adding algorithms and additional images for training.
However, if you do so, you must describe in detail the additional data and mask-adding methods when you submit your model and paper.
Meanwhile, you need to send the additional data to fllc3_icme@163.com so that we can reproduce and verify the results.
</p>
</div>
<img src="images/tree.png" align="middle" width="20%">
<p>Figure 1: File structure</p>
<br>
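<p>
As a rough illustration, the snippet below sketches how one might iterate over this layout in Python.
All folder and file names, and the one-"x y"-pair-per-line landmark format, are assumptions based on Figure 1,
not an official specification; please consult the released data for the authoritative format.
</p>
<pre><code>import os

# A minimal sketch, assuming the layout in Figure 1; names are hypothetical.
root = "JD-landmark-mask/train"

def load_landmarks(path):
    # Assumed format: one "x y" pair per line, 106 lines in total.
    with open(path) as f:
        return [tuple(map(float, line.split())) for line in f]

for name in sorted(os.listdir(os.path.join(root, "picture_mask"))):
    stem = os.path.splitext(name)[0]
    landmark_path = os.path.join(root, "landmark", stem + ".txt")
    if os.path.exists(landmark_path):
        points = load_landmarks(landmark_path)
        assert len(points) == 106  # 106-point annotation scheme
</code></pre>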
<div class="trend-entry d-flex">
<div class="trend-contents">
<!---------------- training set ------------>
<h4>Training dataset:</h4>
<br>
<p>
We collect an incremental dataset named JD-landmark-mask.
Based on the face images in the JD-landmark<sup>[1, 4-10]</sup> dataset, we
provide virtual-masked face images generated by our
virtual mask-adding algorithm<sup>[3]</sup>. This dataset, containing
20,386 faces with landmark annotations, is accessible to the participants.
Participants should train their models on the masked pictures.
The images and landmarks will be released on March 4, 2021.
</p>
<div class="section-title">
<img src="images/trainset.png" width="80%">
<p>Figure 2: Examples from the training dataset</p>
</div>
</div>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<!---------------- validation set ------------>
<h4>Validation dataset:</h4>
<br>
<p>
The validation set consists of 2,000 virtual-masked images generated
by the same method as the training dataset. The participants'
models will be evaluated on this set before the final evaluation.
The masked images, without ground truth, will also be released on March 4, 2021.
<br>
During the validation phase (March 7 - March 31, 2021), participants can send their results on the validation set to us,
and we will report the performance back by updating the leaderboard. Each team may submit only once per day.
For more information, please refer to the <a href="evaluation.html#guidelines">submission guidelines</a>.
</p>
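<p>
For illustration only, here is a minimal sketch of packaging validation predictions into a results file,
assuming a simple hypothetical format (the image name followed by one "x y" pair per line for the 106 points).
The authoritative format is the one defined in the <a href="evaluation.html#guidelines">submission guidelines</a>.
</p>
<pre><code># Hypothetical results writer; the real submission format is defined
# in the official submission guidelines, not here.
def write_results(predictions, out_path="results.txt"):
    # predictions: dict mapping image name -> list of 106 (x, y) tuples
    with open(out_path, "w") as f:
        for image_name, points in sorted(predictions.items()):
            assert len(points) == 106, "expected the 106-point scheme"
            f.write(image_name + "\n")
            for x, y in points:
                f.write(f"{x:.2f} {y:.2f}\n")

# Example: one dummy image whose 106 points all sit at the origin.
write_results({"val_0001.jpg": [(0.0, 0.0)] * 106})
</code></pre>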
<div class="section-title">
<img src="images/valset.png" width="80%">
<p>Figure 3: Examples from the validation dataset</p>
</div>
</div>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<!---------------- test set ------------>
<h4>Test dataset:</h4>
<br>
<p>
The test set contains 2,000 virtual-masked images and 2,000 in-the-wild real-masked images
collected from the internet.
It will be used for the final evaluation.
<br>
To prevent cheating, the test images will <strong>not</strong> be released to participants.
Participants must submit their model and paper within one week, from April 1 to April 7, 2021.
Specifically, all training materials, including code, models, and technical reports, must be sent to us before April 7, 2021.
</p>
<ul>
<strong class="text-danger">Note!: </strong>
 
the test dataset is <strong>blind</strong> to participants
throughout the whole competition.There are inevitable domain gaps between
virtual-masked and real-masked face images. Besides, the real-masked face images,
which contain more noised, blurry, and low-resolution images,
are more challenging than the virtual-masked face images.
There are some examples below. For privacy reasons, We temporarily blur the real-masked faces.
</ul>
<div class="section-title">
<img src="images/testset.png" width="80%">
<p>Figure 4: Examples from the test dataset</p>
</div>
</div>
</div>
</div>
<!------------------------ Information ----------------------->
<div class="col-lg-12" id="commitment" style="padding-top:80px;margin-top:-80px;">
<div class="section-title">
<br><br><br>
<h2>Commitment</h2>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<ul>
<li><p>The dataset is available for this grand challenge only.</p></li>
<li><p>You agree not to reproduce, duplicate, copy, sell, trade, resell or exploit for any commercial purposes, any portion of the images and any portion of derived data.</p></li>
<li><p>You agree not to further copy, publish or distribute any portion of the annotations of the dataset. However, copies of the dataset may be made for internal use at a single site within the same organization.</p></li>
<li><p>The authors acknowledge that, if they decide to submit, the resulting curves may be used by the organizers in any related visualizations/results.
The authors are prohibited from sharing their results with other contesting teams.</p></li>
<li><p>We reserve the right to terminate your access to the dataset at any time.</p></li>
<li><p>You will be responsible for the consequences of any violation.</p></li>
</ul>
</div>
</div>
</div>
</div>
<!------------------------------- citation --------------------------->
<div class="col-lg-12" id="citation" style="padding-top:80px;margin-top:-80px;">
<div class="section-title">
<br><br><br>
<h2>Citation</h2>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p>
<strong>Please cite our papers if you use the dataset.</strong>
<br><br>
[1] Y. Liu, H. Shen, Y. Si, X. Wang, X. Zhu, H. Shi et al. Grand challenge of 106-point facial landmark localization. In ICMEW, 2019.
<br>
[2] Y. Liu, H. Shi, H. Shen, Y. Si, X. Wang, T. Mei. A New Dataset and Boundary-Attention Semantic Segmentation for Face Parsing. In AAAI, 2020.
<br>
[3] M. Xiang, Y. Liu, T. Liao, X. Zhu, C. Yang, W. Liu, H. Shi. The 3rd Grand Challenge of Lightweight 106-Point Facial Landmark Localization on Masked Faces. In ICMEW, 2021.
<br>
[4] J. Wang, Y. Liu, Y. Hu, H. Shi, T. Mei. FaceX-Zoo: A PyTorch Toolbox for Face Recognition. arXiv preprint, 2021.
</p>
</div>
</div>
</div>
<!------------------------ reference ------------------------>
<!------------------------------- citation --------------------------->
<div class="col-lg-12" id="reference" style="padding-top:80px;margin-top:-80px;">
<div class="section-title">
<br><br><br>
<h2>References</h2>
</div>
<div class="trend-entry d-flex">
<div class="trend-contents">
<p>
[4] C. Sagonas, G. Tzimiropoulos, S. Zafeiriou, and M. Pantic. 300 faces in-the-wild challenge: The first facial landmark localization Challenge. In ICCVW, 2013.
<br>
[5] C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, and M. Pantic. 300 faces in-the-wild challenge: Database and results. In IVC, 2016.
<br>
[6] P. Belhumeur, D. Jacobs, D. Kriegman, N. Kumar. Localizing parts of faces using a consensus of exemplars. In CVPR, 2011.
<br>
[7] X. Zhu, D. Ramanan. Face detection, pose estimation and landmark localization in the wild. In CVPR, 2012.
<br>
[8] V. Le, J. Brandt, Z. Lin, L. Bourdev, T. S. Huang. Interactive facial feature localization. In ECCV, 2012.
<br>
[9] C. Sagonas, G. Tzimiropoulos, S. Zafeiriou, and M. Pantic. A semi-automatic methodology for facial landmark annotation. In CVPR, 2013.
<br>
[10] I. Kemelmacher-Shlizerman, S. M. Seitz, D. Miller, and E. Brossard. The megaface benchmark: 1 million faces for recognition at scale. In CVPR, 2016.
</p>
</div>
</div>
</div>
</div>
<!------------------------------- boarder ---------------------------------------->
<div class="col-lg-12">
<div style="display:inline-block;width:500px;">
<script type="text/javascript" src="//rc.rev
olvermaps.com/0/0/7.js?i=2hlmeh3dic1&m=0&c=ff0000&cr1=ffffff&br=19&sx=0"
async="async"></script>
</div>
</div>
</div>
</div>
<!-- END section -->
<div class="footer">
<div class="container">
<div class="row">
<div class="col-12">
<div class="copyright">
<p>
<!-- Link back to Colorlib can't be removed. Template is licensed under CC BY 3.0. -->
Copyright ©<script>document.write(new Date().getFullYear());</script>
All rights reserved | This template is made with <i class="icon-heart text-danger"
aria-hidden="true"></i> by <a
href="https://colorlib.com" target="_blank">Colorlib</a>
<!-- Link back to Colorlib can't be removed. Template is licensed under CC BY 3.0. -->
</p>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- .site-wrap -->
<!-- loader -->
<div id="loader" class="show fullscreen">
<svg class="circular" width="48px" height="48px">
<circle class="path-bg" cx="24" cy="24" r="22" fill="none" stroke-width="4" stroke="#eeeeee"/>
<circle class="path" cx="24" cy="24" r="22" fill="none" stroke-width="4" stroke-miterlimit="10"
stroke="#ff5e15"/>
</svg>
</div>
<script src="js/jquery-3.3.1.min.js"></script>
<script src="js/jquery-migrate-3.0.1.min.js"></script>
<script src="js/jquery-ui.js"></script>
<script src="js/popper.min.js"></script>
<script src="js/bootstrap.min.js"></script>
<script src="js/owl.carousel.min.js"></script>
<script src="js/jquery.stellar.min.js"></script>
<script src="js/jquery.countdown.min.js"></script>
<script src="js/bootstrap-datepicker.min.js"></script>
<script src="js/jquery.easing.1.3.js"></script>
<script src="js/aos.js"></script>
<script src="js/jquery.fancybox.min.js"></script>
<script src="js/jquery.sticky.js"></script>
<script src="js/jquery.mb.YTPlayer.min.js"></script>
<script src="js/main.js"></script>
</body>
</html>