data (dict) |
---|
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYSWsV",
"doi": "10.1109/TVCG.2013.49",
"abstract": "Provides a listing of current committee members.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of current committee members.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of current committee members.",
"title": "International Program Committee and Steering Committee",
"normalizedTitle": "International Program Committee and Steering Committee",
"fno": "ttg2013040000ix",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "ix",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000viii",
"articleId": "13rRUxDIthd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000x",
"articleId": "13rRUEgs2tr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUEgs2tr",
"doi": "10.1109/TVCG.2013.55",
"abstract": "The publication offers a note of thanks and lists its reviewers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The publication offers a note of thanks and lists its reviewers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The publication offers a note of thanks and lists its reviewers.",
"title": "Paper reviewers",
"normalizedTitle": "Paper reviewers",
"fno": "ttg2013040000x",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "x-xi",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000ix",
"articleId": "13rRUyYSWsV",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xii",
"articleId": "13rRUILLkvp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLkvp",
"doi": "10.1109/TVCG.2013.51",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Keynote Speaker: Virtual Reality: Current Uses in Medical Simulation and Future",
"normalizedTitle": "Keynote Speaker: Virtual Reality: Current Uses in Medical Simulation and Future",
"fno": "ttg2013040000xii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Richard",
"surname": "Satava",
"fullName": "Richard Satava",
"affiliation": "University of Washington Medical Center, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/colcom/2007/1318/0/04553882",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2007/04553882/12OmNASILUZ",
"parentPublication": {
"id": "proceedings/colcom/2007/1318/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mse/2011/0548/0/05937072",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/mse/2011/05937072/12OmNBQkx3o",
"parentPublication": {
"id": "proceedings/mse/2011/0548/0",
"title": "2011 IEEE International Conference on Microelectronic Systems Education (MSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hase/2001/1275/0/12750004",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/hase/2001/12750004/12OmNCd2rF1",
"parentPublication": {
"id": "proceedings/hase/2001/1275/0",
"title": "Proceedings Sixth IEEE International Symposium on High Assurance Systems Engineering. Special Topic: Impact of Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2015/6879/0/07156348",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2015/07156348/12OmNvlPkAl",
"parentPublication": {
"id": "proceedings/pacificvis/2015/6879/0",
"title": "2015 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2012/4668/0/4668a006",
"title": "Keynote Speaker-2: Emotion, Future Smart Technology, and Convergence",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2012/4668a006/12OmNwseES3",
"parentPublication": {
"id": "proceedings/isms/2012/4668/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taicpart-mutation/2007/2984/0/2984xxii",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/taicpart-mutation/2007/2984xxii/12OmNyOHFZr",
"parentPublication": {
"id": "proceedings/taicpart-mutation/2007/2984/0",
"title": "Testing: Academic and Industrial Conference Practice and Research Techniques - MUTATION",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/colcom/2007/1318/0/04553881",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2007/04553881/12OmNzuZUqn",
"parentPublication": {
"id": "proceedings/colcom/2007/1318/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549339",
"title": "Keynote speaker: Virtual reality: Current uses in medical simulation and future opportunities & medical technologies that VR can exploit in education and training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549339/12OmNzyGH3i",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400z036",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400z036/1MrFQvu4EFi",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000x",
"articleId": "13rRUEgs2tr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xiii",
"articleId": "13rRUxYrbMe",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbMe",
"doi": "10.1109/TVCG.2013.52",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Keynote Speaker: Welcome to the Future! Technology and Innovation at Disney",
"normalizedTitle": "Keynote Speaker: Welcome to the Future! Technology and Innovation at Disney",
"fno": "ttg2013040000xiii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Mark",
"surname": "Mine",
"fullName": "Mark Mine",
"affiliation": "Walt Disney Imagineering, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xiii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/colcom/2007/1318/0/04553882",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2007/04553882/12OmNASILUZ",
"parentPublication": {
"id": "proceedings/colcom/2007/1318/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mse/2011/0548/0/05937072",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/mse/2011/05937072/12OmNBQkx3o",
"parentPublication": {
"id": "proceedings/mse/2011/0548/0",
"title": "2011 IEEE International Conference on Microelectronic Systems Education (MSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-amh/2009/5508/0/05336738",
"title": "Keynote speaker - Mixing reality magic at Disney theme parks",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-amh/2009/05336738/12OmNC1Gujd",
"parentPublication": {
"id": "proceedings/ismar-amh/2009/5508/0",
"title": "2009 IEEE International Symposium on Mixed and Augmented Reality - Arts, Media and Humanities",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hase/2001/1275/0/12750004",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/hase/2001/12750004/12OmNCd2rF1",
"parentPublication": {
"id": "proceedings/hase/2001/1275/0",
"title": "Proceedings Sixth IEEE International Symposium on High Assurance Systems Engineering. Special Topic: Impact of Networking",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isms/2012/4668/0/4668a006",
"title": "Keynote Speaker-2: Emotion, Future Smart Technology, and Convergence",
"doi": null,
"abstractUrl": "/proceedings-article/isms/2012/4668a006/12OmNwseES3",
"parentPublication": {
"id": "proceedings/isms/2012/4668/0",
"title": "Intelligent Systems, Modelling and Simulation, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549340",
"title": "Keynote speaker: Welcome to the future! technology and innovation at disney",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549340/12OmNxGALha",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/taicpart-mutation/2007/2984/0/2984xxii",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/taicpart-mutation/2007/2984xxii/12OmNyOHFZr",
"parentPublication": {
"id": "proceedings/taicpart-mutation/2007/2984/0",
"title": "Testing: Academic and Industrial Conference Practice and Research Techniques - MUTATION",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/colcom/2007/1318/0/04553881",
"title": "Keynote speaker",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2007/04553881/12OmNzuZUqn",
"parentPublication": {
"id": "proceedings/colcom/2007/1318/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2022/9744/0/974400z036",
"title": "Keynote Speaker",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2022/974400z036/1MrFQvu4EFi",
"parentPublication": {
"id": "proceedings/ictai/2022/9744/0",
"title": "2022 IEEE 34th International Conference on Tools with Artificial Intelligence (ICTAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xii",
"articleId": "13rRUILLkvp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xiv",
"articleId": "13rRUIJuxpw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIJuxpw",
"doi": "10.1109/TVCG.2013.50",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Keynote Speaker: Infinite Reality: Avatars, Eternal Life, New Worlds, and the Dawn of the Virtual Revolution",
"normalizedTitle": "Keynote Speaker: Infinite Reality: Avatars, Eternal Life, New Worlds, and the Dawn of the Virtual Revolution",
"fno": "ttg2013040000xiv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Jeremy",
"surname": "Bailenson",
"fullName": "Jeremy Bailenson",
"affiliation": "Stanford University, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xiv",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ism/2016/4571/0/4571a253",
"title": "Infinite-Resolution 3D Modeling from a Single Image for Free-Form Revolution Objects",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2016/4571a253/12OmNCd2rCk",
"parentPublication": {
"id": "proceedings/ism/2016/4571/0",
"title": "2016 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sp/2013/4977/0/4977a048",
"title": "SoK: Eternal War in Memory",
"doi": null,
"abstractUrl": "/proceedings-article/sp/2013/4977a048/12OmNqyDjmB",
"parentPublication": {
"id": "proceedings/sp/2013/4977/0",
"title": "2013 IEEE Symposium on Security and Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549341",
"title": "Keynote speaker: Infinite reality: Avatars, eternal life, new worlds, and the dawn of the virtual revolution",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549341/12OmNvzJGas",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/agile/2008/3321/0/3321a125",
"title": "The Price of Agile Is Eternal Vigilance",
"doi": null,
"abstractUrl": "/proceedings-article/agile/2008/3321a125/12OmNxisQYZ",
"parentPublication": {
"id": "proceedings/agile/2008/3321/0",
"title": "AGILE Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dese/2011/4593/0/4593a392",
"title": "Eternal Cloud Computation Application Development",
"doi": null,
"abstractUrl": "/proceedings-article/dese/2011/4593a392/12OmNyvY9ub",
"parentPublication": {
"id": "proceedings/dese/2011/4593/0",
"title": "2011 Developments in E-systems Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/1987/03/mcg1987030045",
"title": "Infinite Control Points-A Method for Representing Surfaces of Revolution Using Boundary Data",
"doi": null,
"abstractUrl": "/magazine/cg/1987/03/mcg1987030045/13rRUB7a13p",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/an/2016/04/man2016040074",
"title": "The Dawn of Digital Light",
"doi": null,
"abstractUrl": "/magazine/an/2016/04/man2016040074/13rRUwInv6o",
"parentPublication": {
"id": "mags/an",
"title": "IEEE Annals of the History of Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/1997/10/rx016",
"title": "Dawn of the Internet Appliance",
"doi": null,
"abstractUrl": "/magazine/co/1997/10/rx016/13rRUxNmPH5",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/sp/2014/03/msp2014030045",
"title": "Eternal War in Memory",
"doi": null,
"abstractUrl": "/magazine/sp/2014/03/msp2014030045/13rRUyeTVgj",
"parentPublication": {
"id": "mags/sp",
"title": "IEEE Security & Privacy",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/5555/01/09932681",
"title": "DAWN: Domain Generalization Based Network Alignment",
"doi": null,
"abstractUrl": "/journal/bd/5555/01/09932681/1HVseFyPGik",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xiii",
"articleId": "13rRUxYrbMe",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xv",
"articleId": "13rRUxBa5bX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5bX",
"doi": "10.1109/TVCG.2013.56",
"abstract": "Presents the recipient of the 2012 Virtual Reality Career Award.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the recipient of the 2012 Virtual Reality Career Award.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the recipient of the 2012 Virtual Reality Career Award.",
"title": "The 2012 Virtual Reality Career Award",
"normalizedTitle": "The 2012 Virtual Reality Career Award",
"fno": "ttg2013040000xv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [
{
"givenName": "Lawrence",
"surname": "Rosenblum",
"fullName": "Lawrence Rosenblum",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xv",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xiv",
"articleId": "13rRUIJuxpw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xvi",
"articleId": "13rRUyfKIHM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyfKIHM",
"doi": "10.1109/TVCG.2013.57",
"abstract": "Presents the recipient of the 2012 Virtual Reality Technical Achievement Award.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the recipient of the 2012 Virtual Reality Technical Achievement Award.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the recipient of the 2012 Virtual Reality Technical Achievement Award.",
"title": "The 2012 Virtual Reality Technical Achievement Award",
"normalizedTitle": "The 2012 Virtual Reality Technical Achievement Award",
"fno": "ttg2013040000xvi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [
{
"givenName": "Dieter",
"surname": "Schmalstieg",
"fullName": "Dieter Schmalstieg",
"affiliation": "Graz University of Technology, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xvi",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xv",
"articleId": "13rRUxBa5bX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xvii",
"articleId": "13rRUx0gepX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0gepX",
"doi": "10.1109/TVCG.2013.58",
"abstract": "Presents the recipient of the 2013 Virtual Reality Career Award.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the recipient of the 2013 Virtual Reality Career Award.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the recipient of the 2013 Virtual Reality Career Award.",
"title": "The 2013 Virtual Reality Career Award",
"normalizedTitle": "The 2013 Virtual Reality Career Award",
"fno": "ttg2013040000xvii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [
{
"givenName": "Henry",
"surname": "Fuchs",
"fullName": "Henry Fuchs",
"affiliation": "The University of North Carolina at Chapel Hill",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xvii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xvi",
"articleId": "13rRUyfKIHM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xviii",
"articleId": "13rRUy0qnGl",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy0qnGl",
"doi": "10.1109/TVCG.2013.59",
"abstract": "Presents the recipient of the 2013 Virtual Reality Technical Achievement Award.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the recipient of the 2013 Virtual Reality Technical Achievement Award.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the recipient of the 2013 Virtual Reality Technical Achievement Award.",
"title": "The 2013 Virtual Reality Technical Achievement Award",
"normalizedTitle": "The 2013 Virtual Reality Technical Achievement Award",
"fno": "ttg2013040000xviii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [
{
"givenName": "Mark",
"surname": "Billinghurst",
"fullName": "Mark Billinghurst",
"affiliation": "University of Canterbury, New Zealand",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xviii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xvii",
"articleId": "13rRUx0gepX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040529",
"articleId": "13rRUwInvyx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvyx",
"doi": "10.1109/TVCG.2013.43",
"abstract": "In our research agenda to study the effects of immersion (level of fidelity) on various tasks in virtual reality (VR) systems, we have found that the most generalizable findings come not from direct comparisons of different technologies, but from controlled simulations of those technologies. We call this the mixed reality (MR) simulation approach. However, the validity of MR simulation, especially when different simulator platforms are used, can be questioned. In this paper, we report the results of an experiment examining the effects of field of regard (FOR) and head tracking on the analysis of volume visualized micro-CT datasets, and compare them with those from a previous study. The original study used a CAVE-like display as the MR simulator platform, while the present study used a high-end head-mounted display (HMD). Out of the 24 combinations of system characteristics and tasks tested on the two platforms, we found that the results produced by the two different MR simulators were similar in 20 cases. However, only one of the significant effects found in the original experiment for quantitative tasks was reproduced in the present study. Our observations provide evidence both for and against the validity of MR simulation, and give insight into the differences caused by different MR simulator platforms. The present experiment also examined new conditions not present in the original study, and produced new significant results, which confirm and extend previous existing knowledge on the effects of FOR and head tracking. We provide design guidelines for choosing display systems that can improve the effectiveness of volume visualization applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In our research agenda to study the effects of immersion (level of fidelity) on various tasks in virtual reality (VR) systems, we have found that the most generalizable findings come not from direct comparisons of different technologies, but from controlled simulations of those technologies. We call this the mixed reality (MR) simulation approach. However, the validity of MR simulation, especially when different simulator platforms are used, can be questioned. In this paper, we report the results of an experiment examining the effects of field of regard (FOR) and head tracking on the analysis of volume visualized micro-CT datasets, and compare them with those from a previous study. The original study used a CAVE-like display as the MR simulator platform, while the present study used a high-end head-mounted display (HMD). Out of the 24 combinations of system characteristics and tasks tested on the two platforms, we found that the results produced by the two different MR simulators were similar in 20 cases. However, only one of the significant effects found in the original experiment for quantitative tasks was reproduced in the present study. Our observations provide evidence both for and against the validity of MR simulation, and give insight into the differences caused by different MR simulator platforms. The present experiment also examined new conditions not present in the original study, and produced new significant results, which confirm and extend previous existing knowledge on the effects of FOR and head tracking. We provide design guidelines for choosing display systems that can improve the effectiveness of volume visualization applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In our research agenda to study the effects of immersion (level of fidelity) on various tasks in virtual reality (VR) systems, we have found that the most generalizable findings come not from direct comparisons of different technologies, but from controlled simulations of those technologies. We call this the mixed reality (MR) simulation approach. However, the validity of MR simulation, especially when different simulator platforms are used, can be questioned. In this paper, we report the results of an experiment examining the effects of field of regard (FOR) and head tracking on the analysis of volume visualized micro-CT datasets, and compare them with those from a previous study. The original study used a CAVE-like display as the MR simulator platform, while the present study used a high-end head-mounted display (HMD). Out of the 24 combinations of system characteristics and tasks tested on the two platforms, we found that the results produced by the two different MR simulators were similar in 20 cases. However, only one of the significant effects found in the original experiment for quantitative tasks was reproduced in the present study. Our observations provide evidence both for and against the validity of MR simulation, and give insight into the differences caused by different MR simulator platforms. The present experiment also examined new conditions not present in the original study, and produced new significant results, which confirm and extend previous existing knowledge on the effects of FOR and head tracking. We provide design guidelines for choosing display systems that can improve the effectiveness of volume visualization applications.",
"title": "Validation of the MR Simulation Approach for Evaluating the Effects of Immersion on Visual Analysis of Volume Data",
"normalizedTitle": "Validation of the MR Simulation Approach for Evaluating the Effects of Immersion on Visual Analysis of Volume Data",
"fno": "ttg2013040529",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Visualization",
"Mice",
"Solid Modeling",
"Head",
"Training",
"Computational Modeling",
"Virtual Environments",
"MR Simulator",
"Immersion",
"Micro CT",
"Volume Visualization",
"Virtual Reality",
"3 D Visualization",
"HMD"
],
"authors": [
{
"givenName": "B.",
"surname": "Laha",
"fullName": "B. Laha",
"affiliation": "Dept. of Comput. Sci., Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. A.",
"surname": "Bowman",
"fullName": "D. A. Bowman",
"affiliation": "Dept. of Comput. Sci., Center for Human-Comput. Interaction, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. D.",
"surname": "Schiffbauer",
"fullName": "J. D. Schiffbauer",
"affiliation": "Dept. of Geol. Sci., Univ. of Missouri, Columbia, MO, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "529-538",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2016/0836/0/07504730",
"title": "Effects of field of regard and stereoscopy and the validity of MR simulation for visual analysis of scientific data",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504730/12OmNBbJTpU",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2012/4651/0/4651a663",
"title": "Instruction for Remote MR Cooperative Work with Captured Still Worker's View Video",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2012/4651a663/12OmNxdDFSs",
"parentPublication": {
"id": "proceedings/aina/2012/4651/0",
"title": "2012 IEEE 26th International Conference on Advanced Information Networking and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836524",
"title": "Perceptual Issues of a Passive Haptics Feedback Based MR System",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836524/12OmNxecS4t",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isar/2001/1375/0/13750169",
"title": "MR2 (MR Square): A Mixed-Reality Meeting Room",
"doi": null,
"abstractUrl": "/proceedings-article/isar/2001/13750169/12OmNzyYibC",
"parentPublication": {
"id": "proceedings/isar/2001/1375/0",
"title": "Proceedings IEEE and ACM International Symposium on Augmented Reality",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg2012040597",
"title": "Effects of Immersion on Visual Analysis of Volume Data",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg2012040597/13rRUxASuve",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a414",
"title": "Evaluating the Object-Centered User Interface in Head-Worn Mixed Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a414/1JrRiVjEd44",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798024",
"title": "Head Pointer or Eye Gaze: Which Helps More in MR Remote Collaboration?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798024/1cJ0MmguvG8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797957",
"title": "A Simulation for Examining the Effects of Inaccurate Head Tracking on Drivers of Vehicles with Transparent Cockpit Projections",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797957/1cJ15UmejkI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/12/09199574",
"title": "Gaze-Dependent Simulation of Light Perception in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2020/12/09199574/1ncgnMqzLJm",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2021/4106/0/410600a389",
"title": "Effects of Immersive Spherical Video-based Virtual Reality on Cognition and Affect Outcomes of Learning: A Meta-analysis",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2021/410600a389/1vJZUuZGuWY",
"parentPublication": {
"id": "proceedings/icalt/2021/4106/0",
"title": "2021 International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040000xviii",
"articleId": "13rRUy0qnGl",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040539",
"articleId": "13rRUwInvl1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgCr",
"name": "ttg2013040529s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040529s1.zip",
"extension": "zip",
"size": "14.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvl1",
"doi": "10.1109/TVCG.2013.25",
"abstract": "Health sciences students often practice and are evaluated on interview and exam skills by working with standardized patients (people that role play having a disease or condition). However, standardized patients do not exist for certain vulnerable populations such as children and the intellectually disabled. As a result, students receive little to no exposure to vulnerable populations before becoming working professionals. To address this problem and thereby increase exposure to vulnerable populations, we propose using virtual humans to simulate members of vulnerable populations. We created a mixed reality pediatric patient that allowed students to practice pediatric developmental exams. Practicing several exams is necessary for students to understand how to properly interact with and correctly assess a variety of children. Practice also increases a student's confidence in performing the exam. Effective practice requires students to treat the virtual child realistically. Treating the child realistically might be affected by how the student and virtual child physically interact, so we created two object interaction interfaces - a natural interface and a mouse-based interface. We tested the complete mixed reality exam and also compared the two object interaction interfaces in a within-subjects user study with 22 participants. Our results showed that the participants accepted the virtual child as a child and treated it realistically. Participants also preferred the natural interface, but the interface did not affect how realistically participants treated the virtual child.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Health sciences students often practice and are evaluated on interview and exam skills by working with standardized patients (people that role play having a disease or condition). However, standardized patients do not exist for certain vulnerable populations such as children and the intellectually disabled. As a result, students receive little to no exposure to vulnerable populations before becoming working professionals. To address this problem and thereby increase exposure to vulnerable populations, we propose using virtual humans to simulate members of vulnerable populations. We created a mixed reality pediatric patient that allowed students to practice pediatric developmental exams. Practicing several exams is necessary for students to understand how to properly interact with and correctly assess a variety of children. Practice also increases a student's confidence in performing the exam. Effective practice requires students to treat the virtual child realistically. Treating the child realistically might be affected by how the student and virtual child physically interact, so we created two object interaction interfaces - a natural interface and a mouse-based interface. We tested the complete mixed reality exam and also compared the two object interaction interfaces in a within-subjects user study with 22 participants. Our results showed that the participants accepted the virtual child as a child and treated it realistically. Participants also preferred the natural interface, but the interface did not affect how realistically participants treated the virtual child.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Health sciences students often practice and are evaluated on interview and exam skills by working with standardized patients (people that role play having a disease or condition). However, standardized patients do not exist for certain vulnerable populations such as children and the intellectually disabled. As a result, students receive little to no exposure to vulnerable populations before becoming working professionals. To address this problem and thereby increase exposure to vulnerable populations, we propose using virtual humans to simulate members of vulnerable populations. We created a mixed reality pediatric patient that allowed students to practice pediatric developmental exams. Practicing several exams is necessary for students to understand how to properly interact with and correctly assess a variety of children. Practice also increases a student's confidence in performing the exam. Effective practice requires students to treat the virtual child realistically. Treating the child realistically might be affected by how the student and virtual child physically interact, so we created two object interaction interfaces - a natural interface and a mouse-based interface. We tested the complete mixed reality exam and also compared the two object interaction interfaces in a within-subjects user study with 22 participants. Our results showed that the participants accepted the virtual child as a child and treated it realistically. Participants also preferred the natural interface, but the interface did not affect how realistically participants treated the virtual child.",
"title": "Applying Mixed Reality to Simulate Vulnerable Populations for Practicing Clinical Communication Skills",
"normalizedTitle": "Applying Mixed Reality to Simulate Vulnerable Populations for Practicing Clinical Communication Skills",
"fno": "ttg2013040539",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Pediatrics",
"Shape",
"Virtual Reality",
"Sociology",
"Statistics",
"Training",
"Tutorials",
"Presence",
"Virtual Humans",
"Medical Education",
"Social Presence"
],
"authors": [
{
"givenName": null,
"surname": "Joon Hao Chuah",
"fullName": "Joon Hao Chuah",
"affiliation": "Univ. of Florida, Gainesville, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "Lok",
"fullName": "B. Lok",
"affiliation": "Univ. of Florida, Gainesville, FL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Black",
"fullName": "E. Black",
"affiliation": "Univ. of Florida, Gainesville, FL, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "539-546",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2016/0641/0/07477735",
"title": "A two-sample test for statistical comparisons of shape populations",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2016/07477735/12OmNBqMDq2",
"parentPublication": {
"id": "proceedings/wacv/2016/0641/0",
"title": "2016 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icse-seis/2018/5661/0/566101a033",
"title": "Agile Development for Vulnerable Populations: Lessons Learned and Recommendations",
"doi": null,
"abstractUrl": "/proceedings-article/icse-seis/2018/566101a033/13bd1hyoTxM",
"parentPublication": {
"id": "proceedings/icse-seis/2018/5661/0",
"title": "2018 IEEE/ACM 40th International Conference on Software Engineering: Software Engineering in Society (ICSE-SEIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2017/04/07447731",
"title": "Identifying Cell Populations in Flow Cytometry Data Using Phenotypic Signatures",
"doi": null,
"abstractUrl": "/journal/tb/2017/04/07447731/13rRUwh80Fv",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2019/04/08062805",
"title": "Linear Time Algorithms to Construct Populations Fitting Multiple Constraint Distributions at Genomic Scales",
"doi": null,
"abstractUrl": "/journal/tb/2019/04/08062805/13rRUyoPSVG",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2021/3902/0/09671874",
"title": "Influence in Transient Populations",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2021/09671874/1A8gChPJumc",
"parentPublication": {
"id": "proceedings/big-data/2021/3902/0",
"title": "2021 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a142",
"title": "Twilight Rohingya: The Design and Evaluation of Different Navigation Controls in a Refugee VR Environment",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a142/1I6RQW8Lz4Q",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2022/6819/0/09995130",
"title": "A framework for associating structural variants with cell-specific transcription factors and histone modifications in defect phenotypes",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2022/09995130/1JC2iY1414k",
"parentPublication": {
"id": "proceedings/bibm/2022/6819/0",
"title": "2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2022/5305/0/10089301",
"title": "Responsible Artificial Intelligence for Preterm Birth Prediction in Vulnerable Populations",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2022/10089301/1M7Lh7vafhC",
"parentPublication": {
"id": "proceedings/csde/2022/5305/0",
"title": "2022 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vs-games/2019/4540/0/08864547",
"title": "Fancy Fruits - An Augmented Reality Application for Special Needs Education",
"doi": null,
"abstractUrl": "/proceedings-article/vs-games/2019/08864547/1e5ZssRoC3u",
"parentPublication": {
"id": "proceedings/vs-games/2019/4540/0",
"title": "2019 11th International Conference on Virtual Worlds and Games for Serious Applications (VS-Games)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2019/6868/0/09073137",
"title": "Hierarchical Multi-Armed Bandits for Discovering Hidden Populations",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2019/09073137/1jjAfyloOFW",
"parentPublication": {
"id": "proceedings/asonam/2019/6868/0",
"title": "2019 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040529",
"articleId": "13rRUwInvyx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040547",
"articleId": "13rRUwwaKt7",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgFl",
"name": "ttg2013040539s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040539s1.mp4",
"extension": "mp4",
"size": "37.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwwaKt7",
"doi": "10.1109/TVCG.2013.41",
"abstract": "In this paper, we investigate the validity of Mixed Reality (MR) Simulation by conducting an experiment studying the effects of the visual realism of the simulated environment on various search tasks in Augmented Reality (AR). MR Simulation is a practical approach to conducting controlled and repeatable user experiments in MR, including AR. This approach uses a high-fidelity Virtual Reality (VR) display system to simulate a wide range of equal or lower fidelity displays from the MR continuum, for the express purpose of conducting user experiments. For the experiment, we created three virtual models of a real-world location, each with a different perceived level of visual realism. We designed and executed an AR experiment using the real-world location and repeated the experiment within VR using the three virtual models we created. The experiment looked into how fast users could search for both physical and virtual information that was present in the scene. Our experiment demonstrates the usefulness of MR Simulation and provides early evidence for the validity of MR Simulation with respect to AR search tasks performed in immersive VR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we investigate the validity of Mixed Reality (MR) Simulation by conducting an experiment studying the effects of the visual realism of the simulated environment on various search tasks in Augmented Reality (AR). MR Simulation is a practical approach to conducting controlled and repeatable user experiments in MR, including AR. This approach uses a high-fidelity Virtual Reality (VR) display system to simulate a wide range of equal or lower fidelity displays from the MR continuum, for the express purpose of conducting user experiments. For the experiment, we created three virtual models of a real-world location, each with a different perceived level of visual realism. We designed and executed an AR experiment using the real-world location and repeated the experiment within VR using the three virtual models we created. The experiment looked into how fast users could search for both physical and virtual information that was present in the scene. Our experiment demonstrates the usefulness of MR Simulation and provides early evidence for the validity of MR Simulation with respect to AR search tasks performed in immersive VR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we investigate the validity of Mixed Reality (MR) Simulation by conducting an experiment studying the effects of the visual realism of the simulated environment on various search tasks in Augmented Reality (AR). MR Simulation is a practical approach to conducting controlled and repeatable user experiments in MR, including AR. This approach uses a high-fidelity Virtual Reality (VR) display system to simulate a wide range of equal or lower fidelity displays from the MR continuum, for the express purpose of conducting user experiments. For the experiment, we created three virtual models of a real-world location, each with a different perceived level of visual realism. We designed and executed an AR experiment using the real-world location and repeated the experiment within VR using the three virtual models we created. The experiment looked into how fast users could search for both physical and virtual information that was present in the scene. Our experiment demonstrates the usefulness of MR Simulation and provides early evidence for the validity of MR Simulation with respect to AR search tasks performed in immersive VR.",
"title": "The Effects of Visual Realism on Search Tasks in Mixed Reality Simulation",
"normalizedTitle": "The Effects of Visual Realism on Search Tasks in Mixed Reality Simulation",
"fno": "ttg2013040547",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visualization",
"Solid Modeling",
"Lighting",
"Cameras",
"Geometry",
"Virtual Environments",
"Augmented Reality",
"MR Simulation",
"Visual Realism"
],
"authors": [
{
"givenName": null,
"surname": "Cha Lee",
"fullName": "Cha Lee",
"affiliation": "Univ. of California, Santa Barbara, Santa Barbara, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "G. A.",
"surname": "Rincon",
"fullName": "G. A. Rincon",
"affiliation": "Univ. of California, Santa Barbara, Santa Barbara, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "G.",
"surname": "Meyer",
"fullName": "G. Meyer",
"affiliation": "Univ. of California, Santa Barbara, Santa Barbara, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Hollerer",
"fullName": "T. Hollerer",
"affiliation": "Univ. of California, Santa Barbara, Santa Barbara, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D. A.",
"surname": "Bowman",
"fullName": "D. A. Bowman",
"affiliation": "Center for Human-Comput. Interaction, Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "547-556",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/isuvr/2017/3091/0/3091a038",
"title": "Empathic Mixed Reality: Sharing What You Feel and Interacting with What You See",
"doi": null,
"abstractUrl": "/proceedings-article/isuvr/2017/3091a038/12OmNBNM97G",
"parentPublication": {
"id": "proceedings/isuvr/2017/3091/0",
"title": "2017 International Symposium on Ubiquitous Virtual Reality (ISUVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2016/3641/0/3641a037",
"title": "A Single Camera Image Based Approach for Glossy Reflections in Mixed Reality Applications",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2016/3641a037/12OmNrJAdMm",
"parentPublication": {
"id": "proceedings/ismar/2016/3641/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismarw/2016/3740/0/07836491",
"title": "Using Visual Effects to Facilitate Depth Perception for Spatial Tasks in Virtual and Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismarw/2016/07836491/12OmNwdtw9P",
"parentPublication": {
"id": "proceedings/ismarw/2016/3740/0",
"title": "2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cste/2022/8188/0/818800a082",
"title": "Integrating Inquiry-Based Pedagogy with Mixed Reality: Theories and Practices",
"doi": null,
"abstractUrl": "/proceedings-article/cste/2022/818800a082/1J7VZM9bxDi",
"parentPublication": {
"id": "proceedings/cste/2022/8188/0",
"title": "2022 4th International Conference on Computer Science and Technologies in Education (CSTE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a657",
"title": "Mixed Reality for Engineering Design Review Using Finite Element Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a657/1J7WwCL6CCQ",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049710",
"title": "Exploring Plausibility and Presence in Mixed Reality Experiences",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049710/1KYoplRZLWM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798101",
"title": "Mixed Reality in Art Education",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798101/1cJ0RtUtRgk",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisce/2019/5712/0/09107809",
"title": "Spatiotemporal Information System Using Mixed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/icisce/2019/09107809/1koLCLg2qqY",
"parentPublication": {
"id": "proceedings/icisce/2019/5712/0",
"title": "2019 6th International Conference on Information Science and Control Engineering (ICISCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/02/09144483",
"title": "Evaluating the Effects of Non-Isomorphic Rotation on 3D Manipulation Tasks in Mixed Reality Simulation",
"doi": null,
"abstractUrl": "/journal/tg/2022/02/09144483/1lClltCZfOg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2020/7675/0/767500a051",
"title": "Exploring Virtual Environments by Visually Impaired Using a Mixed Reality Cane Without Visual Feedback",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2020/767500a051/1pBMgh7AbaU",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2020/7675/0",
"title": "2020 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040539",
"articleId": "13rRUwInvl1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040557",
"articleId": "13rRUxly95x",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly95x",
"doi": "10.1109/TVCG.2013.26",
"abstract": "Accurately modeling the intrinsic material-dependent damping property for interactive sound rendering is a challenging problem. The Rayleigh damping model is commonly regarded as an adequate engineering model for interactive sound synthesis in virtual environment applications, but this assumption has never been rigorously analyzed. In this paper, we conduct a formal evaluation of this model. Our goal is to determine if auditory perception of material under Rayleigh damping assumption is 'geometryinvariant', i.e. if this approximation model is transferable across different shapes and sizes. First, audio recordings of same-material objects in various shapes and sizes are analyzed to determine if they can be approximated by the Rayleigh damping model with a single set of parameters. Next, we design and conduct a series of psychoacoustic experiments, in subjects evaluate if audio clips synthesized using the Rayleigh damping model are from the same material, when we alter the material, shape, and size parameters. Through both quantitative and qualitative evaluation, we show that the acoustic properties of the Rayleigh damping model for a single material is generally preserved across different geometries of objects consisting of homogeneous materials and is therefore a suitable, geometry-invariant sound model. Our study results also show that consistent with prior crossmodal expectations, visual perception of geometry can affect the auditory perception of materials. These findings facilitate the wide adoption of Rayleigh damping for interactive auditory systems and enable reuse of material parameters under this approximation model across different shapes and sizes, without laborious per-object parameter tuning.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Accurately modeling the intrinsic material-dependent damping property for interactive sound rendering is a challenging problem. The Rayleigh damping model is commonly regarded as an adequate engineering model for interactive sound synthesis in virtual environment applications, but this assumption has never been rigorously analyzed. In this paper, we conduct a formal evaluation of this model. Our goal is to determine if auditory perception of material under Rayleigh damping assumption is 'geometryinvariant', i.e. if this approximation model is transferable across different shapes and sizes. First, audio recordings of same-material objects in various shapes and sizes are analyzed to determine if they can be approximated by the Rayleigh damping model with a single set of parameters. Next, we design and conduct a series of psychoacoustic experiments, in subjects evaluate if audio clips synthesized using the Rayleigh damping model are from the same material, when we alter the material, shape, and size parameters. Through both quantitative and qualitative evaluation, we show that the acoustic properties of the Rayleigh damping model for a single material is generally preserved across different geometries of objects consisting of homogeneous materials and is therefore a suitable, geometry-invariant sound model. Our study results also show that consistent with prior crossmodal expectations, visual perception of geometry can affect the auditory perception of materials. These findings facilitate the wide adoption of Rayleigh damping for interactive auditory systems and enable reuse of material parameters under this approximation model across different shapes and sizes, without laborious per-object parameter tuning.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Accurately modeling the intrinsic material-dependent damping property for interactive sound rendering is a challenging problem. The Rayleigh damping model is commonly regarded as an adequate engineering model for interactive sound synthesis in virtual environment applications, but this assumption has never been rigorously analyzed. In this paper, we conduct a formal evaluation of this model. Our goal is to determine if auditory perception of material under Rayleigh damping assumption is 'geometryinvariant', i.e. if this approximation model is transferable across different shapes and sizes. First, audio recordings of same-material objects in various shapes and sizes are analyzed to determine if they can be approximated by the Rayleigh damping model with a single set of parameters. Next, we design and conduct a series of psychoacoustic experiments, in subjects evaluate if audio clips synthesized using the Rayleigh damping model are from the same material, when we alter the material, shape, and size parameters. Through both quantitative and qualitative evaluation, we show that the acoustic properties of the Rayleigh damping model for a single material is generally preserved across different geometries of objects consisting of homogeneous materials and is therefore a suitable, geometry-invariant sound model. Our study results also show that consistent with prior crossmodal expectations, visual perception of geometry can affect the auditory perception of materials. These findings facilitate the wide adoption of Rayleigh damping for interactive auditory systems and enable reuse of material parameters under this approximation model across different shapes and sizes, without laborious per-object parameter tuning.",
"title": "Auditory Perception of Geometry-Invariant Material Properties",
"normalizedTitle": "Auditory Perception of Geometry-Invariant Material Properties",
"fno": "ttg2013040557",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Damping",
"Shape",
"Psychoacoustic Models",
"Geometry",
"Glass",
"Analytical Models",
"Human Perception Of Material",
"Sound Synthesis"
],
"authors": [
{
"givenName": null,
"surname": "Zhimin Ren",
"fullName": "Zhimin Ren",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Hengchin Yeh",
"fullName": "Hengchin Yeh",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Klatzky",
"fullName": "R. Klatzky",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "M. C.",
"surname": "Lin",
"fullName": "M. C. Lin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "557-566",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2011/4455/0/4455a339",
"title": "Damping Characteristic of Composite Material with Periodic Micro-Tetrahedron Structures",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455a339/12OmNB836Om",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2016/8851/0/8851b554",
"title": "Geometry-Informed Material Recognition",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2016/8851b554/12OmNBEGYJf",
"parentPublication": {
"id": "proceedings/cvpr/2016/8851/0",
"title": "2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/1994/6275/0/00577133",
"title": "Primary segmentation of auditory scenes",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/1994/00577133/12OmNC8MsL8",
"parentPublication": {
"id": "proceedings/icpr/1994/6275/0",
"title": "12th IAPR International Conference on Pattern Recognition, 1994",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2009/3873/0/pid992478",
"title": "Analysis the Relation between Material Structure and Auditory Impression",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2009/pid992478/12OmNxwWoAz",
"parentPublication": {
"id": "proceedings/icicic/2009/3873/0",
"title": "Innovative Computing ,Information and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2015/7644/0/7644a941",
"title": "The Bibliometrics Analysis of the Status and Research Fronts in Preparing Damping Material with Waste Rubber in Terms of Web of Science",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2015/7644a941/12OmNzIUfVT",
"parentPublication": {
"id": "proceedings/icicta/2015/7644/0",
"title": "2015 8th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cdciem/2012/4639/0/4639a647",
"title": "Studies on Vibration Control of Beam by Damping Material",
"doi": null,
"abstractUrl": "/proceedings-article/cdciem/2012/4639a647/12OmNzaQozE",
"parentPublication": {
"id": "proceedings/cdciem/2012/4639/0",
"title": "Computer Distributed Control and Intelligent Environmental Monitoring, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08642440",
"title": "Audio-Material Reconstruction for Virtualized Reality Using a Probabilistic Damping Model",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08642440/17PYEiVyc2u",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wcmeim/2021/2172/0/217200a665",
"title": "Automatic winding forming technology for manufacturing Glass Fiber Reinforced Plastic pipes",
"doi": null,
"abstractUrl": "/proceedings-article/wcmeim/2021/217200a665/1ANLA8qgIQo",
"parentPublication": {
"id": "proceedings/wcmeim/2021/2172/0",
"title": "2021 4th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a118",
"title": "Comparison of Wake Oscillator Models with Different Damping Terms",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a118/1APq9yfO3U4",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400a524",
"title": "Dynamic modeling and vibration reduction analysis of a negative Poisson's ratio honeycomb foundation support",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400a524/1tzz5FblR6g",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040547",
"articleId": "13rRUwwaKt7",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040567",
"articleId": "13rRUxD9gXG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRYf",
"name": "ttg2013040557s1.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040557s1.pdf",
"extension": "pdf",
"size": "253 kB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWRYg",
"name": "ttg2013040557s2.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040557s2.mp4",
"extension": "mp4",
"size": "31.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxD9gXG",
"doi": "10.1109/TVCG.2013.27",
"abstract": "We present an efficient algorithm to compute spatially-varying, direction-dependent artificial reverberation and reflection filters in large dynamic scenes for interactive sound propagation in virtual environments and video games. Our approach performs Monte Carlo integration of local visibility and depth functions to compute directionally-varying reverberation effects. The algorithm also uses a dynamically-generated rectangular aural proxy to efficiently model 2-4 orders of early reflections. These two techniques are combined to generate reflection and reverberation filters which vary with the direction of incidence at the listener. This combination leads to better sound source localization and immersion. The overall algorithm is efficient, easy to implement, and can handle moving sound sources, listeners, and dynamic scenes, with minimal storage overhead. We have integrated our approach with the audio rendering pipeline in Valve's Source game engine, and use it to generate realistic directional sound propagation effects in indoor and outdoor scenes in real-time. We demonstrate, through quantitative comparisons as well as evaluations, that our approach leads to enhanced, immersive multi-modal interaction.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present an efficient algorithm to compute spatially-varying, direction-dependent artificial reverberation and reflection filters in large dynamic scenes for interactive sound propagation in virtual environments and video games. Our approach performs Monte Carlo integration of local visibility and depth functions to compute directionally-varying reverberation effects. The algorithm also uses a dynamically-generated rectangular aural proxy to efficiently model 2-4 orders of early reflections. These two techniques are combined to generate reflection and reverberation filters which vary with the direction of incidence at the listener. This combination leads to better sound source localization and immersion. The overall algorithm is efficient, easy to implement, and can handle moving sound sources, listeners, and dynamic scenes, with minimal storage overhead. We have integrated our approach with the audio rendering pipeline in Valve's Source game engine, and use it to generate realistic directional sound propagation effects in indoor and outdoor scenes in real-time. We demonstrate, through quantitative comparisons as well as evaluations, that our approach leads to enhanced, immersive multi-modal interaction.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present an efficient algorithm to compute spatially-varying, direction-dependent artificial reverberation and reflection filters in large dynamic scenes for interactive sound propagation in virtual environments and video games. Our approach performs Monte Carlo integration of local visibility and depth functions to compute directionally-varying reverberation effects. The algorithm also uses a dynamically-generated rectangular aural proxy to efficiently model 2-4 orders of early reflections. These two techniques are combined to generate reflection and reverberation filters which vary with the direction of incidence at the listener. This combination leads to better sound source localization and immersion. The overall algorithm is efficient, easy to implement, and can handle moving sound sources, listeners, and dynamic scenes, with minimal storage overhead. We have integrated our approach with the audio rendering pipeline in Valve's Source game engine, and use it to generate realistic directional sound propagation effects in indoor and outdoor scenes in real-time. We demonstrate, through quantitative comparisons as well as evaluations, that our approach leads to enhanced, immersive multi-modal interaction.",
"title": "Aural Proxies and Directionally-Varying Reverberation for Interactive Sound Propagation in Virtual Environments",
"normalizedTitle": "Aural Proxies and Directionally-Varying Reverberation for Interactive Sound Propagation in Virtual Environments",
"fno": "ttg2013040567",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Reverberation",
"Computational Modeling",
"Geometry",
"Face",
"Games",
"Mathematical Model",
"Absorption",
"Local Approximate Models",
"Sound Propagation",
"Real Time",
"Directionally Varying Reverberation"
],
"authors": [
{
"givenName": "L.",
"surname": "Antani",
"fullName": "L. Antani",
"affiliation": "Univ. of North Carolina at Chapel Hill, Chapel Hill, NC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "D.",
"surname": "Manocha",
"fullName": "D. Manocha",
"affiliation": "Univ. of North Carolina at Chapel Hill, Chapel Hill, NC, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "567-575",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdma/2011/4455/0/4455a519",
"title": "Experimental Study on a Thickness-Adjustable Absorber for Reverberation Controlling",
"doi": null,
"abstractUrl": "/proceedings-article/icdma/2011/4455a519/12OmNAKM03R",
"parentPublication": {
"id": "proceedings/icdma/2011/4455/0",
"title": "2011 Second International Conference on Digital Manufacturing & Automation",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sive/2014/5781/0/07006289",
"title": "Wave-based sound propagation for VR applications",
"doi": null,
"abstractUrl": "/proceedings-article/sive/2014/07006289/12OmNAXglTR",
"parentPublication": {
"id": "proceedings/sive/2014/5781/0",
"title": "2014 IEEE VR Workshop: Sonic Interaction in Virtual Environments (SIVE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iihmsp/2006/2745/0/04041661",
"title": "Audio Watermarking Based on Reverberation",
"doi": null,
"abstractUrl": "/proceedings-article/iihmsp/2006/04041661/12OmNAZOK0k",
"parentPublication": {
"id": "proceedings/iihmsp/2006/2745/0",
"title": "2006 International Conference on Intelligent Information Hiding and Multimedia",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsem/2010/4223/2/4223b254",
"title": "Application of Rough Set on Extraction of Sound Quality Parameters",
"doi": null,
"abstractUrl": "/proceedings-article/icsem/2010/4223b254/12OmNvyjGhw",
"parentPublication": {
"id": "proceedings/icsem/2010/4223/2",
"title": "2010 International Conference on System Science, Engineering Design and Manufacturing Informatization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ibica/2011/4606/0/4606a348",
"title": "A Cognitive Model to Mimic an Aspect of Low Level Perception of Sound: Modelling Reverberation Perception by Statistical Signal Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/ibica/2011/4606a348/12OmNwKGAlO",
"parentPublication": {
"id": "proceedings/ibica/2011/4606/0",
"title": "Innovations in Bio-inspired Computing and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cisp/2008/3119/4/3119d398",
"title": "Direct Reckoning Reverberation Time from the Scene Images Based on Rough Fuzzy Neural Network",
"doi": null,
"abstractUrl": "/proceedings-article/cisp/2008/3119d398/12OmNwoPtr7",
"parentPublication": {
"id": "proceedings/cisp/2008/3119/4",
"title": "Image and Signal Processing, Congress on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/snpd/2012/2120/0/06299338",
"title": "Evaluation of Realism of Dynamic Sound Space Using a Virtual Auditory Display",
"doi": null,
"abstractUrl": "/proceedings-article/snpd/2012/06299338/12OmNz4SOxH",
"parentPublication": {
"id": "proceedings/snpd/2012/2120/0",
"title": "2012 13th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel & Distributed Computing (SNPD 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/1991/0003/0/00150779",
"title": "Estimation of position and waveform of a specified sound source decreasing the effect of other sound sources and reflection",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/1991/00150779/12OmNzw8ja1",
"parentPublication": {
"id": "proceedings/icassp/1991/0003/0",
"title": "Acoustics, Speech, and Signal Processing, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0204",
"title": "Evaluation of a Low-Cost 3D Sound System for Immersive Virtual Reality Training Systems",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0204/13rRUygT7f1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061707",
"title": "AD-Frustum: Adaptive Frustum Tracing for Interactive Sound Propagation",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061707/13rRUygT7mO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040557",
"articleId": "13rRUxly95x",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040576",
"articleId": "13rRUxYINf9",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRFX",
"name": "ttg2013040567s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040567s1.zip",
"extension": "zip",
"size": "19.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYINf9",
"doi": "10.1109/TVCG.2013.39",
"abstract": "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.",
"title": "Reflective and Refractive Objects for Mixed Reality",
"normalizedTitle": "Reflective and Refractive Objects for Mixed Reality",
"fno": "ttg2013040576",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Reality",
"Image Color Analysis",
"Rendering Computer Graphics",
"Lighting",
"Equations",
"Cameras",
"Streaming Media",
"Caustics",
"Mixed Reality",
"Reflections",
"Refractions"
],
"authors": [
{
"givenName": "M.",
"surname": "Knecht",
"fullName": "M. Knecht",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "C.",
"surname": "Traxler",
"fullName": "C. Traxler",
"affiliation": "VRVis - Zentrum fur Virtual Reality und Visualisierung Forschungs-GmbH, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "C.",
"surname": "Winklhofer",
"fullName": "C. Winklhofer",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Wimmer",
"fullName": "M. Wimmer",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "576-582",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/euromicro/2007/2977/0/29770307",
"title": "Reflective and Refractive Variables: A Model for Effective and Maintainable Adaptive-and-Dependable Software",
"doi": null,
"abstractUrl": "/proceedings-article/euromicro/2007/29770307/12OmNxE2mUb",
"parentPublication": {
"id": "proceedings/euromicro/2007/2977/0",
"title": "EUROMICRO '07. 2007 33rd Euromicro Conference on Software Engineering and Advanced Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2015/7660/0/7660a064",
"title": "[POSTER] Remote Mixed Reality System Supporting Interactions with Virtualized Objects",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2015/7660a064/12OmNzJbQY0",
"parentPublication": {
"id": "proceedings/ismar/2015/7660/0",
"title": "2015 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446391",
"title": "Visual Perception of Real World Depth Map Resolution for Mixed Reality Rendering",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446391/13bd1eSlyst",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/02/v0272",
"title": "Caustics Mapping: An Image-Space Technique for Real-Time Caustics",
"doi": null,
"abstractUrl": "/journal/tg/2007/02/v0272/13rRUxASuhs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2018/7315/0/731500a138",
"title": "Glossy Reflections for Mixed Reality Environments on Mobile Devices",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2018/731500a138/17D45Wda7hc",
"parentPublication": {
"id": "proceedings/cw/2018/7315/0",
"title": "2018 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a606",
"title": "Distortion-free Mid-air Image Inside Refractive Surface and on Reflective Surface",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a606/1CJbLHfeIsE",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbase/2022/9639/0/963900a027",
"title": "Interactive Image-Space Rendering of Dispersions",
"doi": null,
"abstractUrl": "/proceedings-article/cbase/2022/963900a027/1MBRh4FHK3m",
"parentPublication": {
"id": "proceedings/cbase/2022/9639/0",
"title": "2022 International Conference on Cloud Computing, Big Data Applications and Software Engineering (CBASE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798002",
"title": "Real-time Underwater Caustics for Mixed Reality 360° Videos",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798002/1cJ0UOVs3Ly",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/12/09123589",
"title": "An Improved Augmented-Reality Framework for Differential Rendering Beyond the Lambertian-World Assumption",
"doi": null,
"abstractUrl": "/journal/tg/2021/12/09123589/1kTxwwg0epW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/10/08684333",
"title": "Fast Computation of Single Scattering in Participating Media with Refractive Boundaries Using Frequency Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2020/10/08684333/1keqXrXysr6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040567",
"articleId": "13rRUxD9gXG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040583",
"articleId": "13rRUxYrbUF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRWy",
"name": "ttg2013040576s1.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040576s1.avi",
"extension": "avi",
"size": "24.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbUF",
"doi": "10.1109/TVCG.2013.32",
"abstract": "This paper explores body ownership and control of an 'extended' humanoid avatar that features a distinct and flexible tail-like appendage protruding from its coccyx. Thirty-two participants took part in a between-groups study to puppeteer the avatar in an immersive CAVETM -like system. Participantsa' body movement was tracked, and the avatara's humanoid body synchronously reflected this motion. However, sixteen participants experienced the avatara's tail moving around randomly and asynchronous to their own movement, while the other participants experienced a tail that they could, potentially, control accurately and synchronously through hip movement. Participants in the synchronous condition experienced a higher degree of body ownership and agency, suggesting that visuomotor synchrony enhanced the probability of ownership over the avatar body despite of its extra-human form. Participants experiencing body ownership were also more likely to be more anxious and attempt to avoid virtual threats to the tail and body. The higher task performance of participants in the synchronous condition indicates that people are able to quickly learn how to remap normal degrees of bodily freedom in order to control virtual bodies that differ from the humanoid form. We discuss the implications and applications of extended humanoid avatars as a method for exploring the plasticity of the braina's representation of the body and for gestural human-computer interfaces.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper explores body ownership and control of an 'extended' humanoid avatar that features a distinct and flexible tail-like appendage protruding from its coccyx. Thirty-two participants took part in a between-groups study to puppeteer the avatar in an immersive CAVETM -like system. Participantsa' body movement was tracked, and the avatara's humanoid body synchronously reflected this motion. However, sixteen participants experienced the avatara's tail moving around randomly and asynchronous to their own movement, while the other participants experienced a tail that they could, potentially, control accurately and synchronously through hip movement. Participants in the synchronous condition experienced a higher degree of body ownership and agency, suggesting that visuomotor synchrony enhanced the probability of ownership over the avatar body despite of its extra-human form. Participants experiencing body ownership were also more likely to be more anxious and attempt to avoid virtual threats to the tail and body. The higher task performance of participants in the synchronous condition indicates that people are able to quickly learn how to remap normal degrees of bodily freedom in order to control virtual bodies that differ from the humanoid form. We discuss the implications and applications of extended humanoid avatars as a method for exploring the plasticity of the braina's representation of the body and for gestural human-computer interfaces.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper explores body ownership and control of an 'extended' humanoid avatar that features a distinct and flexible tail-like appendage protruding from its coccyx. Thirty-two participants took part in a between-groups study to puppeteer the avatar in an immersive CAVETM -like system. Participantsa' body movement was tracked, and the avatara's humanoid body synchronously reflected this motion. However, sixteen participants experienced the avatara's tail moving around randomly and asynchronous to their own movement, while the other participants experienced a tail that they could, potentially, control accurately and synchronously through hip movement. Participants in the synchronous condition experienced a higher degree of body ownership and agency, suggesting that visuomotor synchrony enhanced the probability of ownership over the avatar body despite of its extra-human form. Participants experiencing body ownership were also more likely to be more anxious and attempt to avoid virtual threats to the tail and body. The higher task performance of participants in the synchronous condition indicates that people are able to quickly learn how to remap normal degrees of bodily freedom in order to control virtual bodies that differ from the humanoid form. We discuss the implications and applications of extended humanoid avatars as a method for exploring the plasticity of the braina's representation of the body and for gestural human-computer interfaces.",
"title": "Human Tails: Ownership and Control of Extended Humanoid Avatars",
"normalizedTitle": "Human Tails: Ownership and Control of Extended Humanoid Avatars",
"fno": "ttg2013040583",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Games",
"Tracking",
"Hip",
"Visualization",
"Educational Institutions",
"Joints",
"Gestural Interfaces",
"Avatars",
"Virtual Reality",
"Body Ownership",
"Agency",
"Body Schema",
"Plasticity"
],
"authors": [
{
"givenName": "W.",
"surname": "Steptoe",
"fullName": "W. Steptoe",
"affiliation": "Univ. Coll. London, London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Steed",
"fullName": "A. Steed",
"affiliation": "Univ. Coll. London, London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Slater",
"fullName": "M. Slater",
"affiliation": "ICREA, Univ. of Barcelona, Barcelona, Spain",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "583-590",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223379",
"title": "Avatar anthropomorphism and illusion of body ownership in VR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223379/12OmNAWpyrk",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223405",
"title": "Wings and flying in immersive VR — Controller type, sound effects and experienced ownership and agency",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223405/12OmNBOCWnu",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cts/2016/2300/0/07871048",
"title": "Body Ownership in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07871048/12OmNCm7BF7",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicta/2014/6636/0/6636a088",
"title": "Analysis and Design of Humanoid Robot Dance",
"doi": null,
"abstractUrl": "/proceedings-article/icicta/2014/6636a088/12OmNwpoFMM",
"parentPublication": {
"id": "proceedings/icicta/2014/6636/0",
"title": "2014 7th International Conference on Intelligent Computation Technology and Automation (ICICTA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icndc/2010/8382/0/05645366",
"title": "Optimal Momentum Compensation with Waist Joint for Online Biped Gait Generating of Humanoid Robot",
"doi": null,
"abstractUrl": "/proceedings-article/icndc/2010/05645366/12OmNzlUKzP",
"parentPublication": {
"id": "proceedings/icndc/2010/8382/0",
"title": "2010 First International Conference on Networking and Distributed Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446569",
"title": "Spatial Asynchronous Visuo-Tactile Stimuli Influence Ownership of Virtual Wings",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446569/13bd1ftOBCI",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446448",
"title": "Agency Enhances Body Ownership Illusion of Being a Virtual Bat",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446448/13bd1gzWkRR",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a666",
"title": "Investigating User Embodiment of Inverse-Kinematic Avatars in Smartphone Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a666/1JrR5i5jDhe",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797787",
"title": "The Effect of Hand Size and Interaction Modality on the Virtual Hand Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797787/1cJ179JUrPa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a729",
"title": "[DC] The Effect of Modulating The Step Length of an Embodied Self-Avatars on Gait Symmetry During Treadmill Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a729/1tnXoIo36uY",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040576",
"articleId": "13rRUxYINf9",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040591",
"articleId": "13rRUyYBlgz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYBlgz",
"doi": "10.1109/TVCG.2013.24",
"abstract": "We present a novel technique for animating self-avatar eye movements in an immersive virtual environment without the use of eye-tracking hardware, and evaluate our technique via a two-alternative, forced-choice-with-confidence experiment that compares this simulated-eye-tracking condition to a no-eye-tracking condition and a real-eye-tracking condition in which the avatar's eyes were rotated with an eye tracker. Viewing the reflection of a tracked self-avatar is often used in virtual-embodiment scenarios to induce in the participant the illusion that the virtual body of the self-avatar belongs to them, however current tracking methods do not account for the movements of the participants eyes, potentially lessening this body-ownership illusion. The results of our experiment indicate that, although blind to the experimental conditions, participants noticed differences between eye behaviors, and found that the real and simulated conditions represented their behavior better than the no-eye-tracking condition. Additionally, no statistical difference was found when choosing between the real and simulated conditions. These results suggest that adding eye movements to selfavatars produces a subjective increase in self-identification with the avatar due to a more complete representation of the participant's behavior, which may be beneficial for inducing virtual embodiment, and that effective results can be obtained without the need for any specialized eye-tracking hardware.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel technique for animating self-avatar eye movements in an immersive virtual environment without the use of eye-tracking hardware, and evaluate our technique via a two-alternative, forced-choice-with-confidence experiment that compares this simulated-eye-tracking condition to a no-eye-tracking condition and a real-eye-tracking condition in which the avatar's eyes were rotated with an eye tracker. Viewing the reflection of a tracked self-avatar is often used in virtual-embodiment scenarios to induce in the participant the illusion that the virtual body of the self-avatar belongs to them, however current tracking methods do not account for the movements of the participants eyes, potentially lessening this body-ownership illusion. The results of our experiment indicate that, although blind to the experimental conditions, participants noticed differences between eye behaviors, and found that the real and simulated conditions represented their behavior better than the no-eye-tracking condition. Additionally, no statistical difference was found when choosing between the real and simulated conditions. These results suggest that adding eye movements to selfavatars produces a subjective increase in self-identification with the avatar due to a more complete representation of the participant's behavior, which may be beneficial for inducing virtual embodiment, and that effective results can be obtained without the need for any specialized eye-tracking hardware.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel technique for animating self-avatar eye movements in an immersive virtual environment without the use of eye-tracking hardware, and evaluate our technique via a two-alternative, forced-choice-with-confidence experiment that compares this simulated-eye-tracking condition to a no-eye-tracking condition and a real-eye-tracking condition in which the avatar's eyes were rotated with an eye tracker. Viewing the reflection of a tracked self-avatar is often used in virtual-embodiment scenarios to induce in the participant the illusion that the virtual body of the self-avatar belongs to them, however current tracking methods do not account for the movements of the participants eyes, potentially lessening this body-ownership illusion. The results of our experiment indicate that, although blind to the experimental conditions, participants noticed differences between eye behaviors, and found that the real and simulated conditions represented their behavior better than the no-eye-tracking condition. Additionally, no statistical difference was found when choosing between the real and simulated conditions. These results suggest that adding eye movements to selfavatars produces a subjective increase in self-identification with the avatar due to a more complete representation of the participant's behavior, which may be beneficial for inducing virtual embodiment, and that effective results can be obtained without the need for any specialized eye-tracking hardware.",
"title": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"normalizedTitle": "An Evaluation of Self-Avatar Eye Movement for Virtual Embodiment",
"fno": "ttg2013040591",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Tracking",
"Mirrors",
"Calibration",
"Visualization",
"Hardware",
"Standards",
"User Studies",
"Virtual Embodiment",
"Eye Tracking",
"Virtual Characters"
],
"authors": [
{
"givenName": "D.",
"surname": "Borland",
"fullName": "D. Borland",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Peck",
"fullName": "T. Peck",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Slater",
"fullName": "M. Slater",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "591-596",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2015/1727/0/07223377",
"title": "Avatar embodiment realism and virtual fitness training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a057",
"title": "Visual Fidelity Effects on Expressive Self-avatar in Virtual Reality: First Impressions Matter",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a057/1CJc41zMnFC",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a772",
"title": "Embodiment of an Avatar with Unnatural Arm Movements",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a772/1J7W9fEjd6g",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar/2022/5325/0/532500a260",
"title": "The Effects of Avatar and Environment Design on Embodiment, Presence, Activation, and Task Load in a Virtual Reality Exercise Application",
"doi": null,
"abstractUrl": "/proceedings-article/ismar/2022/532500a260/1JrRf0Dbcac",
"parentPublication": {
"id": "proceedings/ismar/2022/5325/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998352",
"title": "Using Facial Animation to Increase the Enfacement Illusion and Avatar Self-Identification",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998352/1hpPCCB7Bte",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998133",
"title": "The Security-Utility Trade-off for Iris Authentication and Eye Animation for Social Virtual Avatars",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998133/1hrXcnyAOzu",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089510",
"title": "The Self-Avatar Follower Effect in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089510/1jIxamWhlT2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090598",
"title": "Modified Playback of Avatar Clip Sequences Based on Student Attention in Educational VR",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090598/1jIxljoHOvK",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090683",
"title": "The Effects of Avatar Visibility on Behavioral Response with or without Mirror-Visual Feedback in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090683/1jIxzZ4gw4E",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a127",
"title": "Evidence for a Relationship Between Self-Avatar Fixations and Perceived Avatar Similarity within Low-Cost Virtual Reality Embodiment",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a127/1tnXDDh8sqk",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040583",
"articleId": "13rRUxYrbUF",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040597",
"articleId": "13rRUwbs20V",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbs20V",
"doi": "10.1109/TVCG.2013.29",
"abstract": "It has been shown that it is possible to generate perceptual illusions of ownership in immersive virtual reality (IVR) over a virtual body seen from first person perspective, in other words over a body that visually substitutes the person's real body. This can occur even when the virtual body is quite different in appearance from the person's real body. However, investigation of the psychological, behavioral and attitudinal consequences of such body transformations remains an interesting problem with much to be discovered. Thirty six Caucasian people participated in a between-groups experiment where they played a West-African Djembe hand drum while immersed in IVR and with a virtual body that substituted their own. The virtual hand drum was registered with a physical drum. They were alongside a virtual character that played a drum in a supporting, accompanying role. In a baseline condition participants were represented only by plainly shaded white hands, so that they were able merely to play. In the experimental condition they were represented either by a casually dressed dark-skinned virtual body (Casual Dark-Skinned - CD) or by a formal suited light-skinned body (Formal Light-Skinned - FL). Although participants of both groups experienced a strong body ownership illusion towards the virtual body, only those with the CD representation showed significant increases in their movement patterns for drumming compared to the baseline condition and compared with those embodied in the FL body. Moreover, the stronger the illusion of body ownership in the CD condition, the greater this behavioral change. A path analysis showed that the observed behavioral changes were a function of the strength of the illusion of body ownership towards the virtual body and its perceived appropriateness for the drumming task. These results demonstrate that full body ownership illusions can lead to substantial behavioral and possibly cognitive changes depending on the appearance of the virtual body. This could be important for many applications such as learning, education, training, psychotherapy and rehabilitation using IVR.",
"abstracts": [
{
"abstractType": "Regular",
"content": "It has been shown that it is possible to generate perceptual illusions of ownership in immersive virtual reality (IVR) over a virtual body seen from first person perspective, in other words over a body that visually substitutes the person's real body. This can occur even when the virtual body is quite different in appearance from the person's real body. However, investigation of the psychological, behavioral and attitudinal consequences of such body transformations remains an interesting problem with much to be discovered. Thirty six Caucasian people participated in a between-groups experiment where they played a West-African Djembe hand drum while immersed in IVR and with a virtual body that substituted their own. The virtual hand drum was registered with a physical drum. They were alongside a virtual character that played a drum in a supporting, accompanying role. In a baseline condition participants were represented only by plainly shaded white hands, so that they were able merely to play. In the experimental condition they were represented either by a casually dressed dark-skinned virtual body (Casual Dark-Skinned - CD) or by a formal suited light-skinned body (Formal Light-Skinned - FL). Although participants of both groups experienced a strong body ownership illusion towards the virtual body, only those with the CD representation showed significant increases in their movement patterns for drumming compared to the baseline condition and compared with those embodied in the FL body. Moreover, the stronger the illusion of body ownership in the CD condition, the greater this behavioral change. A path analysis showed that the observed behavioral changes were a function of the strength of the illusion of body ownership towards the virtual body and its perceived appropriateness for the drumming task. These results demonstrate that full body ownership illusions can lead to substantial behavioral and possibly cognitive changes depending on the appearance of the virtual body. This could be important for many applications such as learning, education, training, psychotherapy and rehabilitation using IVR.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "It has been shown that it is possible to generate perceptual illusions of ownership in immersive virtual reality (IVR) over a virtual body seen from first person perspective, in other words over a body that visually substitutes the person's real body. This can occur even when the virtual body is quite different in appearance from the person's real body. However, investigation of the psychological, behavioral and attitudinal consequences of such body transformations remains an interesting problem with much to be discovered. Thirty six Caucasian people participated in a between-groups experiment where they played a West-African Djembe hand drum while immersed in IVR and with a virtual body that substituted their own. The virtual hand drum was registered with a physical drum. They were alongside a virtual character that played a drum in a supporting, accompanying role. In a baseline condition participants were represented only by plainly shaded white hands, so that they were able merely to play. In the experimental condition they were represented either by a casually dressed dark-skinned virtual body (Casual Dark-Skinned - CD) or by a formal suited light-skinned body (Formal Light-Skinned - FL). Although participants of both groups experienced a strong body ownership illusion towards the virtual body, only those with the CD representation showed significant increases in their movement patterns for drumming compared to the baseline condition and compared with those embodied in the FL body. Moreover, the stronger the illusion of body ownership in the CD condition, the greater this behavioral change. A path analysis showed that the observed behavioral changes were a function of the strength of the illusion of body ownership towards the virtual body and its perceived appropriateness for the drumming task. These results demonstrate that full body ownership illusions can lead to substantial behavioral and possibly cognitive changes depending on the appearance of the virtual body. This could be important for many applications such as learning, education, training, psychotherapy and rehabilitation using IVR.",
"title": "Drumming in Immersive Virtual Reality: The Body Shapes the Way We Play",
"normalizedTitle": "Drumming in Immersive Virtual Reality: The Body Shapes the Way We Play",
"fno": "ttg2013040597",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Avatars",
"Rubber",
"Mirrors",
"Correlation",
"Visualization",
"Instruments",
"Entertainment",
"Perception",
"Presence",
"User Studies",
"Experimental Methods",
"Multimodal Interaction",
"Training"
],
"authors": [
{
"givenName": "K.",
"surname": "Kilteni",
"fullName": "K. Kilteni",
"affiliation": "Event Lab., Univ. de Barcelona, Barcelona, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "I.",
"surname": "Bergstrom",
"fullName": "I. Bergstrom",
"affiliation": "Event Lab., Univ. de Barcelona, Barcelona, Spain",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Slater",
"fullName": "M. Slater",
"affiliation": "Event Lab., Univ. de Barcelona, Barcelona, Spain",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "597-605",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iscc/2017/1629/0/08024496",
"title": "Affective impact of social presence in immersive 3D virtual worlds",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2017/08024496/12OmNBt3qqj",
"parentPublication": {
"id": "proceedings/iscc/2017/1629/0",
"title": "2017 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223377",
"title": "Avatar embodiment realism and virtual fitness training",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223377/12OmNCcKQFn",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cts/2016/2300/0/07871048",
"title": "Body Ownership in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07871048/12OmNCm7BF7",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444805",
"title": "The contribution of real-time mirror reflections of motor actions on virtual body ownership in an immersive virtual environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444805/12OmNwoPtlH",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2013/4795/0/06549442",
"title": "Drumming in immersive virtual reality: The body shapes the way we play",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549442/12OmNz5apEB",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08263407",
"title": "The Impact of Avatar Personalization and Immersion on Virtual Body Ownership, Presence, and Emotional Response",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08263407/13rRUILtJqW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2014/07/mco2014070024",
"title": "Transcending the Self in Immersive Virtual Reality",
"doi": null,
"abstractUrl": "/magazine/co/2014/07/mco2014070024/13rRUwcAqvw",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798222",
"title": "Shared Body by Action Integration of Two Persons: Body Ownership, Sense of Agency and Task Performance",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798222/1cJ0T8PM6qI",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798055",
"title": "Embodying an Extra Virtual Body in Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798055/1cJ0Y0o1pO8",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797787",
"title": "The Effect of Hand Size and Interaction Modality on the Virtual Hand Illusion",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797787/1cJ179JUrPa",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040591",
"articleId": "13rRUyYBlgz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040606",
"articleId": "13rRUILLkvq",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFp1",
"name": "ttg2013040597s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040597s1.zip",
"extension": "zip",
"size": "19.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLkvq",
"doi": "10.1109/TVCG.2013.40",
"abstract": "We propose a new olfactory display system that can generate an odor distribution on a two-dimensional display screen. The proposed system has four fans on the four corners of the screen. The airflows that are generated by these fans collide multiple times to create an airflow that is directed towards the user from a certain position on the screen. By introducing odor vapor into the airflows, the odor distribution is as if an odor source had been placed onto the screen. The generated odor distribution leads the user to perceive the odor as emanating from a specific region of the screen. The position of this virtual odor source can be shifted to an arbitrary position on the screen by adjusting the balance of the airflows from the four fans. Most users do not immediately notice the odor presentation mechanism of the proposed olfactory display system because the airflow and perceived odor come from the display screen rather than the fans. The airflow velocity can even be set below the threshold for airflow sensation, such that the odor alone is perceived by the user. We present experimental results that show the airflow field and odor distribution that are generated by the proposed system. We also report sensory test results to show how the generated odor distribution is perceived by the user and the issues that must be considered in odor presentation.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a new olfactory display system that can generate an odor distribution on a two-dimensional display screen. The proposed system has four fans on the four corners of the screen. The airflows that are generated by these fans collide multiple times to create an airflow that is directed towards the user from a certain position on the screen. By introducing odor vapor into the airflows, the odor distribution is as if an odor source had been placed onto the screen. The generated odor distribution leads the user to perceive the odor as emanating from a specific region of the screen. The position of this virtual odor source can be shifted to an arbitrary position on the screen by adjusting the balance of the airflows from the four fans. Most users do not immediately notice the odor presentation mechanism of the proposed olfactory display system because the airflow and perceived odor come from the display screen rather than the fans. The airflow velocity can even be set below the threshold for airflow sensation, such that the odor alone is perceived by the user. We present experimental results that show the airflow field and odor distribution that are generated by the proposed system. We also report sensory test results to show how the generated odor distribution is perceived by the user and the issues that must be considered in odor presentation.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a new olfactory display system that can generate an odor distribution on a two-dimensional display screen. The proposed system has four fans on the four corners of the screen. The airflows that are generated by these fans collide multiple times to create an airflow that is directed towards the user from a certain position on the screen. By introducing odor vapor into the airflows, the odor distribution is as if an odor source had been placed onto the screen. The generated odor distribution leads the user to perceive the odor as emanating from a specific region of the screen. The position of this virtual odor source can be shifted to an arbitrary position on the screen by adjusting the balance of the airflows from the four fans. Most users do not immediately notice the odor presentation mechanism of the proposed olfactory display system because the airflow and perceived odor come from the display screen rather than the fans. The airflow velocity can even be set below the threshold for airflow sensation, such that the odor alone is perceived by the user. We present experimental results that show the airflow field and odor distribution that are generated by the proposed system. We also report sensory test results to show how the generated odor distribution is perceived by the user and the issues that must be considered in odor presentation.",
"title": "Smelling Screen: Development and Evaluation of an Olfactory Display System for Presenting a Virtual Odor Source",
"normalizedTitle": "Smelling Screen: Development and Evaluation of an Olfactory Display System for Presenting a Virtual Odor Source",
"fno": "ttg2013040606",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Olfactory",
"Fans",
"Face",
"Position Measurement",
"Gas Detectors",
"Educational Institutions",
"Wind Dislpay",
"Virtual Reality",
"Multimedia",
"Olfactory Display"
],
"authors": [
{
"givenName": "H.",
"surname": "Matsukura",
"fullName": "H. Matsukura",
"affiliation": "Tokyo Univ. of Agric. & Technol., Koganei, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Yoneda",
"fullName": "T. Yoneda",
"affiliation": "Tokyo Univ. of Agric. & Technol., Koganei, Japan",
"__typename": "ArticleAuthorType"
},
{
"givenName": "H.",
"surname": "Ishida",
"fullName": "H. Ishida",
"affiliation": "Tokyo Univ. of Agric. & Technol., Koganei, Japan",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "606-615",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2013/4795/0/06549415",
"title": "Smelling screen: Presenting a virtual odor source on a LCD screen",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2013/06549415/12OmNB0X8pB",
"parentPublication": {
"id": "proceedings/vr/2013/4795/0",
"title": "2013 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811062",
"title": "Odor Presentation with a Vivid Sense of Reality: Incorporating Fluid Dynamics Simulation into Olfactory Display",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811062/12OmNs0C9X2",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2010/6237/0/05444763",
"title": "On the effect of airflow on odor presentation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2010/05444763/12OmNviHKkx",
"parentPublication": {
"id": "proceedings/vr/2010/6237/0",
"title": "2010 IEEE Virtual Reality Conference (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2011/0039/0/05759448",
"title": "Multi-sensorial field display: Presenting spatial distribution of airflow and odor",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2011/05759448/12OmNx3ZjcK",
"parentPublication": {
"id": "proceedings/vr/2011/0039/0",
"title": "2011 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811016",
"title": "Selection Method of Odor Components for Olfactory Display Using Mass Spectrum Database",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811016/12OmNxzuMBP",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2009/3943/0/04811042",
"title": "Interactive Odor Playback Based on Fluid Dynamics Simulation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2009/04811042/12OmNy7yEfO",
"parentPublication": {
"id": "proceedings/vr/2009/3943/0",
"title": "2009 IEEE Virtual Reality Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2012/1247/0/06180915",
"title": "Smelling screen: Technique to present a virtual odor source at an arbitrary position on a screen",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2012/06180915/12OmNzgeLC5",
"parentPublication": {
"id": "proceedings/vr/2012/1247/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08445841",
"title": "Demonstration of Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08445841/13bd1eTtWYE",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446484",
"title": "Olfactory Display Based on Sniffing Action",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446484/13bd1fdV4kM",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/smartiot/2021/4511/0/451100a329",
"title": "The Odor Characterizations and Interactive Olfactory Display: A Survey",
"doi": null,
"abstractUrl": "/proceedings-article/smartiot/2021/451100a329/1xDQb2bELm0",
"parentPublication": {
"id": "proceedings/smartiot/2021/4511/0",
"title": "2021 IEEE International Conference on Smart Internet of Things (SmartIoT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040597",
"articleId": "13rRUwbs20V",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040616",
"articleId": "13rRUzp02ok",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUzp02ok",
"doi": "10.1109/TVCG.2013.33",
"abstract": "We present a novel immersive telepresence system that allows distributed groups of users to meet in a shared virtual 3D world. Our approach is based on two coupled projection-based multi-user setups, each providing multiple users with perspectively correct stereoscopic images. At each site the users and their local interaction space are continuously captured using a cluster of registered depth and color cameras. The captured 3D information is transferred to the respective other location, where the remote participants are virtually reconstructed. We explore the use of these virtual user representations in various interaction scenarios in which local and remote users are face-to-face, side-by-side or decoupled. Initial experiments with distributed user groups indicate the mutual understanding of pointing and tracing gestures independent of whether they were performed by local or remote participants. Our users were excited about the new possibilities of jointly exploring a virtual city, where they relied on a world-in-miniature metaphor for mutual awareness of their respective locations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a novel immersive telepresence system that allows distributed groups of users to meet in a shared virtual 3D world. Our approach is based on two coupled projection-based multi-user setups, each providing multiple users with perspectively correct stereoscopic images. At each site the users and their local interaction space are continuously captured using a cluster of registered depth and color cameras. The captured 3D information is transferred to the respective other location, where the remote participants are virtually reconstructed. We explore the use of these virtual user representations in various interaction scenarios in which local and remote users are face-to-face, side-by-side or decoupled. Initial experiments with distributed user groups indicate the mutual understanding of pointing and tracing gestures independent of whether they were performed by local or remote participants. Our users were excited about the new possibilities of jointly exploring a virtual city, where they relied on a world-in-miniature metaphor for mutual awareness of their respective locations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a novel immersive telepresence system that allows distributed groups of users to meet in a shared virtual 3D world. Our approach is based on two coupled projection-based multi-user setups, each providing multiple users with perspectively correct stereoscopic images. At each site the users and their local interaction space are continuously captured using a cluster of registered depth and color cameras. The captured 3D information is transferred to the respective other location, where the remote participants are virtually reconstructed. We explore the use of these virtual user representations in various interaction scenarios in which local and remote users are face-to-face, side-by-side or decoupled. Initial experiments with distributed user groups indicate the mutual understanding of pointing and tracing gestures independent of whether they were performed by local or remote participants. Our users were excited about the new possibilities of jointly exploring a virtual city, where they relied on a world-in-miniature metaphor for mutual awareness of their respective locations.",
"title": "Immersive Group-to-Group Telepresence",
"normalizedTitle": "Immersive Group-to-Group Telepresence",
"fno": "ttg2013040616",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Calibration",
"Cameras",
"Servers",
"Streaming Media",
"Image Reconstruction",
"Image Color Analysis",
"Virtual Reality",
"3 D Capture",
"Multi User Virtual Reality",
"Telepresence"
],
"authors": [
{
"givenName": "S.",
"surname": "Beck",
"fullName": "S. Beck",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Kunert",
"fullName": "A. Kunert",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Kulik",
"fullName": "A. Kulik",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "Froehlich",
"fullName": "B. Froehlich",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "616-625",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ismar-adjunct/2017/6327/0/6327a218",
"title": "[POSTER] CoVAR: Mixed-Platform Remote Collaborative Augmented and Virtual Realities System with Shared Collaboration Cues",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2017/6327a218/12OmNzV70Kh",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2017/6327/0",
"title": "2017 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigmm/2017/6549/0/07966773",
"title": "RGB-D Camera Network Calibration and Streaming for 3D Telepresence in Large Environment",
"doi": null,
"abstractUrl": "/proceedings-article/bigmm/2017/07966773/12OmNzcPADw",
"parentPublication": {
"id": "proceedings/bigmm/2017/6549/0",
"title": "2017 IEEE Third International Conference on Multimedia Big Data (BigMM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ism/2014/4311/0/4311a175",
"title": "An Immersive Telepresence System Using a Real-Time Omnidirectional Camera and a Virtual Reality Head-Mounted Display",
"doi": null,
"abstractUrl": "/proceedings-article/ism/2014/4311a175/12OmNzn38Pl",
"parentPublication": {
"id": "proceedings/ism/2014/4311/0",
"title": "2014 IEEE International Symposium on Multimedia (ISM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/03/07792698",
"title": "JackIn Head: Immersive Visual Telepresence System with Omnidirectional Wearable Camera",
"doi": null,
"abstractUrl": "/journal/tg/2017/03/07792698/13rRUx0geq0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2014/07/mco2014070046",
"title": "Immersive 3D Telepresence",
"doi": null,
"abstractUrl": "/magazine/co/2014/07/mco2014070046/13rRUy0ZzW3",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2022/5365/0/536500a524",
"title": "Synthesizing Novel Spaces for Remote Telepresence Experiences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2022/536500a524/1J7WaFB7xNC",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2022/5365/0",
"title": "2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wevr/2019/4050/0/08809591",
"title": "Immersive Gastronomic Experience with Distributed Reality",
"doi": null,
"abstractUrl": "/proceedings-article/wevr/2019/08809591/1cI62dVXsB2",
"parentPublication": {
"id": "proceedings/wevr/2019/4050/0",
"title": "2019 IEEE 5th Workshop on Everyday Virtual Reality (WEVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090433",
"title": "Virtual Tour: An Immersive Low Cost Telepresence System",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090433/1jIxrSY8cZa",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/07/09257094",
"title": "Output-Sensitive Avatar Representations for Immersive Telepresence",
"doi": null,
"abstractUrl": "/journal/tg/2022/07/09257094/1oFCABrJUmA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2021/1298/0/129800a451",
"title": "The Owl: Immersive Telepresence Communication for Hybrid Conferences",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2021/129800a451/1yeQG4fi6Dm",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2021/1298/0",
"title": "2021 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040606",
"articleId": "13rRUILLkvq",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040626",
"articleId": "13rRUwd9CG2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFvO",
"name": "ttg2013040616s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040616s1.mp4",
"extension": "mp4",
"size": "26.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwd9CG2",
"doi": "10.1109/TVCG.2013.23",
"abstract": "Passive haptics, also known as tactile augmentation, denotes the use of a physical counterpart to a virtual environment to provide tactile feedback. Employing passive haptics can result in more realistic touch sensations than those from active force feedback, especially for rigid contacts. However, changes in the virtual environment would necessitate modifications of the physical counterparts. In recent work space warping has been proposed as one solution to overcome this limitation. In this technique virtual space is distorted such that a variety of virtual models can be mapped onto one single physical object. In this paper, we propose as an extension adaptive space warping; we show how this technique can be employed in a mixed-reality surgical training simulator in order to map different virtual patients onto one physical anatomical model. We developed methods to warp different organ geometries onto one physical mock-up, to handle different mechanical behaviors of the virtual patients, and to allow interactive modifications of the virtual structures, while the physical counterparts remain unchanged. Various practical examples underline the wide applicability of our approach. To the best of our knowledge this is the first practical usage of such a technique in the specific context of interactive medical training.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Passive haptics, also known as tactile augmentation, denotes the use of a physical counterpart to a virtual environment to provide tactile feedback. Employing passive haptics can result in more realistic touch sensations than those from active force feedback, especially for rigid contacts. However, changes in the virtual environment would necessitate modifications of the physical counterparts. In recent work space warping has been proposed as one solution to overcome this limitation. In this technique virtual space is distorted such that a variety of virtual models can be mapped onto one single physical object. In this paper, we propose as an extension adaptive space warping; we show how this technique can be employed in a mixed-reality surgical training simulator in order to map different virtual patients onto one physical anatomical model. We developed methods to warp different organ geometries onto one physical mock-up, to handle different mechanical behaviors of the virtual patients, and to allow interactive modifications of the virtual structures, while the physical counterparts remain unchanged. Various practical examples underline the wide applicability of our approach. To the best of our knowledge this is the first practical usage of such a technique in the specific context of interactive medical training.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Passive haptics, also known as tactile augmentation, denotes the use of a physical counterpart to a virtual environment to provide tactile feedback. Employing passive haptics can result in more realistic touch sensations than those from active force feedback, especially for rigid contacts. However, changes in the virtual environment would necessitate modifications of the physical counterparts. In recent work space warping has been proposed as one solution to overcome this limitation. In this technique virtual space is distorted such that a variety of virtual models can be mapped onto one single physical object. In this paper, we propose as an extension adaptive space warping; we show how this technique can be employed in a mixed-reality surgical training simulator in order to map different virtual patients onto one physical anatomical model. We developed methods to warp different organ geometries onto one physical mock-up, to handle different mechanical behaviors of the virtual patients, and to allow interactive modifications of the virtual structures, while the physical counterparts remain unchanged. Various practical examples underline the wide applicability of our approach. To the best of our knowledge this is the first practical usage of such a technique in the specific context of interactive medical training.",
"title": "Adaptive Space Warping to Enhance Passive Haptics in an Arthroscopy Surgical Simulator",
"normalizedTitle": "Adaptive Space Warping to Enhance Passive Haptics in an Arthroscopy Surgical Simulator",
"fno": "ttg2013040626",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Haptic Interfaces",
"Avatars",
"Bones",
"Geometry",
"Joints",
"Surgery",
"Shape",
"Surgical Training Simulation",
"Virtual Reality",
"Passive Haptics"
],
"authors": [
{
"givenName": "Jonas",
"surname": "Spillmann",
"fullName": "Jonas Spillmann",
"affiliation": "Comput. Vision Lab., ETH Zurich, Zurich, Switzerland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Tuchschmid",
"fullName": "S. Tuchschmid",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Harders",
"fullName": "M. Harders",
"affiliation": "Comput. Vision Lab., ETH Zurich, Zurich, Switzerland",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "626-633",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2010/6846/0/05444703",
"title": "Redirected touching: Warping space to remap passive haptics",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2010/05444703/12OmNvH7fjb",
"parentPublication": {
"id": "proceedings/3dui/2010/6846/0",
"title": "2010 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cgi/2000/0643/0/06430295",
"title": "Haptics Issues in Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/cgi/2000/06430295/12OmNyQYt7r",
"parentPublication": {
"id": "proceedings/cgi/2000/0643/0",
"title": "Computer Graphics International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/03/07452616",
"title": "Haptics for Product Design and Manufacturing Simulation",
"doi": null,
"abstractUrl": "/journal/th/2016/03/07452616/13rRUNvyats",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2011/01/tth2011010051",
"title": "The Role of Haptics in Medical Training Simulators: A Survey of the State of the Art",
"doi": null,
"abstractUrl": "/journal/th/2011/01/tth2011010051/13rRUwI5TR8",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08260974",
"title": "Evaluating Remapped Physical Reach for Hand Interactions with Passive Haptics in Virtual Reality",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08260974/13rRUwkxc5s",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ic/2012/05/mic2012050083",
"title": "Integrating Haptics in Web Interfaces: State of the Art and Open Issues",
"doi": null,
"abstractUrl": "/magazine/ic/2012/05/mic2012050083/13rRUxASudL",
"parentPublication": {
"id": "mags/ic",
"title": "IEEE Internet Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797785",
"title": "Occurrence of Pseudo-Haptics by Swimming in a Virtual Reality Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797785/1cJ0HqCLp96",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090415",
"title": "Enhancing Proxy-Based Haptics in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090415/1jIxtWMak6c",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/svr/2020/9231/0/923100a134",
"title": "Proxy Haptics for Surgical Training",
"doi": null,
"abstractUrl": "/proceedings-article/svr/2020/923100a134/1oZBAEAmMBW",
"parentPublication": {
"id": "proceedings/svr/2020/9231/0",
"title": "2020 22nd Symposium on Virtual and Augmented Reality (SVR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040616",
"articleId": "13rRUzp02ok",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040634",
"articleId": "13rRUx0Pqpx",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0Pqpx",
"doi": "10.1109/TVCG.2013.28",
"abstract": "Redirected walking algorithms imperceptibly rotate a virtual scene and scale movements to guide users of immersive virtual environment systems away from tracking area boundaries. These distortions ideally permit users to explore large and potentially unbounded virtual worlds while walking naturally through a physically limited space. Estimates of the physical space required to perform effective redirected walking have been based largely on the ability of humans to perceive the distortions introduced by redirected walking and have not examined the impact the overall steering strategy used. This work compares four generalized redirected walking algorithms, including Steer-to-Center, Steer-to-Orbit, Steer-to-Multiple-Targets and Steer-to-Multiple+Center. Two experiments are presented based on simulated navigation as well as live-user navigation carried out in a large immersive virtual environment facility. Simulations were conducted with both synthetic paths and previously-logged user data. Primary comparison metrics include mean and maximum distances from the tracking area center for each algorithm, number of wall contacts, and mean rates of redirection. Results indicated that Steer-to-Center out-performed all other algorithms relative to these metrics. Steer-to-Orbit also performed well in some circumstances.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Redirected walking algorithms imperceptibly rotate a virtual scene and scale movements to guide users of immersive virtual environment systems away from tracking area boundaries. These distortions ideally permit users to explore large and potentially unbounded virtual worlds while walking naturally through a physically limited space. Estimates of the physical space required to perform effective redirected walking have been based largely on the ability of humans to perceive the distortions introduced by redirected walking and have not examined the impact the overall steering strategy used. This work compares four generalized redirected walking algorithms, including Steer-to-Center, Steer-to-Orbit, Steer-to-Multiple-Targets and Steer-to-Multiple+Center. Two experiments are presented based on simulated navigation as well as live-user navigation carried out in a large immersive virtual environment facility. Simulations were conducted with both synthetic paths and previously-logged user data. Primary comparison metrics include mean and maximum distances from the tracking area center for each algorithm, number of wall contacts, and mean rates of redirection. Results indicated that Steer-to-Center out-performed all other algorithms relative to these metrics. Steer-to-Orbit also performed well in some circumstances.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Redirected walking algorithms imperceptibly rotate a virtual scene and scale movements to guide users of immersive virtual environment systems away from tracking area boundaries. These distortions ideally permit users to explore large and potentially unbounded virtual worlds while walking naturally through a physically limited space. Estimates of the physical space required to perform effective redirected walking have been based largely on the ability of humans to perceive the distortions introduced by redirected walking and have not examined the impact the overall steering strategy used. This work compares four generalized redirected walking algorithms, including Steer-to-Center, Steer-to-Orbit, Steer-to-Multiple-Targets and Steer-to-Multiple+Center. Two experiments are presented based on simulated navigation as well as live-user navigation carried out in a large immersive virtual environment facility. Simulations were conducted with both synthetic paths and previously-logged user data. Primary comparison metrics include mean and maximum distances from the tracking area center for each algorithm, number of wall contacts, and mean rates of redirection. Results indicated that Steer-to-Center out-performed all other algorithms relative to these metrics. Steer-to-Orbit also performed well in some circumstances.",
"title": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"normalizedTitle": "Comparing Four Approaches to Generalized Redirected Walking: Simulation and Live User Data",
"fno": "ttg2013040634",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Legged Locomotion",
"Orbits",
"Navigation",
"Algorithm Design And Analysis",
"Space Vehicles",
"Visualization",
"Tracking",
"Simulation",
"Redirected Walking",
"Virtual Environments",
"Navigation",
"Human Computer Interaction",
"Live Users"
],
"authors": [
{
"givenName": "E.",
"surname": "Hodgson",
"fullName": "E. Hodgson",
"affiliation": "Smale Interactive Visualization Center, Miami Univ., Miami, OH, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Bachmann",
"fullName": "E. Bachmann",
"affiliation": "Comput. Sci. & Software Eng., Miami Univ., Miami, OH, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "634-643",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446579",
"title": "Leveraging Configuration Spaces and Navigation Functions for Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446579/13bd1fdV4lq",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/11/ttg2013111872",
"title": "Optimizing Constrained-Environment Redirected Walking Instructions Using Search Techniques",
"doi": null,
"abstractUrl": "/journal/tg/2013/11/ttg2013111872/13rRUIM2VBH",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404579",
"title": "Performance of Redirected Walking Algorithms in a Constrained Virtual World",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404579/13rRUwjoNx4",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/05/08645818",
"title": "Multi-User Redirected Walking and Resetting Using Artificial Potential Fields",
"doi": null,
"abstractUrl": "/journal/tg/2019/05/08645818/17PYEiVyc2v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09961901",
"title": "Transferable Virtual-Physical Environmental Alignment with Redirected Walking",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09961901/1IxvZ4KZbri",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798121",
"title": "Real-time Optimal Planning for Redirected Walking Using Deep Q-Learning",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798121/1cJ17Y60ruM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797818",
"title": "Effects of Tracking Area Shape and Size on Artificial Potential Field Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797818/1cJ1htJ7ArK",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/05/08998570",
"title": "A Steering Algorithm for Redirected Walking Using Reinforcement Learning",
"doi": null,
"abstractUrl": "/journal/tg/2020/05/08998570/1hx2DxYanDy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/11/09523832",
"title": "Redirected Walking in Static and Dynamic Scenes Using Visibility Polygons",
"doi": null,
"abstractUrl": "/journal/tg/2021/11/09523832/1wpqjiNuSqY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040626",
"articleId": "13rRUwd9CG2",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040644",
"articleId": "13rRUxC0SEg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxC0SEg",
"doi": "10.1109/TVCG.2013.31",
"abstract": "Pie menus are a well-known technique for interacting with 2D environments and so far a large body of research documents their usage and optimizations. Yet, comparatively little research has been done on the usability of pie menus in immersive virtual environments (IVEs). In this paper we reduce this gap by presenting an implementation and evaluation of an extended hierarchical pie menu system for IVEs that can be operated with a six-degrees-of-freedom input device. Following an iterative development process, we first developed and evaluated a basic hierarchical pie menu system. To better understand how pie menus should be operated in IVEs, we tested this system in a pilot user study with 24 participants and focus on item selection. Regarding the results of the study, the system was tweaked and elements like check boxes, sliders, and color map editors were added to provide extended functionality. An expert review with five experts was performed with the extended pie menus being integrated into an existing VR application to identify potential design issues. Overall results indicated high performance and efficient design.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Pie menus are a well-known technique for interacting with 2D environments and so far a large body of research documents their usage and optimizations. Yet, comparatively little research has been done on the usability of pie menus in immersive virtual environments (IVEs). In this paper we reduce this gap by presenting an implementation and evaluation of an extended hierarchical pie menu system for IVEs that can be operated with a six-degrees-of-freedom input device. Following an iterative development process, we first developed and evaluated a basic hierarchical pie menu system. To better understand how pie menus should be operated in IVEs, we tested this system in a pilot user study with 24 participants and focus on item selection. Regarding the results of the study, the system was tweaked and elements like check boxes, sliders, and color map editors were added to provide extended functionality. An expert review with five experts was performed with the extended pie menus being integrated into an existing VR application to identify potential design issues. Overall results indicated high performance and efficient design.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Pie menus are a well-known technique for interacting with 2D environments and so far a large body of research documents their usage and optimizations. Yet, comparatively little research has been done on the usability of pie menus in immersive virtual environments (IVEs). In this paper we reduce this gap by presenting an implementation and evaluation of an extended hierarchical pie menu system for IVEs that can be operated with a six-degrees-of-freedom input device. Following an iterative development process, we first developed and evaluated a basic hierarchical pie menu system. To better understand how pie menus should be operated in IVEs, we tested this system in a pilot user study with 24 participants and focus on item selection. Regarding the results of the study, the system was tweaked and elements like check boxes, sliders, and color map editors were added to provide extended functionality. An expert review with five experts was performed with the extended pie menus being integrated into an existing VR application to identify potential design issues. Overall results indicated high performance and efficient design.",
"title": "Extended Pie Menus for Immersive Virtual Environments",
"normalizedTitle": "Extended Pie Menus for Immersive Virtual Environments",
"fno": "ttg2013040644",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Layout",
"Usability",
"Error Analysis",
"Context",
"Performance Evaluation",
"Atmospheric Measurements",
"Particle Measurements",
"User Study",
"Pie Menus",
"Interaction",
"User Interfaces"
],
"authors": [
{
"givenName": "S.",
"surname": "Gebhardt",
"fullName": "S. Gebhardt",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "S.",
"surname": "Pick",
"fullName": "S. Pick",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "F.",
"surname": "Leithold",
"fullName": "F. Leithold",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "Hentschel",
"fullName": "B. Hentschel",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "T.",
"surname": "Kuhlen",
"fullName": "T. Kuhlen",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "644-651",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cw/2009/3791/0/3791a292",
"title": "Comparison of Adaptive, Adaptable and Mixed-Initiative Menus",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2009/3791a292/12OmNBBzoem",
"parentPublication": {
"id": "proceedings/cw/2009/3791/0",
"title": "2009 International Conference on CyberWorlds",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2014/3624/0/06798837",
"title": "An evaluation of a smart-phone-based menu system for immersive virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2014/06798837/12OmNBp52Dj",
"parentPublication": {
"id": "proceedings/3dui/2014/3624/0",
"title": "2014 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/haptics/2004/2112/0/21120224",
"title": "A Study of Haptic Linear and Pie Menus in a 3D Fish Tank VR Environment",
"doi": null,
"abstractUrl": "/proceedings-article/haptics/2004/21120224/12OmNqyUUH0",
"parentPublication": {
"id": "proceedings/haptics/2004/2112/0",
"title": "Haptic Interfaces for Virtual Environment and Teleoperator Systems, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscc/2017/1629/0/08024653",
"title": "PIE: A personalized incentive for location-aware mobile crowd sensing",
"doi": null,
"abstractUrl": "/proceedings-article/iscc/2017/08024653/12OmNwkR5vU",
"parentPublication": {
"id": "proceedings/iscc/2017/1629/0",
"title": "2017 IEEE Symposium on Computers and Communications (ISCC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2015/6886/0/07131737",
"title": "Handymenu: Integrating menu selection into a multifunction smartphone-based VR controller",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2015/07131737/12OmNx19jZG",
"parentPublication": {
"id": "proceedings/3dui/2015/6886/0",
"title": "2015 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2009/3726/1/3726a089",
"title": "A Study of Applying Extended PIE Technique to Software Testability Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2009/3726a089/12OmNxIzWOh",
"parentPublication": {
"id": "proceedings/compsac/2009/3726/1",
"title": "2009 33rd Annual IEEE International Computer Software and Applications Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2001/0948/0/09480149",
"title": "Design and Evaluation of Menu Systems for Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2001/09480149/12OmNy3Agvx",
"parentPublication": {
"id": "proceedings/vr/2001/0948/0",
"title": "Virtual Reality Conference, IEEE",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tabletop/2007/3013/0/30130121",
"title": "Improving Menu Interaction for Cluttered Tabletop Setups with User-Drawn Path Menus",
"doi": null,
"abstractUrl": "/proceedings-article/tabletop/2007/30130121/12OmNyVes0N",
"parentPublication": {
"id": "proceedings/tabletop/2007/3013/0",
"title": "Horizontal Interactive Human-Computer Systems, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icicic/2008/3161/0/31610364",
"title": "Enhancing Pie-Menu Selection with Pen Pressure",
"doi": null,
"abstractUrl": "/proceedings-article/icicic/2008/31610364/12OmNzn38SO",
"parentPublication": {
"id": "proceedings/icicic/2008/3161/0",
"title": "Innovative Computing ,Information and Control, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797900",
"title": "Menus on the Desk? System Control in DeskVR",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797900/1cJ18TJZQf6",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040634",
"articleId": "13rRUx0Pqpx",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040652",
"articleId": "13rRUygT7mX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7mX",
"doi": "10.1109/TVCG.2013.38",
"abstract": "In this paper we introduce novel 'Camera Motions' (CMs) to improve the sensations related to locomotion in virtual environments (VE). Traditional Camera Motions are artificial oscillating motions applied to the subjective viewpoint when walking in the VE, and they are meant to evoke and reproduce the visual flow generated during a human walk. Our novel camera motions are: (1) multistate, (2) personified, and (3) they can take into account the topography of the virtual terrain. Being multistate, our CMs can account for different states of locomotion in VE namely: walking, but also running and sprinting. Being personified, our CMs can be adapted to avatars physiology such as to its size, weight or training status. They can then take into account avatars fatigue and recuperation for updating visual CMs accordingly. Last, our approach is adapted to the topography of the VE. Running over a strong positive slope would rapidly decrease the advance speed of the avatar, increase its energy loss, and eventually change the locomotion mode, influencing the visual feedback of the camera motions. Our new approach relies on a locomotion simulator partially inspired by human physiology and implemented for a real-time use in Desktop VR. We have conducted a series of experiments to evaluate the perception of our new CMs by naive participants. Results notably show that participants could discriminate and perceive transitions between the different locomotion modes, by relying exclusively on our CMs. They could also perceive some properties of the avatar being used and, overall, very well appreciated the new CMs techniques. Taken together, our results suggest that our new CMs could be introduced in Desktop VR applications involving first-person navigation, in order to enhance sensations of walking, running, and sprinting, with potentially different avatars and over uneven terrains, such as for: training, virtual visits or video games.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this paper we introduce novel 'Camera Motions' (CMs) to improve the sensations related to locomotion in virtual environments (VE). Traditional Camera Motions are artificial oscillating motions applied to the subjective viewpoint when walking in the VE, and they are meant to evoke and reproduce the visual flow generated during a human walk. Our novel camera motions are: (1) multistate, (2) personified, and (3) they can take into account the topography of the virtual terrain. Being multistate, our CMs can account for different states of locomotion in VE namely: walking, but also running and sprinting. Being personified, our CMs can be adapted to avatars physiology such as to its size, weight or training status. They can then take into account avatars fatigue and recuperation for updating visual CMs accordingly. Last, our approach is adapted to the topography of the VE. Running over a strong positive slope would rapidly decrease the advance speed of the avatar, increase its energy loss, and eventually change the locomotion mode, influencing the visual feedback of the camera motions. Our new approach relies on a locomotion simulator partially inspired by human physiology and implemented for a real-time use in Desktop VR. We have conducted a series of experiments to evaluate the perception of our new CMs by naive participants. Results notably show that participants could discriminate and perceive transitions between the different locomotion modes, by relying exclusively on our CMs. They could also perceive some properties of the avatar being used and, overall, very well appreciated the new CMs techniques. Taken together, our results suggest that our new CMs could be introduced in Desktop VR applications involving first-person navigation, in order to enhance sensations of walking, running, and sprinting, with potentially different avatars and over uneven terrains, such as for: training, virtual visits or video games.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this paper we introduce novel 'Camera Motions' (CMs) to improve the sensations related to locomotion in virtual environments (VE). Traditional Camera Motions are artificial oscillating motions applied to the subjective viewpoint when walking in the VE, and they are meant to evoke and reproduce the visual flow generated during a human walk. Our novel camera motions are: (1) multistate, (2) personified, and (3) they can take into account the topography of the virtual terrain. Being multistate, our CMs can account for different states of locomotion in VE namely: walking, but also running and sprinting. Being personified, our CMs can be adapted to avatars physiology such as to its size, weight or training status. They can then take into account avatars fatigue and recuperation for updating visual CMs accordingly. Last, our approach is adapted to the topography of the VE. Running over a strong positive slope would rapidly decrease the advance speed of the avatar, increase its energy loss, and eventually change the locomotion mode, influencing the visual feedback of the camera motions. Our new approach relies on a locomotion simulator partially inspired by human physiology and implemented for a real-time use in Desktop VR. We have conducted a series of experiments to evaluate the perception of our new CMs by naive participants. Results notably show that participants could discriminate and perceive transitions between the different locomotion modes, by relying exclusively on our CMs. They could also perceive some properties of the avatar being used and, overall, very well appreciated the new CMs techniques. Taken together, our results suggest that our new CMs could be introduced in Desktop VR applications involving first-person navigation, in order to enhance sensations of walking, running, and sprinting, with potentially different avatars and over uneven terrains, such as for: training, virtual visits or video games.",
"title": "Personified and Multistate Camera Motions for First-Person Navigation in Desktop Virtual Reality",
"normalizedTitle": "Personified and Multistate Camera Motions for First-Person Navigation in Desktop Virtual Reality",
"fno": "ttg2013040652",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Navigation",
"Visualization",
"Vibrations",
"Mathematical Model",
"Legged Locomotion",
"Oscillators",
"Locomotion Simulation",
"Navigation",
"Camera Motions"
],
"authors": [
{
"givenName": "L.",
"surname": "Terziman",
"fullName": "L. Terziman",
"affiliation": "INSA / INRIA / DGA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Marchal",
"fullName": "M. Marchal",
"affiliation": "INSA / IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "F.",
"surname": "Multon",
"fullName": "F. Multon",
"affiliation": "M2S, Univ. Rennes 2, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "Arnaldi",
"fullName": "B. Arnaldi",
"affiliation": "INSA / IRISA, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Lecuyer",
"fullName": "A. Lecuyer",
"affiliation": "INRIA, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "652-661",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icme/2015/7082/0/07177481",
"title": "Temporal spotting of human actions from videos containing actor's unintentional motions",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2015/07177481/12OmNro0IfC",
"parentPublication": {
"id": "proceedings/icme/2015/7082/0",
"title": "2015 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892303",
"title": "Object location memory error in virtual and real environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892303/12OmNx7ouWn",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550194",
"title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550194/12OmNyFU75b",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446177",
"title": "You Shall Not Pass: Non-Intrusive Feedback for Virtual Walls in VR Environments with Room-Scale Mapping",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446177/13bd1eSlyu1",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/04/08302409",
"title": "Effects of Unaugmented Periphery and Vibrotactile Feedback on Proxemics with Virtual Humans in AR",
"doi": null,
"abstractUrl": "/journal/tg/2018/04/08302409/13rRUxcbnCw",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2018/7592/0/08699289",
"title": "Walking-in-Place for VR Navigation Independent of Gaze Direction Using a Waist-Worn Inertial Measurement Unit",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2018/08699289/19F1PlWtKJa",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2018/7592/0",
"title": "2018 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2022/9617/0/961700a821",
"title": "Evaluating the Impact of Limited Physical Space on the Navigation Performance of Two Locomotion Methods in Immersive Virtual Environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2022/961700a821/1CJbMX1TyoM",
"parentPublication": {
"id": "proceedings/vr/2022/9617/0",
"title": "2022 IEEE on Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089579",
"title": "Feature Guided Path Redirection for VR Navigation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089579/1jIx7XMm676",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2020/5608/0/09089654",
"title": "Effects of Locomotion Style and Body Visibility of a Telepresence Avatar",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2020/09089654/1jIxd00PzX2",
"parentPublication": {
"id": "proceedings/vr/2020/5608/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/09/09332290",
"title": "Quantifiable Fine-Grain Occlusion Removal Assistance for Efficient VR Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2022/09/09332290/1qzsRxXpW4o",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040644",
"articleId": "13rRUxC0SEg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040662",
"articleId": "13rRUzpzeB4",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRZs",
"name": "ttg2013040652s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040652s1.mp4",
"extension": "mp4",
"size": "32.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUzpzeB4",
"doi": "10.1109/TVCG.2013.35",
"abstract": "Stressful interpersonal experiences can be difficult to prepare for. Virtual humans may be leveraged to allow learners to safely gain exposure to stressful interpersonal experiences. In this paper we present a between-subjects study exploring how the presence of a virtual human affected learners while practicing a stressful interpersonal experience. Twenty-six fourth-year medical students practiced performing a prostate exam on a prostate exam simulator. Participants in the experimental condition examined a simulator augmented with a virtual human. Other participants examined a standard unaugmented simulator. Participants reactions were assessed using self-reported, behavioral, and physiological metrics. Participants who examined the virtual human experienced significantly more stress, measured via skin conductance. Participants stress was correlated with previous experience performing real prostate exams; participants who had performed more real prostate exams were more likely to experience stress while examining the virtual human. Participants who examined the virtual human showed signs of greater engagement; non-stressed participants performed better prostate exams while stressed participants treated the virtual human more realistically. Results indicated that stress evoked by virtual humans is linked to similar previous real-world stressful experiences, implying that learners real-world experience must be taken into account when using virtual humans to prepare them for stressful interpersonal experiences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Stressful interpersonal experiences can be difficult to prepare for. Virtual humans may be leveraged to allow learners to safely gain exposure to stressful interpersonal experiences. In this paper we present a between-subjects study exploring how the presence of a virtual human affected learners while practicing a stressful interpersonal experience. Twenty-six fourth-year medical students practiced performing a prostate exam on a prostate exam simulator. Participants in the experimental condition examined a simulator augmented with a virtual human. Other participants examined a standard unaugmented simulator. Participants reactions were assessed using self-reported, behavioral, and physiological metrics. Participants who examined the virtual human experienced significantly more stress, measured via skin conductance. Participants stress was correlated with previous experience performing real prostate exams; participants who had performed more real prostate exams were more likely to experience stress while examining the virtual human. Participants who examined the virtual human showed signs of greater engagement; non-stressed participants performed better prostate exams while stressed participants treated the virtual human more realistically. Results indicated that stress evoked by virtual humans is linked to similar previous real-world stressful experiences, implying that learners real-world experience must be taken into account when using virtual humans to prepare them for stressful interpersonal experiences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Stressful interpersonal experiences can be difficult to prepare for. Virtual humans may be leveraged to allow learners to safely gain exposure to stressful interpersonal experiences. In this paper we present a between-subjects study exploring how the presence of a virtual human affected learners while practicing a stressful interpersonal experience. Twenty-six fourth-year medical students practiced performing a prostate exam on a prostate exam simulator. Participants in the experimental condition examined a simulator augmented with a virtual human. Other participants examined a standard unaugmented simulator. Participants reactions were assessed using self-reported, behavioral, and physiological metrics. Participants who examined the virtual human experienced significantly more stress, measured via skin conductance. Participants stress was correlated with previous experience performing real prostate exams; participants who had performed more real prostate exams were more likely to experience stress while examining the virtual human. Participants who examined the virtual human showed signs of greater engagement; non-stressed participants performed better prostate exams while stressed participants treated the virtual human more realistically. Results indicated that stress evoked by virtual humans is linked to similar previous real-world stressful experiences, implying that learners real-world experience must be taken into account when using virtual humans to prepare them for stressful interpersonal experiences.",
"title": "Leveraging Virtual Humans to Effectively Prepare Learners for Stressful Interpersonal Experiences",
"normalizedTitle": "Leveraging Virtual Humans to Effectively Prepare Learners for Stressful Interpersonal Experiences",
"fno": "ttg2013040662",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Standards",
"Stress",
"Measurement",
"Interviews",
"Training",
"Educational Institutions",
"Prostate Cancer",
"User Studies",
"Virtual Digital Characters",
"Mixed Reality",
"Training"
],
"authors": [
{
"givenName": "A.",
"surname": "Robb",
"fullName": "A. Robb",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Kopper",
"fullName": "R. Kopper",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Ambani",
"fullName": "R. Ambani",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "F.",
"surname": "Qayyum",
"fullName": "F. Qayyum",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "D.",
"surname": "Lind",
"fullName": "D. Lind",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Li-Ming Su",
"fullName": "Li-Ming Su",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "B.",
"surname": "Lok",
"fullName": "B. Lok",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "662-670",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/asea/2014/7760/0/07023890",
"title": "A Design of Efficient Medical Information System to Enhance Health Behaviors After Radical Prostatectomy",
"doi": null,
"abstractUrl": "/proceedings-article/asea/2014/07023890/12OmNBrlPzE",
"parentPublication": {
"id": "proceedings/asea/2014/7760/0",
"title": "2014 7th International Conference on Advanced Software Engineering and Its Applications (ASEA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2006/0224/0/02240059",
"title": "Interpersonal Scenarios: Virtual \\approx Real?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2006/02240059/12OmNwDACqI",
"parentPublication": {
"id": "proceedings/vr/2006/0224/0",
"title": "IEEE Virtual Reality Conference (VR 2006)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cec/2012/4857/0/4857a101",
"title": "Social network characteristics of online shopping interpersonal relationship in real and virtual communities",
"doi": null,
"abstractUrl": "/proceedings-article/cec/2012/4857a101/12OmNwtn3xN",
"parentPublication": {
"id": "proceedings/cec/2012/4857/0",
"title": "2012 IEEE 14th International Conference on Commerce and Enterprise Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itag/2015/7874/0/7874a071",
"title": "Co-design of a Prostate Cancer Serious Game for African Caribbean Men",
"doi": null,
"abstractUrl": "/proceedings-article/itag/2015/7874a071/12OmNzYNN1h",
"parentPublication": {
"id": "proceedings/itag/2015/7874/0",
"title": "2015 International Conference on Interactive Technologies and Games (iTAG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cbms/1997/7928/0/79280120",
"title": "Knowledge-based mechanical imaging",
"doi": null,
"abstractUrl": "/proceedings-article/cbms/1997/79280120/12OmNzaQozw",
"parentPublication": {
"id": "proceedings/cbms/1997/7928/0",
"title": "Proceedings of the 26th IEEE International Symposium on Computer-Based Medical Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2013/5048/0/5048a624",
"title": "Affective Conversational Models: Interpersonal Stance in a Police Interview Context",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2013/5048a624/12OmNzuZUEe",
"parentPublication": {
"id": "proceedings/acii/2013/5048/0",
"title": "2013 Humaine Association Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2007/03/v0443",
"title": "Comparing Interpersonal Interactions with a Virtual Human to Those with a Real Human",
"doi": null,
"abstractUrl": "/journal/tg/2007/03/v0443/13rRUwvT9gk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2022/8402/0/840200a774",
"title": "Interpersonal Distance to a Speaking Avatar: Loudness Matters Irrespective of Contents",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2022/840200a774/1CJcDgr8xyg",
"parentPublication": {
"id": "proceedings/vrw/2022/8402/0",
"title": "2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/05/10049655",
"title": "Measuring Interpersonal Trust towards Virtual Humans with a Virtual Maze Paradigm",
"doi": null,
"abstractUrl": "/journal/tg/2023/05/10049655/1KYouwvCMBa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2021/4057/0/405700a613",
"title": "Communications in Virtual Environment Improve Interpersonal Impression",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2021/405700a613/1tnWHW8JhK0",
"parentPublication": {
"id": "proceedings/vrw/2021/4057/0",
"title": "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040652",
"articleId": "13rRUygT7mX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040671",
"articleId": "13rRUx0xPIH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFwW",
"name": "ttg2013040662s1.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040662s1.mp4",
"extension": "mp4",
"size": "9.38 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPIH",
"doi": "10.1109/TVCG.2013.34",
"abstract": "Virtual walking, a fundamental task in Virtual Reality (VR), is greatly influenced by the locomotion interface being used, by the specificities of input and output devices, and by the way the virtual environment is represented. No matter how virtual walking is controlled, the generation of realistic virtual trajectories is absolutely required for some applications, especially those dedicated to the study of walking behaviors in VR, navigation through virtual places for architecture, rehabilitation and training. Previous studies focused on evaluating the realism of locomotion trajectories have mostly considered the result of the locomotion task (efficiency, accuracy) and its subjective perception (presence, cybersickness). Few focused on the locomotion trajectory itself, but in situation of geometrically constrained task. In this paper, we study the realism of unconstrained trajectories produced during virtual walking by addressing the following question: did the user reach his destination by virtually walking along a trajectory he would have followed in similar real conditions? To this end, we propose a comprehensive evaluation framework consisting on a set of trajectographical criteria and a locomotion model to generate reference trajectories. We consider a simple locomotion task where users walk between two oriented points in space. The travel path is analyzed both geometrically and temporally in comparison to simulated reference trajectories. In addition, we demonstrate the framework over a user study which considered an initial set of common and frequent virtual walking conditions, namely different input devices, output display devices, control laws, and visualization modalities. The study provides insight into the relative contributions of each condition to the overall realism of the resulting virtual trajectories.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Virtual walking, a fundamental task in Virtual Reality (VR), is greatly influenced by the locomotion interface being used, by the specificities of input and output devices, and by the way the virtual environment is represented. No matter how virtual walking is controlled, the generation of realistic virtual trajectories is absolutely required for some applications, especially those dedicated to the study of walking behaviors in VR, navigation through virtual places for architecture, rehabilitation and training. Previous studies focused on evaluating the realism of locomotion trajectories have mostly considered the result of the locomotion task (efficiency, accuracy) and its subjective perception (presence, cybersickness). Few focused on the locomotion trajectory itself, but in situation of geometrically constrained task. In this paper, we study the realism of unconstrained trajectories produced during virtual walking by addressing the following question: did the user reach his destination by virtually walking along a trajectory he would have followed in similar real conditions? To this end, we propose a comprehensive evaluation framework consisting on a set of trajectographical criteria and a locomotion model to generate reference trajectories. We consider a simple locomotion task where users walk between two oriented points in space. The travel path is analyzed both geometrically and temporally in comparison to simulated reference trajectories. In addition, we demonstrate the framework over a user study which considered an initial set of common and frequent virtual walking conditions, namely different input devices, output display devices, control laws, and visualization modalities. The study provides insight into the relative contributions of each condition to the overall realism of the resulting virtual trajectories.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Virtual walking, a fundamental task in Virtual Reality (VR), is greatly influenced by the locomotion interface being used, by the specificities of input and output devices, and by the way the virtual environment is represented. No matter how virtual walking is controlled, the generation of realistic virtual trajectories is absolutely required for some applications, especially those dedicated to the study of walking behaviors in VR, navigation through virtual places for architecture, rehabilitation and training. Previous studies focused on evaluating the realism of locomotion trajectories have mostly considered the result of the locomotion task (efficiency, accuracy) and its subjective perception (presence, cybersickness). Few focused on the locomotion trajectory itself, but in situation of geometrically constrained task. In this paper, we study the realism of unconstrained trajectories produced during virtual walking by addressing the following question: did the user reach his destination by virtually walking along a trajectory he would have followed in similar real conditions? To this end, we propose a comprehensive evaluation framework consisting on a set of trajectographical criteria and a locomotion model to generate reference trajectories. We consider a simple locomotion task where users walk between two oriented points in space. The travel path is analyzed both geometrically and temporally in comparison to simulated reference trajectories. In addition, we demonstrate the framework over a user study which considered an initial set of common and frequent virtual walking conditions, namely different input devices, output display devices, control laws, and visualization modalities. The study provides insight into the relative contributions of each condition to the overall realism of the resulting virtual trajectories.",
"title": "Kinematic Evaluation of Virtual Walking Trajectories",
"normalizedTitle": "Kinematic Evaluation of Virtual Walking Trajectories",
"fno": "ttg2013040671",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Trajectory",
"Legged Locomotion",
"Logic Gates",
"Visualization",
"Cameras",
"Virtual Environments",
"Angular Velocity",
"Perception Action",
"Locomotion",
"Evaluation",
"Motor Control",
"Vision"
],
"authors": [
{
"givenName": "G.",
"surname": "Cirio",
"fullName": "G. Cirio",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Olivier",
"fullName": "A. Olivier",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Marchal",
"fullName": "M. Marchal",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Pettre",
"fullName": "J. Pettre",
"affiliation": "INRIA Rennes, Rennes, France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "671-680",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vr/2014/2871/0/06802053",
"title": "An enhanced steering algorithm for redirected walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802053/12OmNCbU2Wt",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223389",
"title": "The effect of head mounted display weight and locomotion method on the perceived naturalness of virtual walking speeds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223389/12OmNwqft3l",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2013/6097/0/06550194",
"title": "Flexible spaces: Dynamic layout generation for infinite walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2013/06550194/12OmNyFU75b",
"parentPublication": {
"id": "proceedings/3dui/2013/6097/0",
"title": "2013 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504742",
"title": "Simultaneous mapping and redirected walking for ad hoc free walking in virtual environments",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504742/12OmNyUFg0I",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08448288",
"title": "Experiencing an Invisible World War I Battlefield Through Narrative-Driven Redirected Walking in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08448288/13bd1fZBGdu",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446587",
"title": "Do Textures and Global Illumination Influence the Perception of Redirected Walking Based on Translational Gain?",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446587/13bd1gJ1v0m",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/07/07946183",
"title": "Walking with Virtual People: Evaluation of Locomotion Interfaces in Dynamic Environments",
"doi": null,
"abstractUrl": "/journal/tg/2018/07/07946183/13rRUEgs2C2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg201404569",
"title": "Establishing the Range of Perceptually Natural Visual Walking Speeds for Virtual Walking-In-Place Locomotion",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg201404569/13rRUxAASTb",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/07/08967011",
"title": "The Role of Binocular Vision in Avoiding Virtual Obstacles While Walking",
"doi": null,
"abstractUrl": "/journal/tg/2021/07/08967011/1gPjyDVBxF6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vrw/2020/6532/0/09090595",
"title": "Reactive Alignment of Virtual and Physical Environments Using Redirected Walking",
"doi": null,
"abstractUrl": "/proceedings-article/vrw/2020/09090595/1jIxm1j8B2w",
"parentPublication": {
"id": "proceedings/vrw/2020/6532/0",
"title": "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040662",
"articleId": "13rRUzpzeB4",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040681",
"articleId": "13rRUILLkvr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgNq",
"name": "ttg2013040671s1.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013040671s1.zip",
"extension": "zip",
"size": "447 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLkvr",
"doi": "10.1109/TVCG.2013.30",
"abstract": "The aim of our experiment is to determine if eye-gaze can be estimated from a virtuality human: to within the accuracies that underpin social interaction; and reliably across gaze poses and camera arrangements likely in every day settings. The scene is set by explaining why Immersive Virtuality Telepresence has the potential to meet the grand challenge of faithfully communicating both the appearance and the focus of attention of a remote human participant within a shared 3D computer-supported context. Within the experiment n=22 participants rotated static 3D virtuality humans, reconstructed from surround images, until they felt most looked at. The dependent variable was absolute angular error, which was compared to that underpinning social gaze behaviour in the natural world. Independent variables were 1) relative orientations of eye, head and body of captured subject; and 2) subset of cameras used to texture the form. Analysis looked for statistical and practical significance and qualitative corroborating evidence. The analysed results tell us much about the importance and detail of the relationship between gaze pose, method of video based reconstruction, and camera arrangement. They tell us that virtuality can reproduce gaze to an accuracy useful in social interaction, but with the adopted method of Video Based Reconstruction, this is highly dependent on combination of gaze pose and camera arrangement. This suggests changes in the VBR approach in order to allow more flexible camera arrangements. The work is of interest to those wanting to support expressive meetings that are both socially and spatially situated, and particular those using or building Immersive Virtuality Telepresence to accomplish this. It is also of relevance to the use of virtuality humans in applications ranging from the study of human interactions to gaming and the crossing of the stage line in films and TV.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The aim of our experiment is to determine if eye-gaze can be estimated from a virtuality human: to within the accuracies that underpin social interaction; and reliably across gaze poses and camera arrangements likely in every day settings. The scene is set by explaining why Immersive Virtuality Telepresence has the potential to meet the grand challenge of faithfully communicating both the appearance and the focus of attention of a remote human participant within a shared 3D computer-supported context. Within the experiment n=22 participants rotated static 3D virtuality humans, reconstructed from surround images, until they felt most looked at. The dependent variable was absolute angular error, which was compared to that underpinning social gaze behaviour in the natural world. Independent variables were 1) relative orientations of eye, head and body of captured subject; and 2) subset of cameras used to texture the form. Analysis looked for statistical and practical significance and qualitative corroborating evidence. The analysed results tell us much about the importance and detail of the relationship between gaze pose, method of video based reconstruction, and camera arrangement. They tell us that virtuality can reproduce gaze to an accuracy useful in social interaction, but with the adopted method of Video Based Reconstruction, this is highly dependent on combination of gaze pose and camera arrangement. This suggests changes in the VBR approach in order to allow more flexible camera arrangements. The work is of interest to those wanting to support expressive meetings that are both socially and spatially situated, and particular those using or building Immersive Virtuality Telepresence to accomplish this. It is also of relevance to the use of virtuality humans in applications ranging from the study of human interactions to gaming and the crossing of the stage line in films and TV.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The aim of our experiment is to determine if eye-gaze can be estimated from a virtuality human: to within the accuracies that underpin social interaction; and reliably across gaze poses and camera arrangements likely in every day settings. The scene is set by explaining why Immersive Virtuality Telepresence has the potential to meet the grand challenge of faithfully communicating both the appearance and the focus of attention of a remote human participant within a shared 3D computer-supported context. Within the experiment n=22 participants rotated static 3D virtuality humans, reconstructed from surround images, until they felt most looked at. The dependent variable was absolute angular error, which was compared to that underpinning social gaze behaviour in the natural world. Independent variables were 1) relative orientations of eye, head and body of captured subject; and 2) subset of cameras used to texture the form. Analysis looked for statistical and practical significance and qualitative corroborating evidence. The analysed results tell us much about the importance and detail of the relationship between gaze pose, method of video based reconstruction, and camera arrangement. They tell us that virtuality can reproduce gaze to an accuracy useful in social interaction, but with the adopted method of Video Based Reconstruction, this is highly dependent on combination of gaze pose and camera arrangement. This suggests changes in the VBR approach in order to allow more flexible camera arrangements. The work is of interest to those wanting to support expressive meetings that are both socially and spatially situated, and particular those using or building Immersive Virtuality Telepresence to accomplish this. It is also of relevance to the use of virtuality humans in applications ranging from the study of human interactions to gaming and the crossing of the stage line in films and TV.",
"title": "Estimating the Gaze of a Virtuality Human",
"normalizedTitle": "Estimating the Gaze of a Virtuality Human",
"fno": "ttg2013040681",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cameras",
"Estimation",
"Accuracy",
"Image Reconstruction",
"Face",
"Visualization",
"Hierarchical Finite State Machines",
"Cinematography",
"Virtual Worlds",
"Virtual Environments",
"Camera Placement"
],
"authors": [
{
"givenName": "D. J.",
"surname": "Roberts",
"fullName": "D. J. Roberts",
"affiliation": "Univ. of Salford, Salford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Rae",
"fullName": "J. Rae",
"affiliation": "Univ. of Roehampton, Roehampton, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "T. W.",
"surname": "Duckworth",
"fullName": "T. W. Duckworth",
"affiliation": "Univ. of Salford, Salford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "C. M.",
"surname": "Moore",
"fullName": "C. M. Moore",
"affiliation": "Univ. of Salford, Salford, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R.",
"surname": "Aspin",
"fullName": "R. Aspin",
"affiliation": "Univ. of Salford, Salford, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "681-690",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmi/2002/1834/0/18340261",
"title": "Active Gaze Tracking for Human-Robot Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/icmi/2002/18340261/12OmNAGNCeq",
"parentPublication": {
"id": "proceedings/icmi/2002/1834/0",
"title": "Proceedings Fourth IEEE International Conference on Multimodal Interfaces",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2013/0015/0/06607532",
"title": "Calibration-free gaze tracking using particle filter",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2013/06607532/12OmNC3XhtM",
"parentPublication": {
"id": "proceedings/icme/2013/0015/0",
"title": "2013 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223361",
"title": "AR-SSVEP for brain-machine interface: Estimating user's gaze in head-mounted display with USB camera",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223361/12OmNwtEEzT",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/est/2012/4791/0/4791a062",
"title": "Liveness Detection Using Gaze Collinearity",
"doi": null,
"abstractUrl": "/proceedings-article/est/2012/4791a062/12OmNxwWoG1",
"parentPublication": {
"id": "proceedings/est/2012/4791/0",
"title": "2012 Third International Conference on Emerging Security Technologies",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fg/2015/6026/1/07163121",
"title": "Robust gaze estimation based on adaptive fusion of multiple cameras",
"doi": null,
"abstractUrl": "/proceedings-article/fg/2015/07163121/12OmNyQGS7p",
"parentPublication": {
"id": "proceedings/fg/2015/6026/5",
"title": "2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2017/4283/0/4283a350",
"title": "Pholder: An Eye-Gaze Assisted Reading Application on Android",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2017/4283a350/12OmNz2kqfc",
"parentPublication": {
"id": "proceedings/sitis/2017/4283/0",
"title": "2017 13th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2014/4308/0/4308a165",
"title": "Estimating Gaze Direction of Vehicle Drivers Using a Smartphone Camera",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2014/4308a165/12OmNzuIjrA",
"parentPublication": {
"id": "proceedings/cvprw/2014/4308/0",
"title": "2014 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2013/02/ttp2013020329",
"title": "Appearance-Based Gaze Estimation Using Visual Saliency",
"doi": null,
"abstractUrl": "/journal/tp/2013/02/ttp2013020329/13rRUyfKIEn",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2019/4803/0/480300g911",
"title": "Gaze360: Physically Unconstrained Gaze Estimation in the Wild",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2019/480300g911/1hQqsJoZU8U",
"parentPublication": {
"id": "proceedings/iccv/2019/4803/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2021/8808/0/09412211",
"title": "Estimating Gaze Points from Facial Landmarks by a Remote Spherical Camera",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2021/09412211/1tmhwomVU08",
"parentPublication": {
"id": "proceedings/icpr/2021/8808/0",
"title": "2020 25th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040671",
"articleId": "13rRUx0xPIH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040691",
"articleId": "13rRUxlgy3G",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxlgy3G",
"doi": "10.1109/TVCG.2013.36",
"abstract": "The perception of objects, depth, and distance has been repeatedly shown to be divergent between virtual and physical environments. We hypothesize that many of these discrepancies stem from incorrect geometric viewing parameters, specifically that physical measurements of eye position are insufficiently precise to provide proper viewing parameters. In this paper, we introduce a perceptual calibration procedure derived from geometric models. While most research has used geometric models to predict perceptual errors, we instead use these models inversely to determine perceptually correct viewing parameters. We study the advantages of these new psychophysically determined viewing parameters compared to the commonly used measured viewing parameters in an experiment with 20 subjects. The perceptually calibrated viewing parameters for the subjects generally produced new virtual eye positions that were wider and deeper than standard practices would estimate. Our study shows that perceptually calibrated viewing parameters can significantly improve depth acuity, distance estimation, and the perception of shape.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The perception of objects, depth, and distance has been repeatedly shown to be divergent between virtual and physical environments. We hypothesize that many of these discrepancies stem from incorrect geometric viewing parameters, specifically that physical measurements of eye position are insufficiently precise to provide proper viewing parameters. In this paper, we introduce a perceptual calibration procedure derived from geometric models. While most research has used geometric models to predict perceptual errors, we instead use these models inversely to determine perceptually correct viewing parameters. We study the advantages of these new psychophysically determined viewing parameters compared to the commonly used measured viewing parameters in an experiment with 20 subjects. The perceptually calibrated viewing parameters for the subjects generally produced new virtual eye positions that were wider and deeper than standard practices would estimate. Our study shows that perceptually calibrated viewing parameters can significantly improve depth acuity, distance estimation, and the perception of shape.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The perception of objects, depth, and distance has been repeatedly shown to be divergent between virtual and physical environments. We hypothesize that many of these discrepancies stem from incorrect geometric viewing parameters, specifically that physical measurements of eye position are insufficiently precise to provide proper viewing parameters. In this paper, we introduce a perceptual calibration procedure derived from geometric models. While most research has used geometric models to predict perceptual errors, we instead use these models inversely to determine perceptually correct viewing parameters. We study the advantages of these new psychophysically determined viewing parameters compared to the commonly used measured viewing parameters in an experiment with 20 subjects. The perceptually calibrated viewing parameters for the subjects generally produced new virtual eye positions that were wider and deeper than standard practices would estimate. Our study shows that perceptually calibrated viewing parameters can significantly improve depth acuity, distance estimation, and the perception of shape.",
"title": "Perceptual Calibration for Immersive Display Environments",
"normalizedTitle": "Perceptual Calibration for Immersive Display Environments",
"fno": "ttg2013040691",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Calibration",
"Solid Modeling",
"Estimation",
"Shape",
"Virtual Environments",
"Cameras",
"Stereo Vision Displays",
"Virtual Reality",
"Calibration",
"Perception",
"Distance Estimation",
"Shape Perception",
"Depth Compression"
],
"authors": [
{
"givenName": "K.",
"surname": "Ponto",
"fullName": "K. Ponto",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Gleicher",
"fullName": "M. Gleicher",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "R. G.",
"surname": "Radwin",
"fullName": "R. G. Radwin",
"affiliation": "Dept. of Biomed. Eng., Univ. of Wisconsin, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Hyun Joon Shin",
"fullName": "Hyun Joon Shin",
"affiliation": "Div. of Digital Media, Ajou Univ., Suwon, South Korea",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "691-700",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wacv/2017/4822/0/07926707",
"title": "Automatic Calibration of a Multiple-Projector Spherical Fish Tank VR Display",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2017/07926707/12OmNAoDhTe",
"parentPublication": {
"id": "proceedings/wacv/2017/4822/0",
"title": "2017 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2008/2174/0/04761481",
"title": "Camera calibration for uneven terrains by observing pedestrians",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2008/04761481/12OmNC8dgoP",
"parentPublication": {
"id": "proceedings/icpr/2008/2174/0",
"title": "ICPR 2008 19th International Conference on Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2011/348/0/06011885",
"title": "Novel projector calibration approaches of multi-resolution display",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2011/06011885/12OmNCd2rEL",
"parentPublication": {
"id": "proceedings/icme/2011/348/0",
"title": "2011 IEEE International Conference on Multimedia and Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2014/2871/0/06802063",
"title": "Automated calibration of display characteristics (ACDC) for head-mounted displays and arbitrary surfaces",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2014/06802063/12OmNxwENpf",
"parentPublication": {
"id": "proceedings/vr/2014/2871/0",
"title": "2014 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/1993/3880/0/00341038",
"title": "Efficient and robust methods of accurate camera calibration",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/1993/00341038/12OmNyo1nQx",
"parentPublication": {
"id": "proceedings/cvpr/1993/3880/0",
"title": "Proceedings of IEEE Conference on Computer Vision and Pattern Recognition",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2017/1032/0/1032c554",
"title": "Low-Dimensionality Calibration through Local Anisotropic Scaling for Robust Hand Model Personalization",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2017/1032c554/12OmNywfKHK",
"parentPublication": {
"id": "proceedings/iccv/2017/1032/0",
"title": "2017 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223434",
"title": "A multi-projector display system of arbitrary shape, size and resolution",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223434/12OmNzYNNiY",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130255",
"title": "Calibration of radially symmetric distortion based on linearity in the calibrated image",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130255/12OmNzzxuyx",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/1991/04/i0370",
"title": "Camera Calibration by Vanishing Lines for 3-D Computer Vision",
"doi": null,
"abstractUrl": "/journal/tp/1991/04/i0370/13rRUwhpBEQ",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798019",
"title": "Large-Scale Projection-Based Immersive Display: The Design and Implementation of LargeSpace",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798019/1cJ17trBZEQ",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040681",
"articleId": "13rRUILLkvr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040701",
"articleId": "13rRUx0xPmZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
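Aside: the perceptual-calibration abstract above inverts a stereo viewing-geometry model, using observers' depth judgments to solve for viewing parameters rather than measuring them physically. Below is a minimal, hypothetical sketch of that inversion for a single parameter (effective eye separation), assuming an on-axis point, a flat screen at distance D, and depth-matching responses; the paper's actual procedure recovers richer parameters (including eye depth) and is not reproduced here.

```python
import numpy as np

def rendered_disparity(eye_sep, screen_dist, depth):
    """On-screen disparity of an on-axis point at `depth`, rendered for a viewer
    with the given eye separation at `screen_dist` (all in the same units)."""
    return eye_sep * (depth - screen_dist) / depth

def fit_effective_eye_separation(disparities, matched_depths, screen_dist):
    """Invert the same geometric model per trial, s = e * (z - D) / z, to get the
    eye separation e that explains each matched depth z, then average.
    Assumes matched depths lie off the screen plane (z != D)."""
    s = np.asarray(disparities, dtype=float)
    z = np.asarray(matched_depths, dtype=float)
    return np.mean(s * z / (z - screen_dist))

# Hypothetical trials: stimuli rendered with a 6.2 cm eye separation, screen at 100 cm.
render_ipd, screen = 6.2, 100.0
targets = np.array([120.0, 150.0, 200.0])               # intended depths (cm)
disp = rendered_disparity(render_ipd, screen, targets)  # disparities actually shown
matched = np.array([115.0, 141.0, 183.0])               # depths the observer matched
print(fit_effective_eye_separation(disp, matched, screen))  # calibrated eye separation (cm)
```

In this made-up example the observer matches targets slightly too near, and the fitted eye separation comes out wider than the rendering value, in the same spirit as the abstract's finding that calibrated eye positions were wider than standard measurement would suggest.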
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPmZ",
"doi": "10.1109/TVCG.2013.37",
"abstract": "The following series of experiments explore the effect of static peripheral stimulation on the perception of distance and spatial scale in a typical head-mounted virtual environment. It was found that applying constant white light in an observers far periphery enabled the observer to more accurately judge distances using blind walking. An effect of similar magnitude was also found when observers estimated the size of a virtual space using a visual scale task. The presence of the effect across multiple psychophysical tasks provided confidence that a perceptual change was, in fact, being invoked by the addition of the peripheral stimulation. These results were also compared to observer performance in a very large field of view virtual environment and in the real world. The subsequent findings raise the possibility that distance judgments in virtual environments might be considerably more similar to those in the real world than previous work has suggested.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The following series of experiments explore the effect of static peripheral stimulation on the perception of distance and spatial scale in a typical head-mounted virtual environment. It was found that applying constant white light in an observers far periphery enabled the observer to more accurately judge distances using blind walking. An effect of similar magnitude was also found when observers estimated the size of a virtual space using a visual scale task. The presence of the effect across multiple psychophysical tasks provided confidence that a perceptual change was, in fact, being invoked by the addition of the peripheral stimulation. These results were also compared to observer performance in a very large field of view virtual environment and in the real world. The subsequent findings raise the possibility that distance judgments in virtual environments might be considerably more similar to those in the real world than previous work has suggested.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The following series of experiments explore the effect of static peripheral stimulation on the perception of distance and spatial scale in a typical head-mounted virtual environment. It was found that applying constant white light in an observers far periphery enabled the observer to more accurately judge distances using blind walking. An effect of similar magnitude was also found when observers estimated the size of a virtual space using a visual scale task. The presence of the effect across multiple psychophysical tasks provided confidence that a perceptual change was, in fact, being invoked by the addition of the peripheral stimulation. These results were also compared to observer performance in a very large field of view virtual environment and in the real world. The subsequent findings raise the possibility that distance judgments in virtual environments might be considerably more similar to those in the real world than previous work has suggested.",
"title": "Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments",
"normalizedTitle": "Peripheral Stimulation and its Effect on Perceived Spatial Scale in Virtual Environments",
"fno": "ttg2013040701",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Virtual Environments",
"Legged Locomotion",
"Visualization",
"Observers",
"Adaptive Optics",
"Stimulated Emission",
"Optical Imaging",
"Periphery",
"Virtual Environments",
"Spatial Perception",
"Distance Judgments",
"Field Of View"
],
"authors": [
{
"givenName": "J. A.",
"surname": "Jones",
"fullName": "J. A. Jones",
"affiliation": "Inst. for Creative Technol., Univ. of Southern California, Los Angeles, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J. Edward",
"surname": "Swan",
"fullName": "J. Edward Swan",
"affiliation": "Dept. of Comput. Sci. & Eng., Mississippi State Univ., Starkville, MS, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M.",
"surname": "Bolas",
"fullName": "M. Bolas",
"affiliation": "Inst. for Creative Technol., Univ. of Southern California, Los Angeles, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "701-710",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dui/2016/0842/0/07460066",
"title": "Rhythmic vibrations to heels and forefeet to produce virtual walking",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460066/12OmNBQkwZJ",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2005/8929/0/01492799",
"title": "Virtual acceleration with galvanic vestibular stimulation in a virtual reality environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2005/01492799/12OmNwJPMZr",
"parentPublication": {
"id": "proceedings/vr/2005/8929/0",
"title": "IEEE Virtual Reality 2005",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2014/5209/0/5209c855",
"title": "Automatic Prediction of Perceived Traits Using Visual Cues under Varied Situational Context",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2014/5209c855/12OmNwdbVaL",
"parentPublication": {
"id": "proceedings/icpr/2014/5209/0",
"title": "2014 22nd International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2017/6647/0/07892287",
"title": "Evaluation of airflow effect on a VR walk",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2017/07892287/12OmNwtEEvF",
"parentPublication": {
"id": "proceedings/vr/2017/6647/0",
"title": "2017 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2015/1727/0/07223328",
"title": "The effect of visual display properties and gain presentation mode on the perceived naturalness of virtual walking speeds",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2015/07223328/12OmNxGja3F",
"parentPublication": {
"id": "proceedings/vr/2015/1727/0",
"title": "2015 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504715",
"title": "Vestibulohaptic passive stimulation for a walking sensation",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504715/12OmNxu6p8R",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2018/3365/0/08446282",
"title": "Illusory Body Ownership Between Different Body Parts: Synchronization of Right Thumb and Right Arm",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2018/08446282/13bd1gQYgE6",
"parentPublication": {
"id": "proceedings/vr/2018/3365/0",
"title": "2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/th/2016/04/07534882",
"title": "Tactile Apparent Motion on the Torso Modulates Perceived Forward Self-Motion Velocity",
"doi": null,
"abstractUrl": "/journal/th/2016/04/07534882/13rRUwbs1SE",
"parentPublication": {
"id": "trans/th",
"title": "IEEE Transactions on Haptics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08797756",
"title": "Field of View and Forward Motion Discrimination in Virtual Reality",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08797756/1cJ0UegDTgY",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08794563",
"title": "Estimation of Rotation Gain Thresholds Considering FOV, Gender, and Distractors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08794563/1dNHkjixhDi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040691",
"articleId": "13rRUxlgy3G",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040711",
"articleId": "13rRUwcAqqf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwcAqqf",
"doi": "10.1109/TVCG.2013.42",
"abstract": "Autism Spectrum Disorders (ASD) are characterized by atypical patterns of behaviors and impairments in social communication. Among the fundamental social impairments in the ASD population are challenges in appropriately recognizing and responding to facial expressions. Traditional intervention approaches often require intensive support and well-trained therapists to address core deficits, with many with ASD having tremendous difficulty accessing such care due to lack of available trained therapists as well as intervention costs. As a result, emerging technology such as virtual reality (VR) has the potential to offer useful technology-enabled intervention systems. In this paper, an innovative VR-based facial emotional expression presentation system was developed that allows monitoring of eye gaze and physiological signals related to emotion identification to explore new efficient therapeutic paradigms. A usability study of this new system involving ten adolescents with ASD and ten typically developing adolescents as a control group was performed. The eye tracking and physiological data were analyzed to determine intragroup and intergroup variations of gaze and physiological patterns. Performance data, eye tracking indices and physiological features indicated that there were differences in the way adolescents with ASD process and recognize emotional faces compared to their typically developing peers. These results will be used in the future for an online adaptive VR-based multimodal social interaction system to improve emotion recognition abilities of individuals with ASD.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Autism Spectrum Disorders (ASD) are characterized by atypical patterns of behaviors and impairments in social communication. Among the fundamental social impairments in the ASD population are challenges in appropriately recognizing and responding to facial expressions. Traditional intervention approaches often require intensive support and well-trained therapists to address core deficits, with many with ASD having tremendous difficulty accessing such care due to lack of available trained therapists as well as intervention costs. As a result, emerging technology such as virtual reality (VR) has the potential to offer useful technology-enabled intervention systems. In this paper, an innovative VR-based facial emotional expression presentation system was developed that allows monitoring of eye gaze and physiological signals related to emotion identification to explore new efficient therapeutic paradigms. A usability study of this new system involving ten adolescents with ASD and ten typically developing adolescents as a control group was performed. The eye tracking and physiological data were analyzed to determine intragroup and intergroup variations of gaze and physiological patterns. Performance data, eye tracking indices and physiological features indicated that there were differences in the way adolescents with ASD process and recognize emotional faces compared to their typically developing peers. These results will be used in the future for an online adaptive VR-based multimodal social interaction system to improve emotion recognition abilities of individuals with ASD.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Autism Spectrum Disorders (ASD) are characterized by atypical patterns of behaviors and impairments in social communication. Among the fundamental social impairments in the ASD population are challenges in appropriately recognizing and responding to facial expressions. Traditional intervention approaches often require intensive support and well-trained therapists to address core deficits, with many with ASD having tremendous difficulty accessing such care due to lack of available trained therapists as well as intervention costs. As a result, emerging technology such as virtual reality (VR) has the potential to offer useful technology-enabled intervention systems. In this paper, an innovative VR-based facial emotional expression presentation system was developed that allows monitoring of eye gaze and physiological signals related to emotion identification to explore new efficient therapeutic paradigms. A usability study of this new system involving ten adolescents with ASD and ten typically developing adolescents as a control group was performed. The eye tracking and physiological data were analyzed to determine intragroup and intergroup variations of gaze and physiological patterns. Performance data, eye tracking indices and physiological features indicated that there were differences in the way adolescents with ASD process and recognize emotional faces compared to their typically developing peers. These results will be used in the future for an online adaptive VR-based multimodal social interaction system to improve emotion recognition abilities of individuals with ASD.",
"title": "Understanding How Adolescents with Autism Respond to Facial Expressions in Virtual Reality Environments",
"normalizedTitle": "Understanding How Adolescents with Autism Respond to Facial Expressions in Virtual Reality Environments",
"fno": "ttg2013040711",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Variable Speed Drives",
"Biomedical Monitoring",
"Monitoring",
"Animation",
"Emotion Recognition",
"Physiology",
"Autism",
"Vr Based Response Systems",
"3 D Interaction",
"Multimodal Interaction",
"Psychology",
"Usability"
],
"authors": [
{
"givenName": "E.",
"surname": "Bekele",
"fullName": "E. Bekele",
"affiliation": "EECS Dept., Vanderbilt Univ., Nashville, TN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Zhi Zheng",
"fullName": "Zhi Zheng",
"affiliation": "Electr. Eng. & Comput. Sci. Dept., Vanderbilt Univ., Nashville, TN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "A.",
"surname": "Swanson",
"fullName": "A. Swanson",
"affiliation": "Treatment & Res. in Autism Disorders (TRIAD), Vanderbilt Univ., Nashville, TN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "J.",
"surname": "Crittendon",
"fullName": "J. Crittendon",
"affiliation": "Dept. of Pediatrics & Psychiatry, Vanderbilt Univ., Nashville, TN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Z.",
"surname": "Warren",
"fullName": "Z. Warren",
"affiliation": "Dept. of Pediatrics & Psychiatry, Vanderbilt Univ., Nashville, TN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "N.",
"surname": "Sarkar",
"fullName": "N. Sarkar",
"affiliation": "Mech. Eng. Dept., Vanderbilt Univ., Nashville, TN, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "711-720",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ichi/2013/5089/0/5089a484",
"title": "Can NAO Robot Improve Eye-Gaze Attention of Children with High Functioning Autism?",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2013/5089a484/12OmNAoDifg",
"parentPublication": {
"id": "proceedings/ichi/2013/5089/0",
"title": "2013 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icccnt/2013/3926/0/06726798",
"title": "Considerations in Autism therapy using robotics",
"doi": null,
"abstractUrl": "/proceedings-article/icccnt/2013/06726798/12OmNvm6VGY",
"parentPublication": {
"id": "proceedings/icccnt/2013/3926/0",
"title": "2013 Fourth International Conference on Computing, Communications and Networking Technologies (ICCCNT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/passat-socialcom/2012/5638/0/06406398",
"title": "Robotic Social Therapy on Children with Autism: Preliminary Evaluation through Multi-parametric Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/passat-socialcom/2012/06406398/12OmNxxvAI5",
"parentPublication": {
"id": "proceedings/passat-socialcom/2012/5638/0",
"title": "2012 International Conference on Privacy, Security, Risk and Trust (PASSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344621",
"title": "Cognitive state measurement from eye gaze analysis in an intelligent virtual reality driving system for autism intervention",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344621/12OmNyfdOR4",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/passat-socialcom/2012/5638/0/06406352",
"title": "Robotic Social Therapy on Children with Autism: Preliminary Evaluation through Multi-parametric Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/passat-socialcom/2012/06406352/12OmNzhELjd",
"parentPublication": {
"id": "proceedings/passat-socialcom/2012/5638/0",
"title": "2012 International Conference on Privacy, Security, Risk and Trust (PASSAT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2017/02/07495013",
"title": "Cognitive Load Measurement in a Virtual Reality-Based Driving System for Autism Intervention",
"doi": null,
"abstractUrl": "/journal/ta/2017/02/07495013/13rRUwhpBMP",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0/08328405",
"title": "Buddy: A Virtual Life Coaching System for Children and Adolescents with High Functioning Autism",
"doi": null,
"abstractUrl": "/proceedings-article/dasc-picom-datacom-cyberscitech/2017/08328405/17D45Wuc374",
"parentPublication": {
"id": "proceedings/dasc-picom-datacom-cyberscitech/2017/1956/0",
"title": "2017 IEEE 15th Intl Conf on Dependable, Autonomic and Secure Computing, 15th Intl Conf on Pervasive Intelligence and Computing, 3rd Intl Conf on Big Data Intelligence and Computing and Cyber Science and Technology Congress(DASC/PiCom/DataCom/CyberSciTech)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2019/1377/0/08798032",
"title": "Teachers' Views on how to use Virtual Reality to Instruct Children and Adolescents Diagnosed with Autism Spectrum Disorder",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2019/08798032/1cJ0YBL70AM",
"parentPublication": {
"id": "proceedings/vr/2019/1377/0",
"title": "2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2019/2607/1/260701a630",
"title": "Application of Reconstructed Phase Space in Autism Intervention",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2019/260701a630/1cYiwZcclRm",
"parentPublication": {
"id": "proceedings/compsac/2019/2607/1",
"title": "2019 IEEE 43rd Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2019/3888/0/08925474",
"title": "Computational Modeling of Psycho-physiological Arousal and Social Initiation of children with Autism in Interventions through Full-Body Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2019/08925474/1fHGE1f2lNe",
"parentPublication": {
"id": "proceedings/acii/2019/3888/0",
"title": "2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040701",
"articleId": "13rRUx0xPmZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013040000xix",
"articleId": "13rRUxD9gXF",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvvc5OL",
"title": "April",
"year": "2013",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "April",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxD9gXF",
"doi": "10.1109/TVCG.2013.45",
"abstract": "This index covers all technical items - papers, correspondence, reviews, etc. - that appeared in this periodical during the year, and items from previous years that were commented upon or corrected in this year. Departments and other items may also be covered if they have been judged to have archival value. The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the co-authors' names, the title of the paper or other item, and its location, specified by the publication abbreviation, year, month, and inclusive pagination. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication abbreviation, month, and year, and inclusive pages. Note that the item title is found only under the primary entry in the Author Index.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This index covers all technical items - papers, correspondence, reviews, etc. - that appeared in this periodical during the year, and items from previous years that were commented upon or corrected in this year. Departments and other items may also be covered if they have been judged to have archival value. The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the co-authors' names, the title of the paper or other item, and its location, specified by the publication abbreviation, year, month, and inclusive pagination. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication abbreviation, month, and year, and inclusive pages. Note that the item title is found only under the primary entry in the Author Index.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This index covers all technical items - papers, correspondence, reviews, etc. - that appeared in this periodical during the year, and items from previous years that were commented upon or corrected in this year. Departments and other items may also be covered if they have been judged to have archival value. The Author Index contains the primary entry for each item, listed under the first author's name. The primary entry includes the co-authors' names, the title of the paper or other item, and its location, specified by the publication abbreviation, year, month, and inclusive pagination. The Subject Index contains entries describing the item under all appropriate subject headings, plus the first author's name, the publication abbreviation, month, and year, and inclusive pages. Note that the item title is found only under the primary entry in the Author Index.",
"title": "Author index",
"normalizedTitle": "Author index",
"fno": "ttg2013040000xix",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "04",
"pubDate": "2013-04-01 00:00:00",
"pubType": "trans",
"pages": "xix",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013040711",
"articleId": "13rRUwcAqqf",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIJuxva",
"doi": "10.1109/2945.556500",
"abstract": "Abstract—This research explores the principles, implementation, and optimization of a competitive volume compression system based on fractal image compression. The extension of fractal image compression to volumetric data is trivial in theory. However, the simple addition of a dimension to existing fractal image compression algorithms results in infeasible compression times and noncompetitive volume compression results. This paper extends several fractal image compression enhancements to perform properly and efficiently on volumetric data, and introduces a new 3D edge classification scheme based on principal component analysis. Numerous experiments over the many parameters of fractal volume compression suggest aggressive settings of its system parameters. At this peak efficiency, fractal volume compression surpasses vector quantization and approaches within 1 dB PSNR of the discrete cosine transform. When compared to the DCT, fractal volume compression represents surfaces in volumes exceptionally well at high compression rates, and the artifacts of its compression error appear as noise instead of deceptive smoothing or distracting ringing.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This research explores the principles, implementation, and optimization of a competitive volume compression system based on fractal image compression. The extension of fractal image compression to volumetric data is trivial in theory. However, the simple addition of a dimension to existing fractal image compression algorithms results in infeasible compression times and noncompetitive volume compression results. This paper extends several fractal image compression enhancements to perform properly and efficiently on volumetric data, and introduces a new 3D edge classification scheme based on principal component analysis. Numerous experiments over the many parameters of fractal volume compression suggest aggressive settings of its system parameters. At this peak efficiency, fractal volume compression surpasses vector quantization and approaches within 1 dB PSNR of the discrete cosine transform. When compared to the DCT, fractal volume compression represents surfaces in volumes exceptionally well at high compression rates, and the artifacts of its compression error appear as noise instead of deceptive smoothing or distracting ringing.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This research explores the principles, implementation, and optimization of a competitive volume compression system based on fractal image compression. The extension of fractal image compression to volumetric data is trivial in theory. However, the simple addition of a dimension to existing fractal image compression algorithms results in infeasible compression times and noncompetitive volume compression results. This paper extends several fractal image compression enhancements to perform properly and efficiently on volumetric data, and introduces a new 3D edge classification scheme based on principal component analysis. Numerous experiments over the many parameters of fractal volume compression suggest aggressive settings of its system parameters. At this peak efficiency, fractal volume compression surpasses vector quantization and approaches within 1 dB PSNR of the discrete cosine transform. When compared to the DCT, fractal volume compression represents surfaces in volumes exceptionally well at high compression rates, and the artifacts of its compression error appear as noise instead of deceptive smoothing or distracting ringing.",
"title": "Fractal Volume Compression",
"normalizedTitle": "Fractal Volume Compression",
"fno": "v0313",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Compression",
"Fractal",
"Iterated Function System",
"Volume Visualization"
],
"authors": [],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "313-322",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "v0281",
"articleId": "13rRUyogGA1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
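Aside: the fractal volume compression abstract above mentions a 3D edge-classification scheme based on principal component analysis, used to restrict block matching. The sketch below shows one plausible form of such a classifier, reading the eigenvalue spectrum of a voxel block's gradient covariance; it is an illustrative structure-tensor-style stand-in, not the paper's actual scheme, and the thresholds are made up.

```python
import numpy as np

def classify_block(block, smooth_thresh=1e-3, planar_ratio=5.0):
    """Classify a voxel block as 'smooth', 'edge' (one dominant gradient direction,
    i.e. a surface boundary), or 'complex', from the eigenvalues of the covariance
    of its intensity gradients. `block` is a 3-D numpy array of intensities."""
    gx, gy, gz = np.gradient(block.astype(float))
    g = np.stack([gx.ravel(), gy.ravel(), gz.ravel()], axis=1)   # N x 3 gradient samples
    cov = g.T @ g / g.shape[0]
    evals = np.sort(np.linalg.eigvalsh(cov))[::-1]               # descending eigenvalues
    if evals[0] < smooth_thresh:
        return "smooth"                                          # almost no variation
    if evals[0] > planar_ratio * max(evals[1], 1e-12):
        return "edge"                                            # a single dominant direction
    return "complex"

# Hypothetical example: an 8x8x8 block containing a sharp axis-aligned step.
blk = np.zeros((8, 8, 8))
blk[:, :, 4:] = 1.0
print(classify_block(blk))                   # -> 'edge'
print(classify_block(np.zeros((8, 8, 8))))   # -> 'smooth'
```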
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyogGA1",
"doi": "10.1109/TVCG.1996.10004",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Editorial",
"normalizedTitle": "Editorial",
"fno": "v0281",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "281-282",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0313",
"articleId": "13rRUIJuxva",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0283",
"articleId": "13rRUwvT9gj",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwvT9gj",
"doi": "10.1109/2945.556498",
"abstract": "Abstract—We describe a new framework for efficiently computing and storing global illumination effects for complex, animated environments. The new framework allows the rapid generation of sequences representing any arbitrary path in a \"view space\" within an environment in which both the viewer and objects move. The global illumination is stored as time sequences of range-images at base locations that span the view space. We present algorithms for determining locations for these base images, and the time steps required to adequately capture the effects of object motion. We also present algorithms for computing the global illumination in the base images that exploit spatial and temporal coherence by considering direct and indirect illumination separately. We discuss an initial implementation using the new framework. Results and analysis of our implementation demonstrate the effectiveness of the individual phases of the approach; we conclude with an application of the complete framework to a complex environment that includes object motion.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We describe a new framework for efficiently computing and storing global illumination effects for complex, animated environments. The new framework allows the rapid generation of sequences representing any arbitrary path in a \"view space\" within an environment in which both the viewer and objects move. The global illumination is stored as time sequences of range-images at base locations that span the view space. We present algorithms for determining locations for these base images, and the time steps required to adequately capture the effects of object motion. We also present algorithms for computing the global illumination in the base images that exploit spatial and temporal coherence by considering direct and indirect illumination separately. We discuss an initial implementation using the new framework. Results and analysis of our implementation demonstrate the effectiveness of the individual phases of the approach; we conclude with an application of the complete framework to a complex environment that includes object motion.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We describe a new framework for efficiently computing and storing global illumination effects for complex, animated environments. The new framework allows the rapid generation of sequences representing any arbitrary path in a \"view space\" within an environment in which both the viewer and objects move. The global illumination is stored as time sequences of range-images at base locations that span the view space. We present algorithms for determining locations for these base images, and the time steps required to adequately capture the effects of object motion. We also present algorithms for computing the global illumination in the base images that exploit spatial and temporal coherence by considering direct and indirect illumination separately. We discuss an initial implementation using the new framework. Results and analysis of our implementation demonstrate the effectiveness of the individual phases of the approach; we conclude with an application of the complete framework to a complex environment that includes object motion.",
"title": "Implementation and Analysis of an Image-Based Global Illumination Framework for Animated Environments",
"normalizedTitle": "Implementation and Analysis of an Image-Based Global Illumination Framework for Animated Environments",
"fno": "v0283",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Animation",
"Global Illumination",
"Image Based Rendering",
"Radiosity",
"Ray Tracing",
"Walk Throughs"
],
"authors": [
{
"givenName": "Jeffry",
"surname": "Nimeroff",
"fullName": "Jeffry Nimeroff",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Julie",
"surname": "Dorsey",
"fullName": "Julie Dorsey",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Holly",
"surname": "Rushmeier",
"fullName": "Holly Rushmeier",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "283-298",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0281",
"articleId": "13rRUyogGA1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0299",
"articleId": "13rRUx0xPhX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPhX",
"doi": "10.1109/2945.556499",
"abstract": "Abstract—A definitive understanding of irradiance behavior in penumbral regions has been hard to come by, mainly due to the computational expense of determining the visible parts of an area light source. Consequently, sampling strategies have been mostly ad hoc, and evaluation of the resulting approximations has been difficult. In this paper, the structure of penumbral irradiance is investigated empirically and numerically. This study has been made feasible by the use of the discontinuity mesh and the backprojection, an efficient data structure representing visibility in regions of partial occlusion. Regions of penumbrae in which irradiance varies nonmonotonically are characterized empirically, and numerical tests are performed to determine the frequency of their occurrence. This study inspired the development of two algorithms for the construction of interpolating approximations to irradiance: One algorithm reduces the number of edges in the mesh defining the interpolant domain, and the other algorithm chooses among linear, quadratic, and mixed interpolants based on irradiance monotonicity. Results from numerical tests and images are presented that demonstrate good performance of the new algorithms for various realistic test configurations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—A definitive understanding of irradiance behavior in penumbral regions has been hard to come by, mainly due to the computational expense of determining the visible parts of an area light source. Consequently, sampling strategies have been mostly ad hoc, and evaluation of the resulting approximations has been difficult. In this paper, the structure of penumbral irradiance is investigated empirically and numerically. This study has been made feasible by the use of the discontinuity mesh and the backprojection, an efficient data structure representing visibility in regions of partial occlusion. Regions of penumbrae in which irradiance varies nonmonotonically are characterized empirically, and numerical tests are performed to determine the frequency of their occurrence. This study inspired the development of two algorithms for the construction of interpolating approximations to irradiance: One algorithm reduces the number of edges in the mesh defining the interpolant domain, and the other algorithm chooses among linear, quadratic, and mixed interpolants based on irradiance monotonicity. Results from numerical tests and images are presented that demonstrate good performance of the new algorithms for various realistic test configurations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—A definitive understanding of irradiance behavior in penumbral regions has been hard to come by, mainly due to the computational expense of determining the visible parts of an area light source. Consequently, sampling strategies have been mostly ad hoc, and evaluation of the resulting approximations has been difficult. In this paper, the structure of penumbral irradiance is investigated empirically and numerically. This study has been made feasible by the use of the discontinuity mesh and the backprojection, an efficient data structure representing visibility in regions of partial occlusion. Regions of penumbrae in which irradiance varies nonmonotonically are characterized empirically, and numerical tests are performed to determine the frequency of their occurrence. This study inspired the development of two algorithms for the construction of interpolating approximations to irradiance: One algorithm reduces the number of edges in the mesh defining the interpolant domain, and the other algorithm chooses among linear, quadratic, and mixed interpolants based on irradiance monotonicity. Results from numerical tests and images are presented that demonstrate good performance of the new algorithms for various realistic test configurations.",
"title": "Structured Penumbral Irradiance Computation",
"normalizedTitle": "Structured Penumbral Irradiance Computation",
"fno": "v0299",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rendering",
"Primary And Global Illumination",
"Sampling",
"Interpolation",
"Structure",
"Penumbra",
"Experimental Study",
"Irradiance",
"Radiosity",
"Discontinuity Meshing",
"Backprojection",
"Mesh Simplification",
"Interpolant Degree Reduction"
],
"authors": [
{
"givenName": "George",
"surname": "Drettakis",
"fullName": "George Drettakis",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Eugene L.",
"surname": "Fiume",
"fullName": "Eugene L. Fiume",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "299-312",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0283",
"articleId": "13rRUwvT9gj",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0323",
"articleId": "13rRUyv53Fa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
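Aside: the penumbral-irradiance abstract above chooses among linear, quadratic, and mixed interpolants depending on whether irradiance varies monotonically over a region. The toy sketch below only illustrates a monotonicity test on samples along one mesh edge and a degree choice keyed to it; which degree the paper assigns to which case, and how its interpolants are built over the discontinuity mesh, is an assumption here and not the paper's rule.

```python
import numpy as np

def is_monotonic(samples, eps=1e-9):
    """True if the sampled irradiance values never change direction."""
    d = np.diff(np.asarray(samples, dtype=float))
    return bool(np.all(d >= -eps) or np.all(d <= eps))

def choose_interpolant(samples):
    """Pick an interpolant degree for one edge from sampled irradiance:
    linear where the variation is monotonic, quadratic otherwise (illustrative policy)."""
    return "linear" if is_monotonic(samples) else "quadratic"

# Hypothetical samples across a penumbra: a smooth fall-off vs. a local bump.
print(choose_interpolant([1.0, 0.8, 0.5, 0.2, 0.0]))   # -> 'linear'
print(choose_interpolant([0.2, 0.6, 0.4, 0.1, 0.0]))   # -> 'quadratic'
```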
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyv53Fa",
"doi": "10.1109/2945.556501",
"abstract": "Abstract—We present algorithms for interactive rendering of large-scale NURBS models. The algorithms convert the NURBS surfaces to Bézier surfaces, tessellate each Bézier surface into triangles, and render them using the triangle-rendering capabilities common to current graphics systems. This paper presents algorithms for computing tight bounds on surface properties in order to generate high quality tessellation of Bézier surfaces. We introduce enhanced visibility determination techniques and present methods to make efficient use of coherence between successive frames. In addition, we also discuss issues in parallelization of these techniques. The algorithm also avoids polygonization anomalies like cracks. Our algorithms work well in practice and, on high-end graphics systems, are able to display models described using thousands of Bézier surfaces at interactive frame rates.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We present algorithms for interactive rendering of large-scale NURBS models. The algorithms convert the NURBS surfaces to Bézier surfaces, tessellate each Bézier surface into triangles, and render them using the triangle-rendering capabilities common to current graphics systems. This paper presents algorithms for computing tight bounds on surface properties in order to generate high quality tessellation of Bézier surfaces. We introduce enhanced visibility determination techniques and present methods to make efficient use of coherence between successive frames. In addition, we also discuss issues in parallelization of these techniques. The algorithm also avoids polygonization anomalies like cracks. Our algorithms work well in practice and, on high-end graphics systems, are able to display models described using thousands of Bézier surfaces at interactive frame rates.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We present algorithms for interactive rendering of large-scale NURBS models. The algorithms convert the NURBS surfaces to Bézier surfaces, tessellate each Bézier surface into triangles, and render them using the triangle-rendering capabilities common to current graphics systems. This paper presents algorithms for computing tight bounds on surface properties in order to generate high quality tessellation of Bézier surfaces. We introduce enhanced visibility determination techniques and present methods to make efficient use of coherence between successive frames. In addition, we also discuss issues in parallelization of these techniques. The algorithm also avoids polygonization anomalies like cracks. Our algorithms work well in practice and, on high-end graphics systems, are able to display models described using thousands of Bézier surfaces at interactive frame rates.",
"title": "Interactive Display of Large NURBS Models",
"normalizedTitle": "Interactive Display of Large NURBS Models",
"fno": "v0323",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"NURBS",
"Tessellation",
"Triangulation",
"Visibility",
"Interactive Display",
"CAD",
"Parallel Algorithm"
],
"authors": [
{
"givenName": "Subodh",
"surname": "Kumar",
"fullName": "Subodh Kumar",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dinesh",
"surname": "Manocha",
"fullName": "Dinesh Manocha",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Anselmo",
"surname": "Lastra",
"fullName": "Anselmo Lastra",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "323-336",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0299",
"articleId": "13rRUx0xPhX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0337",
"articleId": "13rRUyfKIHv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
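Aside: the NURBS-rendering abstract above converts surfaces to Bézier patches and tessellates each patch into triangles. The sketch below shows only the uniform-sampling core of that step (de Casteljau evaluation of a tensor-product Bézier patch plus a regular triangle grid); the paper's contributions of tight tessellation bounds, visibility determination, frame-to-frame coherence, and crack avoidance are omitted, and the sample counts are arbitrary.

```python
import numpy as np

def de_casteljau(ctrl, t):
    """Evaluate a 1-D Bezier curve with control points ctrl (n x dim) at parameter t."""
    pts = np.asarray(ctrl, dtype=float)
    while len(pts) > 1:
        pts = (1 - t) * pts[:-1] + t * pts[1:]
    return pts[0]

def eval_patch(ctrl_grid, u, v):
    """Evaluate a tensor-product Bezier patch; ctrl_grid has shape (n, m, 3)."""
    rows = np.array([de_casteljau(row, v) for row in ctrl_grid])
    return de_casteljau(rows, u)

def tessellate(ctrl_grid, nu=8, nv=8):
    """Uniformly sample the patch and emit a triangle mesh (vertices, index triples)."""
    us, vs = np.linspace(0, 1, nu), np.linspace(0, 1, nv)
    verts = np.array([eval_patch(ctrl_grid, u, v) for u in us for v in vs])
    tris = []
    for i in range(nu - 1):
        for j in range(nv - 1):
            a, b, c, d = i * nv + j, i * nv + j + 1, (i + 1) * nv + j, (i + 1) * nv + j + 1
            tris += [(a, b, d), (a, d, c)]
    return verts, np.array(tris)

# Hypothetical bicubic patch: a 4x4 control grid lifted into a bump.
gx, gy = np.meshgrid(np.linspace(0, 1, 4), np.linspace(0, 1, 4), indexing="ij")
ctrl = np.stack([gx, gy, np.sin(np.pi * gx) * np.sin(np.pi * gy)], axis=-1)
v, t = tessellate(ctrl)
print(v.shape, t.shape)   # (64, 3) (98, 3)
```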
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyfKIHv",
"doi": "10.1109/2945.556502",
"abstract": "Abstract—This paper describes an image metamorphosis technique to handle scattered feature constraints specified with points, polylines, and splines. Solutions to the following three problems are presented: feature specification, warp generation, and transition control. We demonstrate the use of snakes to reduce the burden of feature specification. Next, we propose the use of multilevel free-form deformations (MFFD) to compute C2-continuous and one-to-one mapping functions among the specified features. The resulting technique, based on B-spline approximation, is simpler and faster than previous warp generation methods. Furthermore, it produces smooth image transformations without undesirable ripples and foldovers. Finally, we simplify the MFFD algorithm to derive transition functions to control geometry and color blending. Implementation details are furnished and comparisons among various metamorphosis techniques are presented.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This paper describes an image metamorphosis technique to handle scattered feature constraints specified with points, polylines, and splines. Solutions to the following three problems are presented: feature specification, warp generation, and transition control. We demonstrate the use of snakes to reduce the burden of feature specification. Next, we propose the use of multilevel free-form deformations (MFFD) to compute C2-continuous and one-to-one mapping functions among the specified features. The resulting technique, based on B-spline approximation, is simpler and faster than previous warp generation methods. Furthermore, it produces smooth image transformations without undesirable ripples and foldovers. Finally, we simplify the MFFD algorithm to derive transition functions to control geometry and color blending. Implementation details are furnished and comparisons among various metamorphosis techniques are presented.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This paper describes an image metamorphosis technique to handle scattered feature constraints specified with points, polylines, and splines. Solutions to the following three problems are presented: feature specification, warp generation, and transition control. We demonstrate the use of snakes to reduce the burden of feature specification. Next, we propose the use of multilevel free-form deformations (MFFD) to compute C2-continuous and one-to-one mapping functions among the specified features. The resulting technique, based on B-spline approximation, is simpler and faster than previous warp generation methods. Furthermore, it produces smooth image transformations without undesirable ripples and foldovers. Finally, we simplify the MFFD algorithm to derive transition functions to control geometry and color blending. Implementation details are furnished and comparisons among various metamorphosis techniques are presented.",
"title": "Image Metamorphosis with Scattered Feature Constraints",
"normalizedTitle": "Image Metamorphosis with Scattered Feature Constraints",
"fno": "v0337",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Metamorphosis",
"Morphing",
"Snakes",
"Multilevel Free Form Deformation",
"Multilevel B Spline Interpolation"
],
"authors": [
{
"givenName": "Seungyong",
"surname": "Lee",
"fullName": "Seungyong Lee",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "George",
"surname": "Woberg",
"fullName": "George Woberg",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kyung-Yong",
"surname": "Chwa",
"fullName": "Kyung-Yong Chwa",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sung Yong",
"surname": "Shin",
"fullName": "Sung Yong Shin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "337-354",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0323",
"articleId": "13rRUyv53Fa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0355",
"articleId": "13rRUwjGoLv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjGoLv",
"doi": "10.1109/2945.556503",
"abstract": "Abstract—One of the most important ways of visualizing fluid flow is the construction of streamlines, which are lines that are everywhere tangential to the local fluid velocity. Stream surfaces are defined as surfaces through which no fluid penetrates. Streamlines can therefore be computed from the intersection of two nonparallel stream surfaces. This paper presents new algorithms for the computation of dual stream functions from Computational Fluid Dynamics data that is defined on an unstructured tetrahedral mesh. These algorithms are compared with standard numerical routines for computing streamlines, and are shown to be quicker and more accurate than techniques involving numerical integration along the streamline.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—One of the most important ways of visualizing fluid flow is the construction of streamlines, which are lines that are everywhere tangential to the local fluid velocity. Stream surfaces are defined as surfaces through which no fluid penetrates. Streamlines can therefore be computed from the intersection of two nonparallel stream surfaces. This paper presents new algorithms for the computation of dual stream functions from Computational Fluid Dynamics data that is defined on an unstructured tetrahedral mesh. These algorithms are compared with standard numerical routines for computing streamlines, and are shown to be quicker and more accurate than techniques involving numerical integration along the streamline.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—One of the most important ways of visualizing fluid flow is the construction of streamlines, which are lines that are everywhere tangential to the local fluid velocity. Stream surfaces are defined as surfaces through which no fluid penetrates. Streamlines can therefore be computed from the intersection of two nonparallel stream surfaces. This paper presents new algorithms for the computation of dual stream functions from Computational Fluid Dynamics data that is defined on an unstructured tetrahedral mesh. These algorithms are compared with standard numerical routines for computing streamlines, and are shown to be quicker and more accurate than techniques involving numerical integration along the streamline.",
"title": "Visualizing Unstructured Flow Data Using Dual Stream Functions",
"normalizedTitle": "Visualizing Unstructured Flow Data Using Dual Stream Functions",
"fno": "v0355",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "David",
"surname": "Knight",
"fullName": "David Knight",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gordon",
"surname": "Mallinson",
"fullName": "Gordon Mallinson",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "355-363",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0337",
"articleId": "13rRUyfKIHv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0364",
"articleId": "13rRUwghd4S",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNwkR5xu",
"title": "December",
"year": "1996",
"issueNum": "04",
"idPrefix": "tg",
"pubType": "journal",
"volume": "2",
"label": "December",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwghd4S",
"doi": "10.1109/2945.556504",
"abstract": "Abstract—Reconstruction is prerequisite whenever a discrete signal needs to be resampled as a result of transformation such as texture mapping, image manipulation, volume slicing, and rendering. We present a new method for the characterization and measurement of reconstruction error in spatial domain. Our method uses the Classical Shannon's Sampling Theorem as a basis to develop error bounds. We use this formulation to provide, for the first time, an efficient way to guarantee an error bound at every point by varying the size of the reconstruction filter. We go further to support position-adaptive reconstruction and data-adaptive reconstruction which adjust filter size to the location of reconstruction point and to the data values in its vicinity. We demonstrate the effectiveness of our methods with 1D signals, 2D signals (images), and 3D signals (volumes).",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Reconstruction is prerequisite whenever a discrete signal needs to be resampled as a result of transformation such as texture mapping, image manipulation, volume slicing, and rendering. We present a new method for the characterization and measurement of reconstruction error in spatial domain. Our method uses the Classical Shannon's Sampling Theorem as a basis to develop error bounds. We use this formulation to provide, for the first time, an efficient way to guarantee an error bound at every point by varying the size of the reconstruction filter. We go further to support position-adaptive reconstruction and data-adaptive reconstruction which adjust filter size to the location of reconstruction point and to the data values in its vicinity. We demonstrate the effectiveness of our methods with 1D signals, 2D signals (images), and 3D signals (volumes).",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Reconstruction is prerequisite whenever a discrete signal needs to be resampled as a result of transformation such as texture mapping, image manipulation, volume slicing, and rendering. We present a new method for the characterization and measurement of reconstruction error in spatial domain. Our method uses the Classical Shannon's Sampling Theorem as a basis to develop error bounds. We use this formulation to provide, for the first time, an efficient way to guarantee an error bound at every point by varying the size of the reconstruction filter. We go further to support position-adaptive reconstruction and data-adaptive reconstruction which adjust filter size to the location of reconstruction point and to the data values in its vicinity. We demonstrate the effectiveness of our methods with 1D signals, 2D signals (images), and 3D signals (volumes).",
"title": "Reconstruction Error Characterization and Control: A Sampling Theory Approach",
"normalizedTitle": "Reconstruction Error Characterization and Control: A Sampling Theory Approach",
"fno": "v0364",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Raghu",
"surname": "Machiraju",
"fullName": "Raghu Machiraju",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Roni",
"surname": "Yagel",
"fullName": "Roni Yagel",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "04",
"pubDate": "1996-10-01 00:00:00",
"pubType": "trans",
"pages": "364-378",
"year": "1996",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0355",
"articleId": "13rRUwjGoLv",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwhHcQJ",
"doi": "10.1109/TVCG.1995.10001",
"abstract": null,
"abstracts": [
{
"abstractType": "Regular",
"content": "",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": null,
"title": "Visualization Takes its Place in the Scientific Community",
"normalizedTitle": "Visualization Takes its Place in the Scientific Community",
"fno": "v0097",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Gregory M.",
"surname": "Nielson",
"fullName": "Gregory M. Nielson",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "97-98",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "v0099",
"articleId": "13rRUwghd8Y",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwghd8Y",
"doi": "10.1109/2945.468400",
"abstract": "Abstract—This tutorial survey paper reviews several different models for light interaction with volume densities of absorbing, glowing, reflecting, and/or scattering material. They are, in order of increasing realism, absorption only, emission only, emission and absorption combined, single scattering of external illumination without shadows, single scattering with shadows, and multiple scattering. For each model I give the physical assumptions, describe the applications for which it is appropriate, derive the differential or integral equations for light transport, present calculation methods for solving them, and show output images for a data set representing a cloud. Special attention is given to calculation methods for the multiple scattering model.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This tutorial survey paper reviews several different models for light interaction with volume densities of absorbing, glowing, reflecting, and/or scattering material. They are, in order of increasing realism, absorption only, emission only, emission and absorption combined, single scattering of external illumination without shadows, single scattering with shadows, and multiple scattering. For each model I give the physical assumptions, describe the applications for which it is appropriate, derive the differential or integral equations for light transport, present calculation methods for solving them, and show output images for a data set representing a cloud. Special attention is given to calculation methods for the multiple scattering model.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This tutorial survey paper reviews several different models for light interaction with volume densities of absorbing, glowing, reflecting, and/or scattering material. They are, in order of increasing realism, absorption only, emission only, emission and absorption combined, single scattering of external illumination without shadows, single scattering with shadows, and multiple scattering. For each model I give the physical assumptions, describe the applications for which it is appropriate, derive the differential or integral equations for light transport, present calculation methods for solving them, and show output images for a data set representing a cloud. Special attention is given to calculation methods for the multiple scattering model.",
"title": "Optical Models for Direct Volume Rendering",
"normalizedTitle": "Optical Models for Direct Volume Rendering",
"fno": "v0099",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Optical Models",
"Multiple Scattering",
"Extinction",
"Volume Shadows",
"Volume Rendering",
"Emission",
"Volume Shading",
"Participating Media",
"Discrete Ordinates Method",
"Compositing"
],
"authors": [
{
"givenName": "Nelson",
"surname": "Max",
"fullName": "Nelson Max",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "99-108",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0097",
"articleId": "13rRUwhHcQJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0109",
"articleId": "13rRUx0xPIp",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPIp",
"doi": "10.1109/2945.468408",
"abstract": "Abstract—This article presents a method for decomposing volume data into 3D DoG (difference of Gaussians) functions by using the frame theory [1] of nonorthogonal wavelets. Since we can think of a DoG function as a pair of Gaussian functions, we can consider this method an automatic generation of Blinn’s blobby objects [2]. We can also use this representation method for data compression by neglecting the insignificant coefficients, since the wavelet coefficients have significant values only where the volume density changes. Further, since the DoG function closely approximates a ∇2G (Laplacian of Gaussian) function, the representation can be considered a hierarchy of the 3D edges on different resolution spaces. Using the spherically symmetric feature of the 3D DoG function, we can easily visualize the 3D edge structure by the density reprojection method [3], [4]. We will apply our representation method to medical CT volume data and show its efficiency in describing the spatial structure of the volume.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This article presents a method for decomposing volume data into 3D DoG (difference of Gaussians) functions by using the frame theory [1] of nonorthogonal wavelets. Since we can think of a DoG function as a pair of Gaussian functions, we can consider this method an automatic generation of Blinn’s blobby objects [2]. We can also use this representation method for data compression by neglecting the insignificant coefficients, since the wavelet coefficients have significant values only where the volume density changes. Further, since the DoG function closely approximates a ∇2G (Laplacian of Gaussian) function, the representation can be considered a hierarchy of the 3D edges on different resolution spaces. Using the spherically symmetric feature of the 3D DoG function, we can easily visualize the 3D edge structure by the density reprojection method [3], [4]. We will apply our representation method to medical CT volume data and show its efficiency in describing the spatial structure of the volume.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This article presents a method for decomposing volume data into 3D DoG (difference of Gaussians) functions by using the frame theory [1] of nonorthogonal wavelets. Since we can think of a DoG function as a pair of Gaussian functions, we can consider this method an automatic generation of Blinn’s blobby objects [2]. We can also use this representation method for data compression by neglecting the insignificant coefficients, since the wavelet coefficients have significant values only where the volume density changes. Further, since the DoG function closely approximates a ∇2G (Laplacian of Gaussian) function, the representation can be considered a hierarchy of the 3D edges on different resolution spaces. Using the spherically symmetric feature of the 3D DoG function, we can easily visualize the 3D edge structure by the density reprojection method [3], [4]. We will apply our representation method to medical CT volume data and show its efficiency in describing the spatial structure of the volume.",
"title": "Multiscale Volume Representation by a DoG Wavelet",
"normalizedTitle": "Multiscale Volume Representation by a DoG Wavelet",
"fno": "v0109",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Wavelet",
"Do G Function",
"Meta Ball",
"Blobby Object"
],
"authors": [
{
"givenName": "Shigeru",
"surname": "Muraki",
"fullName": "Shigeru Muraki",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "109-116",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0099",
"articleId": "13rRUwghd8Y",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0117",
"articleId": "13rRUNvgyW6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgyW6",
"doi": "10.1109/2945.468407",
"abstract": "Abstract—This paper advocates the use of a group of renderers rather than any specific rendering method. We describe a bundle containing four alternative approaches to visualizing volume data. One new approach uses realistic volumetric gas rendering techniques to produce photo-realistic images and animations. The second uses ray casting that is based on a simpler illumination model and is mainly centered around a versatile new tool for the design of transfer functions. The third method employs a simple illumination model and rapid rendering mechanisms to provide efficient preview capabilities. The last one reduces data magnitude by displaying the most visible components and exploits rendering hardware to provide real time browsing capabilities. We show that each rendering tool provides a unique service and demonstrate the combined utility of our group of volume renderers in computational fluid dynamic (CFD) visualization. While one tool allows the explorer to render rapidly for navigation through the data, another tool allows one to emphasize data features (e.g., shock waves), and yet another tool allows one to realistically render the data. We believe that only through the deployment of groups of renderers will the scientist be well served and equipped to form numerous perspectives of the same dataset, each providing different insights into the data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This paper advocates the use of a group of renderers rather than any specific rendering method. We describe a bundle containing four alternative approaches to visualizing volume data. One new approach uses realistic volumetric gas rendering techniques to produce photo-realistic images and animations. The second uses ray casting that is based on a simpler illumination model and is mainly centered around a versatile new tool for the design of transfer functions. The third method employs a simple illumination model and rapid rendering mechanisms to provide efficient preview capabilities. The last one reduces data magnitude by displaying the most visible components and exploits rendering hardware to provide real time browsing capabilities. We show that each rendering tool provides a unique service and demonstrate the combined utility of our group of volume renderers in computational fluid dynamic (CFD) visualization. While one tool allows the explorer to render rapidly for navigation through the data, another tool allows one to emphasize data features (e.g., shock waves), and yet another tool allows one to realistically render the data. We believe that only through the deployment of groups of renderers will the scientist be well served and equipped to form numerous perspectives of the same dataset, each providing different insights into the data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This paper advocates the use of a group of renderers rather than any specific rendering method. We describe a bundle containing four alternative approaches to visualizing volume data. One new approach uses realistic volumetric gas rendering techniques to produce photo-realistic images and animations. The second uses ray casting that is based on a simpler illumination model and is mainly centered around a versatile new tool for the design of transfer functions. The third method employs a simple illumination model and rapid rendering mechanisms to provide efficient preview capabilities. The last one reduces data magnitude by displaying the most visible components and exploits rendering hardware to provide real time browsing capabilities. We show that each rendering tool provides a unique service and demonstrate the combined utility of our group of volume renderers in computational fluid dynamic (CFD) visualization. While one tool allows the explorer to render rapidly for navigation through the data, another tool allows one to emphasize data features (e.g., shock waves), and yet another tool allows one to realistically render the data. We believe that only through the deployment of groups of renderers will the scientist be well served and equipped to form numerous perspectives of the same dataset, each providing different insights into the data.",
"title": "Grouping Volume Renderers for Enhanced Visualization in Computational Fluid Dynamics",
"normalizedTitle": "Grouping Volume Renderers for Enhanced Visualization in Computational Fluid Dynamics",
"fno": "v0117",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Volume Rendering",
"Scientific Visualization",
"Computational Field Dynamics",
"Gaseous Rendering",
"Splatting",
"Template Rendering"
],
"authors": [
{
"givenName": "Roni",
"surname": "Yagel",
"fullName": "Roni Yagel",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David S.",
"surname": "Ebert",
"fullName": "David S. Ebert",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "James N.",
"surname": "Scott",
"fullName": "James N. Scott",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Yair",
"surname": "Kurzion",
"fullName": "Yair Kurzion",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "117-132",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0109",
"articleId": "13rRUx0xPIp",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0133",
"articleId": "13rRUxYrbUr",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbUr",
"doi": "10.1109/2945.468406",
"abstract": "Abstract—Line Integral Convolution (LIC), introduced by Cabral and Leedom in SIGGRAPH ’93, is a powerful technique for imaging and animating vector fields. We extend the LIC technique in three ways:1. The existing algorithm is limited to vector fields over a regular Cartesian grid. We extend the algorithm and the animation techniques possible with it to vector fields over curvilinear surfaces, such as those found in computational fluid dynamics simulations.2. We introduce a technique to visualize vector magnitude as well as vector direction, i.e., variable-speed flow animation.3. We show how to modify LIC to visualize unsteady (time dependent) flows.Our implementation utilizes texture-mapping hardware to run in real time, which allows our algorithms to be included in interactive applications.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Line Integral Convolution (LIC), introduced by Cabral and Leedom in SIGGRAPH ’93, is a powerful technique for imaging and animating vector fields. We extend the LIC technique in three ways:1. The existing algorithm is limited to vector fields over a regular Cartesian grid. We extend the algorithm and the animation techniques possible with it to vector fields over curvilinear surfaces, such as those found in computational fluid dynamics simulations.2. We introduce a technique to visualize vector magnitude as well as vector direction, i.e., variable-speed flow animation.3. We show how to modify LIC to visualize unsteady (time dependent) flows.Our implementation utilizes texture-mapping hardware to run in real time, which allows our algorithms to be included in interactive applications.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Line Integral Convolution (LIC), introduced by Cabral and Leedom in SIGGRAPH ’93, is a powerful technique for imaging and animating vector fields. We extend the LIC technique in three ways:1. The existing algorithm is limited to vector fields over a regular Cartesian grid. We extend the algorithm and the animation techniques possible with it to vector fields over curvilinear surfaces, such as those found in computational fluid dynamics simulations.2. We introduce a technique to visualize vector magnitude as well as vector direction, i.e., variable-speed flow animation.3. We show how to modify LIC to visualize unsteady (time dependent) flows.Our implementation utilizes texture-mapping hardware to run in real time, which allows our algorithms to be included in interactive applications.",
"title": "Using Line Integral Convolution for Flow Visualization: Curvilinear Grids, Variable-Speed Animation, and Unsteady Flows",
"normalizedTitle": "Using Line Integral Convolution for Flow Visualization: Curvilinear Grids, Variable-Speed Animation, and Unsteady Flows",
"fno": "v0133",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Flow Visualization",
"Vector Field Visualization",
"Line Integral Convolution",
"Curvilinear Grids",
"Flow Animation",
"Unsteady Flows"
],
"authors": [
{
"givenName": "Lisa K.",
"surname": "Forssell",
"fullName": "Lisa K. Forssell",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Scott D.",
"surname": "Cohen",
"fullName": "Scott D. Cohen",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "133-141",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0117",
"articleId": "13rRUNvgyW6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0142",
"articleId": "13rRUxD9gXv",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxD9gXv",
"doi": "10.1109/2945.468405",
"abstract": "Abstract—In computational fluid dynamics, visualization is a frequently used tool for data evaluation, understanding of flow characteristics, and qualitative comparison to flow visualizations originating from experiments. Building on an existing visualization software system that allows for a careful selection of state-of-the-art visualization techniques and some extensions, it became possible to present various features of the data in a single image. The visualization shows vortex position and rotation as well as skin-friction lines, experimental oil-flow traces, shock-wave positions, and time surfaces. Animation provides natural perception of flow in combination with abstract representation of phenomena. By adding experimental flow visualization, a comparison between numerical simulation and wind-tunnel flow becomes possible up to a high level of detail. Since some of the underlying algorithms are not yet described in detail in the visualization literature, some experiences gained from the implementation are illustrated. The dedicated techniques which are illustrated here address specific properties of vector quantities in the flow field, such as the velocity vector or the friction vector. Image complexity is reduced by employing complex visualization methods. Thus, the room is created which is necessary to study the interaction of various phenomena.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—In computational fluid dynamics, visualization is a frequently used tool for data evaluation, understanding of flow characteristics, and qualitative comparison to flow visualizations originating from experiments. Building on an existing visualization software system that allows for a careful selection of state-of-the-art visualization techniques and some extensions, it became possible to present various features of the data in a single image. The visualization shows vortex position and rotation as well as skin-friction lines, experimental oil-flow traces, shock-wave positions, and time surfaces. Animation provides natural perception of flow in combination with abstract representation of phenomena. By adding experimental flow visualization, a comparison between numerical simulation and wind-tunnel flow becomes possible up to a high level of detail. Since some of the underlying algorithms are not yet described in detail in the visualization literature, some experiences gained from the implementation are illustrated. The dedicated techniques which are illustrated here address specific properties of vector quantities in the flow field, such as the velocity vector or the friction vector. Image complexity is reduced by employing complex visualization methods. Thus, the room is created which is necessary to study the interaction of various phenomena.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—In computational fluid dynamics, visualization is a frequently used tool for data evaluation, understanding of flow characteristics, and qualitative comparison to flow visualizations originating from experiments. Building on an existing visualization software system that allows for a careful selection of state-of-the-art visualization techniques and some extensions, it became possible to present various features of the data in a single image. The visualization shows vortex position and rotation as well as skin-friction lines, experimental oil-flow traces, shock-wave positions, and time surfaces. Animation provides natural perception of flow in combination with abstract representation of phenomena. By adding experimental flow visualization, a comparison between numerical simulation and wind-tunnel flow becomes possible up to a high level of detail. Since some of the underlying algorithms are not yet described in detail in the visualization literature, some experiences gained from the implementation are illustrated. The dedicated techniques which are illustrated here address specific properties of vector quantities in the flow field, such as the velocity vector or the friction vector. Image complexity is reduced by employing complex visualization methods. Thus, the room is created which is necessary to study the interaction of various phenomena.",
"title": "Competent, Compact, Comparative Visualization of a Vortical Flow Field",
"normalizedTitle": "Competent, Compact, Comparative Visualization of a Vortical Flow Field",
"fno": "v0142",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Flow Visualization",
"Vector Field Visualization",
"Comparative Visualization"
],
"authors": [
{
"givenName": "Hans-Georg",
"surname": "Pagendarm",
"fullName": "Hans-Georg Pagendarm",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Birgit",
"surname": "Walter",
"fullName": "Birgit Walter",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "142-150",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0133",
"articleId": "13rRUxYrbUr",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0151",
"articleId": "13rRUwjGoFO",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjGoFO",
"doi": "10.1109/2945.468404",
"abstract": "Abstract—We present a method for visualizing unsteady flow by displaying its vortices. The vortices are identified by using a vorticity-predictor pressure-corrector scheme that follows vortex cores. The cross-sections of a vortex at each point along the core can be represented by a Fourier series. A vortex can be faithfully reconstructed from the series as a simple quadrilateral mesh, or its reconstruction can be enhanced to indicate helical motion. The mesh can reduce the representation of the flow features by a factor of one thousand or more compared with the volumetric dataset. With this amount of reduction it is possible to implement an interactive system on a graphics workstation to permit a viewer to examine, in three dimensions, the evolution of the vortical structures in a complex, unsteady flow.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—We present a method for visualizing unsteady flow by displaying its vortices. The vortices are identified by using a vorticity-predictor pressure-corrector scheme that follows vortex cores. The cross-sections of a vortex at each point along the core can be represented by a Fourier series. A vortex can be faithfully reconstructed from the series as a simple quadrilateral mesh, or its reconstruction can be enhanced to indicate helical motion. The mesh can reduce the representation of the flow features by a factor of one thousand or more compared with the volumetric dataset. With this amount of reduction it is possible to implement an interactive system on a graphics workstation to permit a viewer to examine, in three dimensions, the evolution of the vortical structures in a complex, unsteady flow.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—We present a method for visualizing unsteady flow by displaying its vortices. The vortices are identified by using a vorticity-predictor pressure-corrector scheme that follows vortex cores. The cross-sections of a vortex at each point along the core can be represented by a Fourier series. A vortex can be faithfully reconstructed from the series as a simple quadrilateral mesh, or its reconstruction can be enhanced to indicate helical motion. The mesh can reduce the representation of the flow features by a factor of one thousand or more compared with the volumetric dataset. With this amount of reduction it is possible to implement an interactive system on a graphics workstation to permit a viewer to examine, in three dimensions, the evolution of the vortical structures in a complex, unsteady flow.",
"title": "A Predictor-Corrector Technique for Visualizing Unsteady Flow",
"normalizedTitle": "A Predictor-Corrector Technique for Visualizing Unsteady Flow",
"fno": "v0151",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Vortex Identification",
"Vortex Visualization",
"Numerical Flow Visualization",
"Numerical Flow Animation",
"Vortex Core",
"Data Reduction",
"Feature Extraction"
],
"authors": [
{
"givenName": "David C.",
"surname": "Banks",
"fullName": "David C. Banks",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bart A.",
"surname": "Singer",
"fullName": "Bart A. Singer",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "151-163",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0142",
"articleId": "13rRUxD9gXv",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0164",
"articleId": "13rRUwIF697",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwIF697",
"doi": "10.1109/2945.468403",
"abstract": "Abstract—Curves in space are difficult to perceive and analyze, especially when they form dense sets as in typical 3D flow and volume deformation applications. We propose a technique that exposes essential properties of space curves by attaching an appropriate moving coordinate frame to each point, reexpressing that moving frame as a unit quaternion, and supporting interaction with the resulting quaternion field. The original curves in three-space are associated with piecewise continuous four-vector quaternion fields, which map into new curves lying in the unit three-sphere in four-space. Since four-space clusters of curves with similar moving frames occur independently of the curves’ original proximity in three-space, a powerful analysis tool results. We treat two separate moving-frame formalisms, the Frenet frame and the parallel-transport frame, and compare their properties. We describe several flexible approaches for interacting with and exploiting the properties of the four-dimensional quaternion fields.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—Curves in space are difficult to perceive and analyze, especially when they form dense sets as in typical 3D flow and volume deformation applications. We propose a technique that exposes essential properties of space curves by attaching an appropriate moving coordinate frame to each point, reexpressing that moving frame as a unit quaternion, and supporting interaction with the resulting quaternion field. The original curves in three-space are associated with piecewise continuous four-vector quaternion fields, which map into new curves lying in the unit three-sphere in four-space. Since four-space clusters of curves with similar moving frames occur independently of the curves’ original proximity in three-space, a powerful analysis tool results. We treat two separate moving-frame formalisms, the Frenet frame and the parallel-transport frame, and compare their properties. We describe several flexible approaches for interacting with and exploiting the properties of the four-dimensional quaternion fields.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—Curves in space are difficult to perceive and analyze, especially when they form dense sets as in typical 3D flow and volume deformation applications. We propose a technique that exposes essential properties of space curves by attaching an appropriate moving coordinate frame to each point, reexpressing that moving frame as a unit quaternion, and supporting interaction with the resulting quaternion field. The original curves in three-space are associated with piecewise continuous four-vector quaternion fields, which map into new curves lying in the unit three-sphere in four-space. Since four-space clusters of curves with similar moving frames occur independently of the curves’ original proximity in three-space, a powerful analysis tool results. We treat two separate moving-frame formalisms, the Frenet frame and the parallel-transport frame, and compare their properties. We describe several flexible approaches for interacting with and exploiting the properties of the four-dimensional quaternion fields.",
"title": "Quaternion Frame Approach to Streamline Visualization",
"normalizedTitle": "Quaternion Frame Approach to Streamline Visualization",
"fno": "v0164",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Quaternion",
"Frenet Frame",
"Orientation Frame"
],
"authors": [
{
"givenName": "Andrew J.",
"surname": "Hanson",
"fullName": "Andrew J. Hanson",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanson Hui",
"surname": "Ma",
"fullName": "Hanson Hui Ma",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "164-174",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0151",
"articleId": "13rRUwjGoFO",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0175",
"articleId": "13rRUwghd4Q",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwghd4Q",
"doi": "10.1109/2945.468402",
"abstract": "Abstract—This paper describes Obliq-3D, a high-level, fast-turnaround system for building 3D animations. Obliq-3D consists of an interpreted language that is embedded into a 3D animation library. This library is based on a few simple, yet powerful constructs that allow programmers to describe three-dimensional scenes and animations of such scenes. By virtue of its interpretive nature, Obliq-3D provides a fast-turnaround environment. The combination of simplicity and fast turnaround allows programmers to construct nontrivial animations quickly and easily.The paper is divided into three major parts. The first part introduces the basic concepts of Obliq-3D, using a series of graduated examples. The second part shows how the system can be used to implement Cone Trees. The third part develops a complete animation of Dijkstra’s shortest-path algorithm.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This paper describes Obliq-3D, a high-level, fast-turnaround system for building 3D animations. Obliq-3D consists of an interpreted language that is embedded into a 3D animation library. This library is based on a few simple, yet powerful constructs that allow programmers to describe three-dimensional scenes and animations of such scenes. By virtue of its interpretive nature, Obliq-3D provides a fast-turnaround environment. The combination of simplicity and fast turnaround allows programmers to construct nontrivial animations quickly and easily.The paper is divided into three major parts. The first part introduces the basic concepts of Obliq-3D, using a series of graduated examples. The second part shows how the system can be used to implement Cone Trees. The third part develops a complete animation of Dijkstra’s shortest-path algorithm.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This paper describes Obliq-3D, a high-level, fast-turnaround system for building 3D animations. Obliq-3D consists of an interpreted language that is embedded into a 3D animation library. This library is based on a few simple, yet powerful constructs that allow programmers to describe three-dimensional scenes and animations of such scenes. By virtue of its interpretive nature, Obliq-3D provides a fast-turnaround environment. The combination of simplicity and fast turnaround allows programmers to construct nontrivial animations quickly and easily.The paper is divided into three major parts. The first part introduces the basic concepts of Obliq-3D, using a series of graduated examples. The second part shows how the system can be used to implement Cone Trees. The third part develops a complete animation of Dijkstra’s shortest-path algorithm.",
"title": "Obliq-3D: A High-Level, Fast-Turnaround 3D Animation System",
"normalizedTitle": "Obliq-3D: A High-Level, Fast-Turnaround 3D Animation System",
"fno": "v0175",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"3 D Graphics 3 D Animation Information Visualization Algorithm Animation Interpreted Language Embedded Language Scripting Language"
],
"authors": [
{
"givenName": "Marc A.",
"surname": "Najork",
"fullName": "Marc A. Najork",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc H.",
"surname": "Brown",
"fullName": "Marc H. Brown",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "175-193",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0164",
"articleId": "13rRUwIF697",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "v0194",
"articleId": "13rRUxDIth2",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNvEyRcK",
"title": "June",
"year": "1995",
"issueNum": "02",
"idPrefix": "tg",
"pubType": "journal",
"volume": "1",
"label": "June",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxDIth2",
"doi": "10.1109/2945.468401",
"abstract": "Abstract—This paper investigates the visualization of geometric algorithms. We discuss how limiting the domain makes it possible to create a system that enables others to use it easily. Knowledge about the domain can be very helpful in building a system which automates large parts of the user’s task. A system can be designed to isolate the user from any concern about how graphics is done. The application need only specify “what” happens and need not be concerned with “how” to make it happen on the screen. We develop a conceptual model and a framework for experimenting with it. We also present a system, GASP, which implements this model. GASP allows quick generation of three-dimensional geometric algorithm visualizations, even for highly complex algorithms. It also provides a visual debugging facility for geometric computing. We show the utility of GASP by presenting a variety of examples.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Abstract—This paper investigates the visualization of geometric algorithms. We discuss how limiting the domain makes it possible to create a system that enables others to use it easily. Knowledge about the domain can be very helpful in building a system which automates large parts of the user’s task. A system can be designed to isolate the user from any concern about how graphics is done. The application need only specify “what” happens and need not be concerned with “how” to make it happen on the screen. We develop a conceptual model and a framework for experimenting with it. We also present a system, GASP, which implements this model. GASP allows quick generation of three-dimensional geometric algorithm visualizations, even for highly complex algorithms. It also provides a visual debugging facility for geometric computing. We show the utility of GASP by presenting a variety of examples.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Abstract—This paper investigates the visualization of geometric algorithms. We discuss how limiting the domain makes it possible to create a system that enables others to use it easily. Knowledge about the domain can be very helpful in building a system which automates large parts of the user’s task. A system can be designed to isolate the user from any concern about how graphics is done. The application need only specify “what” happens and need not be concerned with “how” to make it happen on the screen. We develop a conceptual model and a framework for experimenting with it. We also present a system, GASP, which implements this model. GASP allows quick generation of three-dimensional geometric algorithm visualizations, even for highly complex algorithms. It also provides a visual debugging facility for geometric computing. We show the utility of GASP by presenting a variety of examples.",
"title": "Visualization of Geometric Algorithms",
"normalizedTitle": "Visualization of Geometric Algorithms",
"fno": "v0194",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Algorithm Animation",
"Computational Geometry",
"Three Dimensional Geometric Algorithms"
],
"authors": [
{
"givenName": "Ayellet",
"surname": "Tal",
"fullName": "Ayellet Tal",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "David",
"surname": "Dobkin",
"fullName": "David Dobkin",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": false,
"isOpenAccess": false,
"issueNum": "02",
"pubDate": "1995-04-01 00:00:00",
"pubType": "trans",
"pages": "194-204",
"year": "1995",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "v0175",
"articleId": "13rRUwghd4Q",
"__typename": "AdjacentArticleType"
},
"next": null,
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwcAqqg",
"doi": "10.1109/TVCG.2013.199",
"abstract": null,
"abstracts": [],
"normalizedAbstract": null,
"title": "Table of Contents",
"normalizedTitle": "Table of Contents",
"fno": "ttg2013120iii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "iii-ix",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": null,
"next": {
"fno": "ttg201312000x",
"articleId": "13rRUxOve9J",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxOve9J",
"doi": "10.1109/TVCG.2013.176",
"abstract": null,
"abstracts": [],
"normalizedAbstract": null,
"title": "Message fron the Editor-in-Chief",
"normalizedTitle": "Message fron the Editor-in-Chief",
"fno": "ttg201312000x",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "x-x",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2016/01/07307929",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07307929/13rRUIIVlkl",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/1986/01/01663026",
"title": "Message: From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/magazine/co/1986/01/01663026/13rRUILLkGt",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg20121200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg20121200ix/13rRUwIF69i",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2016/01/07423841",
"title": "Message From the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2016/01/07423841/13rRUwgQpvG",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg20111200ix",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg20111200ix/13rRUwjoNx0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2015/04/07064831",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2015/04/07064831/13rRUxBa5no",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2017/01/07870827",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2017/01/07870827/13rRUxE04mk",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08165928",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08165928/13rRUxly8T3",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2010/06/ttg201006000x",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/tg/2010/06/ttg201006000x/13rRUytF41v",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ec/2020/04/09280500",
"title": "Message from the Editor-in-Chief",
"doi": null,
"abstractUrl": "/journal/ec/2020/04/09280500/1pg8LOZjN28",
"parentPublication": {
"id": "trans/ec",
"title": "IEEE Transactions on Emerging Topics in Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013120iii",
"articleId": "13rRUwcAqqg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20131200xi",
"articleId": "13rRUNvyaf0",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvyaf0",
"doi": "10.1109/TVCG.2013.175",
"abstract": "Presents the opening speaches and editorials from guest editors from the conference proceedings.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Presents the opening speaches and editorials from guest editors from the conference proceedings.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Presents the opening speaches and editorials from guest editors from the conference proceedings.",
"title": "Message from the Paper Chairs and Guest Editors",
"normalizedTitle": "Message from the Paper Chairs and Guest Editors",
"fno": "ttg20131200xi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Special Issues And Sections",
"Meetings",
"Computer Graphics",
"Information Technology",
"Data Visualization",
"Special Issues And Sections",
"Meetings",
"Computer Graphics",
"Information Technology"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xi-xiv",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "mags/cs/2016/02/mcs2016020006",
"title": "Guest Editors' introduction",
"doi": null,
"abstractUrl": "/magazine/cs/2016/02/mcs2016020006/13rRUEgs2Q2",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/04/ttg2013040000vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2013/04/ttg2013040000vi/13rRUILtJma",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/04/ttg2014040vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/04/ttg2014040vi/13rRUwI5Ug9",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06935059",
"title": "Message from the VIS Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06935059/13rRUxBa564",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212000x",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212000x/13rRUxYIN49",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/ex/2013/04/mex2013040005",
"title": "Technological Challenges in Emergency Response [Guest editors' introduction]",
"doi": null,
"abstractUrl": "/magazine/ex/2013/04/mex2013040005/13rRUxbCbn4",
"parentPublication": {
"id": "mags/ex",
"title": "IEEE Intelligent Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/04/ttg20120400vi",
"title": "Message from the Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2012/04/ttg20120400vi/13rRUxly9dS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/05/09754286",
"title": "IEEE VR 2022 Message from the Journal Paper Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2022/05/09754286/1Cpd7Bwusk8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/11/08855105",
"title": "Message from the ISMAR 2019 Science and Technology Program Chairs and <italic>TVCG</italic> Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2019/11/08855105/1dNHma690d2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/05/09405530",
"title": "Message from the Program Chairs and Guest Editors",
"doi": null,
"abstractUrl": "/journal/tg/2021/05/09405530/1sP1eDRuGMU",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201312000x",
"articleId": "13rRUxOve9J",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20131200xv",
"articleId": "13rRUwgQpDu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwgQpDu",
"doi": "10.1109/TVCG.2013.165",
"abstract": "Provides a listing of current committee members and society officers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of current committee members and society officers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of current committee members and society officers.",
"title": "IEEE Visualization and Graphics Technical Committee (VGTC)",
"normalizedTitle": "IEEE Visualization and Graphics Technical Committee (VGTC)",
"fno": "ttg20131200xv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xv-xv",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg20131200xi",
"articleId": "13rRUNvyaf0",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013120xvi",
"articleId": "13rRUxly8SW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxly8SW",
"doi": "10.1109/TVCG.2013.218",
"abstract": "Provides a listing of current committee members and society officers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of current committee members and society officers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of current committee members and society officers.",
"title": "VIS Conference Committee",
"normalizedTitle": "VIS Conference Committee",
"fno": "ttg2013120xvi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xvi-xvi",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg20131200xv",
"articleId": "13rRUwgQpDu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201312xvii",
"articleId": "13rRUxBa5nn",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5nn",
"doi": "10.1109/TVCG.2013.171",
"abstract": "Provides a listing of current committee members and society officers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of current committee members and society officers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of current committee members and society officers.",
"title": "International Program Committees",
"normalizedTitle": "International Program Committees",
"fno": "ttg201312xvii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xvii-xviii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013120xvi",
"articleId": "13rRUxly8SW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013120xix",
"articleId": "13rRUwjXZSc",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjXZSc",
"doi": "10.1109/TVCG.2013.195",
"abstract": "Provides a listing of current committee members and society officers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Provides a listing of current committee members and society officers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Provides a listing of current committee members and society officers.",
"title": "Steering Committees",
"normalizedTitle": "Steering Committees",
"fno": "ttg2013120xix",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xix-xix",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xvii",
"articleId": "13rRUxBa5nn",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg20131200xx",
"articleId": "13rRUygT7fd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygT7fd",
"doi": "10.1109/TVCG.2013.185",
"abstract": "The publication offers a note of thanks and lists its reviewers.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The publication offers a note of thanks and lists its reviewers.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The publication offers a note of thanks and lists its reviewers.",
"title": "Reviewers",
"normalizedTitle": "Reviewers",
"fno": "ttg20131200xx",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xx-xxii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013120xix",
"articleId": "13rRUwjXZSc",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201312xxiii",
"articleId": "13rRUwInvB5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvB5",
"doi": "10.1109/TVCG.2013.201",
"abstract": "The 2012 VGTC Visualization Career Award was presented to Ben Shneiderman.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The 2012 VGTC Visualization Career Award was presented to Ben Shneiderman.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The 2012 VGTC Visualization Career Award was presented to Ben Shneiderman.",
"title": "The 2012 VGTC Visualization Career Award:Ben Shneiderman",
"normalizedTitle": "The 2012 VGTC Visualization Career Award:Ben Shneiderman",
"fno": "ttg201312xxiii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxiii-xxiii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg20131200xx",
"articleId": "13rRUygT7fd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201312xxiv",
"articleId": "13rRUyeCkag",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyeCkag",
"doi": "10.1109/TVCG.2013.202",
"abstract": "The 2012 VGTC Visualization Technical Achievement Award was presented to John Stasko.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The 2012 VGTC Visualization Technical Achievement Award was presented to John Stasko.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The 2012 VGTC Visualization Technical Achievement Award was presented to John Stasko.",
"title": "The 2012 VGTC Visualization Technical Achievement Award:John Stasko",
"normalizedTitle": "The 2012 VGTC Visualization Technical Achievement Award:John Stasko",
"fno": "ttg201312xxiv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxiv-xxiv",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xxiii",
"articleId": "13rRUwInvB5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013120xxv",
"articleId": "13rRUwIF6l6",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwIF6l6",
"doi": "10.1109/TVCG.2013.203",
"abstract": "The 2013 VGTC Visualization Career Award was presented to Gregory M. Nielson.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The 2013 VGTC Visualization Career Award was presented to Gregory M. Nielson.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The 2013 VGTC Visualization Career Award was presented to Gregory M. Nielson.",
"title": "The 2013 VGTC Visualization Career Award:Gregory M. Nielson",
"normalizedTitle": "The 2013 VGTC Visualization Career Award:Gregory M. Nielson",
"fno": "ttg2013120xxv",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxv-xxv",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xxiv",
"articleId": "13rRUyeCkag",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201312xxvi",
"articleId": "13rRUwbs2gu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwbs2gu",
"doi": "10.1109/TVCG.2013.204",
"abstract": "The 2013 VGTC Visualization Technical Achievement Award was presented to Kwan-Liu Ma.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The 2013 VGTC Visualization Technical Achievement Award was presented to Kwan-Liu Ma.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The 2013 VGTC Visualization Technical Achievement Award was presented to Kwan-Liu Ma.",
"title": "The 2013 VGTC Visualization Technical Achievement Award:Kwan-Liu Ma",
"normalizedTitle": "The 2013 VGTC Visualization Technical Achievement Award:Kwan-Liu Ma",
"fno": "ttg201312xxvi",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Awards"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxvi-xxvi",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg2013120xxv",
"articleId": "13rRUwIF6l6",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201312xxvii",
"articleId": "13rRUILLkDQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILLkDQ",
"doi": "10.1109/TVCG.2013.217",
"abstract": "A brief biography of Erez Lieberman Aiden is given highlighting his professional achievements.",
"abstracts": [
{
"abstractType": "Regular",
"content": "A brief biography of Erez Lieberman Aiden is given highlighting his professional achievements.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "A brief biography of Erez Lieberman Aiden is given highlighting his professional achievements.",
"title": "VIS 2013 Keynote Speaker: Erez Lieberman Aiden [biography]",
"normalizedTitle": "VIS 2013 Keynote Speaker: Erez Lieberman Aiden [biography]",
"fno": "ttg201312xxvii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Biographies",
"Aiden Erez Lieberman"
],
"authors": [],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": false,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxvii-xxvii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xxvi",
"articleId": "13rRUwbs2gu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg201312xxviii",
"articleId": "13rRUygBwhJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUygBwhJ",
"doi": "10.1109/TVCG.2013.216",
"abstract": "Summary form only given. In the past decades many new techniques have been developed to visualize and interact with abstract data, but also, many challenges remain. In my talk I will reflect on how to make progress in our field: how to identify interesting problems and next how to find effective solutions. I will begin with an attempt to identify characteristics of interesting problems, and discuss windows of opportunity for data, tasks, and users. Some problems have been solved, some are too hard to deal with, what is the range we should aim at? And what impact can be obtained? Next, I discuss strategies and approaches for finding novel solutions, such as combining existing approaches and finding inspiration in other disciplines, including art and design. This talk is based on lessons we learned while developing new techniques, and will be illustrated with a variety of cases and demos from our group at TU/e, showing successes and failures.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Summary form only given. In the past decades many new techniques have been developed to visualize and interact with abstract data, but also, many challenges remain. In my talk I will reflect on how to make progress in our field: how to identify interesting problems and next how to find effective solutions. I will begin with an attempt to identify characteristics of interesting problems, and discuss windows of opportunity for data, tasks, and users. Some problems have been solved, some are too hard to deal with, what is the range we should aim at? And what impact can be obtained? Next, I discuss strategies and approaches for finding novel solutions, such as combining existing approaches and finding inspiration in other disciplines, including art and design. This talk is based on lessons we learned while developing new techniques, and will be illustrated with a variety of cases and demos from our group at TU/e, showing successes and failures.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Summary form only given. In the past decades many new techniques have been developed to visualize and interact with abstract data, but also, many challenges remain. In my talk I will reflect on how to make progress in our field: how to identify interesting problems and next how to find effective solutions. I will begin with an attempt to identify characteristics of interesting problems, and discuss windows of opportunity for data, tasks, and users. Some problems have been solved, some are too hard to deal with, what is the range we should aim at? And what impact can be obtained? Next, I discuss strategies and approaches for finding novel solutions, such as combining existing approaches and finding inspiration in other disciplines, including art and design. This talk is based on lessons we learned while developing new techniques, and will be illustrated with a variety of cases and demos from our group at TU/e, showing successes and failures.",
"title": "VIS 2013 Capstone Speaker: Information Visualization: Challenges and Opportunities",
"normalizedTitle": "VIS 2013 Capstone Speaker: Information Visualization: Challenges and Opportunities",
"fno": "ttg201312xxviii",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [],
"authors": [
{
"givenName": "Jarke",
"surname": "van Wijk",
"fullName": "Jarke van Wijk",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": false,
"showRecommendedArticles": true,
"isOpenAccess": true,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "xxviii-xxviii",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2015/9783/0/07347623",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2015/07347623/12OmNBSSVi0",
"parentPublication": {
"id": "proceedings/vast/2015/9783/0",
"title": "2015 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2015/9785/0/07429483",
"title": "VIS capstone address: Architectures physical and digital",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2015/07429483/12OmNx6Piuq",
"parentPublication": {
"id": "proceedings/scivis/2015/9785/0",
"title": "2015 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883505",
"title": "VIS capstone address",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883505/12OmNxFJXuy",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2016/0836/0/07504680",
"title": "Capstone speaker: Agents? Seriously",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2016/07504680/12OmNzV70mm",
"parentPublication": {
"id": "proceedings/vr/2016/0836/0",
"title": "2016 IEEE Virtual Reality (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/3dui/2016/0842/0/07460022",
"title": "Keynote speaker: Getting real",
"doi": null,
"abstractUrl": "/proceedings-article/3dui/2016/07460022/12OmNzt0IxZ",
"parentPublication": {
"id": "proceedings/3dui/2016/0842/0",
"title": "2016 IEEE Symposium on 3D User Interfaces (3DUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg201312xxvii",
"title": "VIS 2013 Keynote Speaker: Erez Lieberman Aiden [biography]",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg201312xxvii/13rRUILLkDQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg201212xxii",
"title": "VisWeek 2012 Capstone Speaker",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg201212xxii/13rRUyY294C",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vr/2023/4815/0/481500z025",
"title": "Keynote Speaker: Digital Humans in Virtual Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vr/2023/481500z025/1MNgtJP55y8",
"parentPublication": {
"id": "proceedings/vr/2023/4815/0",
"title": "2023 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2018/6861/0/08802482",
"title": "VIS Capstone Address: Can I believe what I see?-Information theoretic algorithm validation",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2018/08802482/1cJ6WL6h2iA",
"parentPublication": {
"id": "proceedings/vast/2018/6861/0",
"title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/percom-workshops/2020/4716/0/09156201",
"title": "Invited Talk: Software Engineering, AI and autonomous vehicles: Security assurance",
"doi": null,
"abstractUrl": "/proceedings-article/percom-workshops/2020/09156201/1m1jBbaFOes",
"parentPublication": {
"id": "proceedings/percom-workshops/2020/4716/0",
"title": "2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xxvii",
"articleId": "13rRUILLkDQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013121962",
"articleId": "13rRUwhHcJg",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwhHcJg",
"doi": "10.1109/TVCG.2013.125",
"abstract": "Regression models play a key role in many application domains for analyzing or predicting a quantitative dependent variable based on one or more independent variables. Automated approaches for building regression models are typically limited with respect to incorporating domain knowledge in the process of selecting input variables (also known as feature subset selection). Other limitations include the identification of local structures, transformations, and interactions between variables. The contribution of this paper is a framework for building regression models addressing these limitations. The framework combines a qualitative analysis of relationship structures by visualization and a quantification of relevance for ranking any number of features and pairs of features which may be categorical or continuous. A central aspect is the local approximation of the conditional target distribution by partitioning 1D and 2D feature domains into disjoint regions. This enables a visual investigation of local patterns and largely avoids structural assumptions for the quantitative ranking. We describe how the framework supports different tasks in model building (e.g., validation and comparison), and we present an interactive workflow for feature subset selection. A real-world case study illustrates the step-wise identification of a five-dimensional model for natural gas consumption. We also report feedback from domain experts after two months of deployment in the energy sector, indicating a significant effort reduction for building and improving regression models.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Regression models play a key role in many application domains for analyzing or predicting a quantitative dependent variable based on one or more independent variables. Automated approaches for building regression models are typically limited with respect to incorporating domain knowledge in the process of selecting input variables (also known as feature subset selection). Other limitations include the identification of local structures, transformations, and interactions between variables. The contribution of this paper is a framework for building regression models addressing these limitations. The framework combines a qualitative analysis of relationship structures by visualization and a quantification of relevance for ranking any number of features and pairs of features which may be categorical or continuous. A central aspect is the local approximation of the conditional target distribution by partitioning 1D and 2D feature domains into disjoint regions. This enables a visual investigation of local patterns and largely avoids structural assumptions for the quantitative ranking. We describe how the framework supports different tasks in model building (e.g., validation and comparison), and we present an interactive workflow for feature subset selection. A real-world case study illustrates the step-wise identification of a five-dimensional model for natural gas consumption. We also report feedback from domain experts after two months of deployment in the energy sector, indicating a significant effort reduction for building and improving regression models.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Regression models play a key role in many application domains for analyzing or predicting a quantitative dependent variable based on one or more independent variables. Automated approaches for building regression models are typically limited with respect to incorporating domain knowledge in the process of selecting input variables (also known as feature subset selection). Other limitations include the identification of local structures, transformations, and interactions between variables. The contribution of this paper is a framework for building regression models addressing these limitations. The framework combines a qualitative analysis of relationship structures by visualization and a quantification of relevance for ranking any number of features and pairs of features which may be categorical or continuous. A central aspect is the local approximation of the conditional target distribution by partitioning 1D and 2D feature domains into disjoint regions. This enables a visual investigation of local patterns and largely avoids structural assumptions for the quantitative ranking. We describe how the framework supports different tasks in model building (e.g., validation and comparison), and we present an interactive workflow for feature subset selection. A real-world case study illustrates the step-wise identification of a five-dimensional model for natural gas consumption. We also report feedback from domain experts after two months of deployment in the energy sector, indicating a significant effort reduction for building and improving regression models.",
"title": "A Partition-Based Framework for Building and Validating Regression Models",
"normalizedTitle": "A Partition-Based Framework for Building and Validating Regression Models",
"fno": "ttg2013121962",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Modeling",
"Regression Analysis",
"Computational Modeling",
"Feature Extraction",
"Frequency Domain Analysis",
"Complexity Theory",
"Data Partitioning",
"Modeling",
"Regression Analysis",
"Computational Modeling",
"Feature Extraction",
"Frequency Domain Analysis",
"Complexity Theory",
"Guided Visualization",
"Regression",
"Model Building",
"Visual Knowledge Discovery",
"Feature Selection"
],
"authors": [
{
"givenName": "Thomas",
"surname": "Muhlbacher",
"fullName": "Thomas Muhlbacher",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harald",
"surname": "Piringer",
"fullName": "Harald Piringer",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "1962-1971",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wkdd/2008/3090/0/30900063",
"title": "A New Method Multi-Factor Trend Regression and its Application to Economy Forecast in Jiangxi",
"doi": null,
"abstractUrl": "/proceedings-article/wkdd/2008/30900063/12OmNC8uRt7",
"parentPublication": {
"id": "proceedings/wkdd/2008/3090/0",
"title": "International Workshop on Knowledge Discovery and Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csse/2008/3336/1/3336a915",
"title": "A Case Study on the RCMD Method and Fuzzy C-Regression Models for Mining Regression Classes",
"doi": null,
"abstractUrl": "/proceedings-article/csse/2008/3336a915/12OmNCgJe93",
"parentPublication": {
"id": "proceedings/csse/2008/3336/1",
"title": "Computer Science and Software Engineering, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icassp/2004/8484/2/01326308",
"title": "Bias of the corrected KIC for underfitted regression models",
"doi": null,
"abstractUrl": "/proceedings-article/icassp/2004/01326308/12OmNx8OuwH",
"parentPublication": {
"id": "proceedings/icassp/2004/8484/2",
"title": "2004 IEEE International Conference on Acoustics, Speech, and Signal Processing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pdis/1996/7475/0/74750220",
"title": "Building Regression Cost Models for Multidatabase Systems",
"doi": null,
"abstractUrl": "/proceedings-article/pdis/1996/74750220/12OmNxRF78T",
"parentPublication": {
"id": "proceedings/pdis/1996/7475/0",
"title": "Parallel and Distributed Information Systems, International Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2005/2278/0/22780370",
"title": "Ranking-Based Evaluation of Regression Models",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2005/22780370/12OmNyFU72m",
"parentPublication": {
"id": "proceedings/icdm/2005/2278/0",
"title": "Fifth IEEE International Conference on Data Mining (ICDM'05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isda/2009/3872/0/3872a109",
"title": "Handling High-Dimensional Regression Problems by Means of an Efficient Multi-Objective Evolutionary Algorithm",
"doi": null,
"abstractUrl": "/proceedings-article/isda/2009/3872a109/12OmNzC5SRx",
"parentPublication": {
"id": "proceedings/isda/2009/3872/0",
"title": "Intelligent Systems Design and Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2011/4375/0/4375a515",
"title": "Rank Prediction in Graphs with Locally Weighted Polynomial Regression and EM of Polynomial Mixture Models",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2011/4375a515/12OmNzWx01s",
"parentPublication": {
"id": "proceedings/asonam/2011/4375/0",
"title": "2011 International Conference on Advances in Social Networks Analysis and Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2015/09/07058431",
"title": "Pattern-Aided Regression Modeling and Prediction Model Analysis",
"doi": null,
"abstractUrl": "/journal/tk/2015/09/07058431/13rRUIIVlkR",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csde/2021/9552/0/09718403",
"title": "Lighting energy consumption estimation models for a library building with different lighting scenarios",
"doi": null,
"abstractUrl": "/proceedings-article/csde/2021/09718403/1BogXQNL5NS",
"parentPublication": {
"id": "proceedings/csde/2021/9552/0",
"title": "2021 IEEE Asia-Pacific Conference on Computer Science and Data Engineering (CSDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ichi/2019/9138/0/08904528",
"title": "Non-linear regression models for imputing longitudinal missing data",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2019/08904528/1f8N8uQ32Fy",
"parentPublication": {
"id": "proceedings/ichi/2019/9138/0",
"title": "2019 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg201312xxviii",
"articleId": "13rRUygBwhJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013121972",
"articleId": "13rRUxYINfa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFrJ",
"name": "ttg2013121962s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013121962s.zip",
"extension": "zip",
"size": "25.2 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXnFrK",
"name": "ttg2013121962s.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013121962s.avi",
"extension": "avi",
"size": "46.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYINfa",
"doi": "10.1109/TVCG.2013.146",
"abstract": "We present a visual analytics solution designed to address prevalent issues in the area of Operational Decision Management (ODM). In ODM, which has its roots in Artificial Intelligence (Expert Systems) and Management Science, it is increasingly important to align business decisions with business goals. In our work, we consider decision models (executable models of the business domain) as ontologies that describe the business domain, and production rules that describe the business logic of decisions to be made over this ontology. Executing a decision model produces an accumulation of decisions made over time for individual cases. We are interested, first, to get insight in the decision logic and the accumulated facts by themselves. Secondly and more importantly, we want to see how the accumulated facts reveal potential divergences between the reality as captured by the decision model, and the reality as captured by the executed decisions. We illustrate the motivation, added value for visual analytics, and our proposed solution and tooling through a business case from the car insurance industry.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a visual analytics solution designed to address prevalent issues in the area of Operational Decision Management (ODM). In ODM, which has its roots in Artificial Intelligence (Expert Systems) and Management Science, it is increasingly important to align business decisions with business goals. In our work, we consider decision models (executable models of the business domain) as ontologies that describe the business domain, and production rules that describe the business logic of decisions to be made over this ontology. Executing a decision model produces an accumulation of decisions made over time for individual cases. We are interested, first, to get insight in the decision logic and the accumulated facts by themselves. Secondly and more importantly, we want to see how the accumulated facts reveal potential divergences between the reality as captured by the decision model, and the reality as captured by the executed decisions. We illustrate the motivation, added value for visual analytics, and our proposed solution and tooling through a business case from the car insurance industry.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a visual analytics solution designed to address prevalent issues in the area of Operational Decision Management (ODM). In ODM, which has its roots in Artificial Intelligence (Expert Systems) and Management Science, it is increasingly important to align business decisions with business goals. In our work, we consider decision models (executable models of the business domain) as ontologies that describe the business domain, and production rules that describe the business logic of decisions to be made over this ontology. Executing a decision model produces an accumulation of decisions made over time for individual cases. We are interested, first, to get insight in the decision logic and the accumulated facts by themselves. Secondly and more importantly, we want to see how the accumulated facts reveal potential divergences between the reality as captured by the decision model, and the reality as captured by the executed decisions. We illustrate the motivation, added value for visual analytics, and our proposed solution and tooling through a business case from the car insurance industry.",
"title": "Decision Exploration Lab: A Visual Analytics Solution for Decision Management",
"normalizedTitle": "Decision Exploration Lab: A Visual Analytics Solution for Decision Management",
"fno": "ttg2013121972",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Business Data Processing",
"Data Visualisation",
"Decision Support Systems",
"Expert Systems",
"Ontologies Artificial Intelligence",
"Decision Exploration Lab",
"Visual Analytics Solution",
"Operational Decision Management",
"ODM",
"Artificial Intelligence",
"Expert System",
"Ontologies",
"Business Domain",
"Business Logic",
"Decision Logic",
"Decision Making",
"Statistical Analysis",
"Data Visualization",
"Analytical Models",
"Visual Analytics",
"Decision Making",
"Statistical Analysis",
"Data Visualization",
"Analytical Models",
"Visual Analytics",
"Program Analysis",
"Decision Support Systems",
"Model Validation And Analysis",
"Multivariate Statistics"
],
"authors": [
{
"givenName": "Bertjan",
"surname": "Broeksema",
"fullName": "Bertjan Broeksema",
"affiliation": "IBM France Center for Advanced Studies, Institute Johann Bernoulli, University of Groningen, The Netherlands andINRIA, University of Bordeaux, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Baudel",
"fullName": "Thomas Baudel",
"affiliation": "IBM France Center for Advanced Studies",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alex",
"surname": "Telea",
"fullName": "Alex Telea",
"affiliation": "Institute Johann Bernoulli, University of Groningen, The Netherlands",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Paolo",
"surname": "Crisafulli",
"fullName": "Paolo Crisafulli",
"affiliation": "IBM France",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "1972-1981",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2011/9618/0/05718571",
"title": "An Experimental Study of Financial Portfolio Selection with Visual Analytics for Decision Support",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718571/12OmNAlvHqY",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892b484",
"title": "Introduction to Decision Support and Operational Management Analytics Minitrack",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892b484/12OmNqN6R1X",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892c416",
"title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2014/2504/0/2504b353",
"title": "Introduction to Visualization and Analytics for Decision Support, Operational Management, and Scientific Discovery Minitrack",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2014/2504b353/12OmNxEjYb7",
"parentPublication": {
"id": "proceedings/hicss/2014/2504/0",
"title": "2014 47th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2016/5670/0/5670b426",
"title": "Introduction to the Minitrack on Interactive Visual Decision Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2016/5670b426/12OmNzWfoUn",
"parentPublication": {
"id": "proceedings/hicss/2016/5670/0",
"title": "2016 49th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585665",
"title": "The Anchoring Effect in Decision-Making with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585665/17D45WZZ7CL",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icvisp/2021/0770/0/077000a296",
"title": "Visual Analytics for the International Trade",
"doi": null,
"abstractUrl": "/proceedings-article/icvisp/2021/077000a296/1APq2QCw3n2",
"parentPublication": {
"id": "proceedings/icvisp/2021/0770/0",
"title": "2021 5th International Conference on Vision, Image and Signal Processing (ICVISP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a110",
"title": "Toward Systematic Considerations of Missingness in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a110/1J6heLU2nNS",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805420",
"title": "FairSight: Visual Analytics for Fairness in Decision Making",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805420/1cG4psmkNQA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a014",
"title": "Making and Trusting Decisions in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a014/1yQB6h3HL6o",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013121962",
"articleId": "13rRUwhHcJg",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013121982",
"articleId": "13rRUy2YLT1",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUy2YLT1",
"doi": "10.1109/TVCG.2013.219",
"abstract": "For preserving the grotto wall paintings and protecting these historic cultural icons from the damage and deterioration in nature environment, a visual analytics framework and a set of tools are proposed for the discovery of degradation patterns. In comparison with the traditional analysis methods that used restricted scales, our method provides users with multi-scale analytic support to study the problems on site, cave, wall and particular degradation area scales, through the application of multidimensional visualization techniques. Several case studies have been carried out using real-world wall painting data collected from a renowned World Heritage site, to verify the usability and effectiveness of the proposed method. User studies and expert reviews were also conducted through by domain experts ranging from scientists such as microenvironment researchers, archivists, geologists, chemists, to practitioners such as conservators, restorers and curators.",
"abstracts": [
{
"abstractType": "Regular",
"content": "For preserving the grotto wall paintings and protecting these historic cultural icons from the damage and deterioration in nature environment, a visual analytics framework and a set of tools are proposed for the discovery of degradation patterns. In comparison with the traditional analysis methods that used restricted scales, our method provides users with multi-scale analytic support to study the problems on site, cave, wall and particular degradation area scales, through the application of multidimensional visualization techniques. Several case studies have been carried out using real-world wall painting data collected from a renowned World Heritage site, to verify the usability and effectiveness of the proposed method. User studies and expert reviews were also conducted through by domain experts ranging from scientists such as microenvironment researchers, archivists, geologists, chemists, to practitioners such as conservators, restorers and curators.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "For preserving the grotto wall paintings and protecting these historic cultural icons from the damage and deterioration in nature environment, a visual analytics framework and a set of tools are proposed for the discovery of degradation patterns. In comparison with the traditional analysis methods that used restricted scales, our method provides users with multi-scale analytic support to study the problems on site, cave, wall and particular degradation area scales, through the application of multidimensional visualization techniques. Several case studies have been carried out using real-world wall painting data collected from a renowned World Heritage site, to verify the usability and effectiveness of the proposed method. User studies and expert reviews were also conducted through by domain experts ranging from scientists such as microenvironment researchers, archivists, geologists, chemists, to practitioners such as conservators, restorers and curators.",
"title": "Vis4Heritage: Visual Analytics Approach on Grotto Wall Painting Degradations",
"normalizedTitle": "Vis4Heritage: Visual Analytics Approach on Grotto Wall Painting Degradations",
"fno": "ttg2013121982",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"History",
"Painting",
"Walls",
"Vis 4 Heritage",
"Visual Analytics Approach",
"Grotto Wall Painting Degradations",
"Historic Cultural Icons",
"Grotto Wall Protection",
"Nature Environment",
"Visual Analytics Framework",
"Degradation Pattern Discovery",
"Multiscale Analytic",
"Degradation Area Scales",
"Multidimensional Visualization Techniques",
"Renowned World Heritage Site",
"Data Visualization",
"Painting",
"Cultural Differences",
"Correlation",
"Visual Analytics",
"Data Visualization",
"Painting",
"Cultural Differences",
"Correlation",
"Visual Analytics",
"Cultural Heritage",
"Wall Paintings",
"Degradation"
],
"authors": [
{
"givenName": "Jiawan",
"surname": "Zhang",
"fullName": "Jiawan Zhang",
"affiliation": "School of Computer Software and Information Technology Research Center for Cultural Heritage Conservation and Promotion, Tianjin University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Kai",
"surname": "Kang",
"fullName": "Kai Kang",
"affiliation": "School of Computer Software, Tianjin University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dajian",
"surname": "Liu",
"fullName": "Dajian Liu",
"affiliation": "School of Computer Software, Tianjin University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ye",
"surname": "Yuan",
"fullName": "Ye Yuan",
"affiliation": "School of Computer Software, Tianjin University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "E.",
"surname": "Yanli",
"fullName": "E. Yanli",
"affiliation": "School of Computer Software, Tianjin University",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "1982-1991",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "mags/cg/2008/01/mcg2008010018",
"title": "An Information-Theoretic View of Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2008/01/mcg2008010018/13rRUB6SpRW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/08/mco2013080090",
"title": "Bixplorer: Visual Analytics with Biclusters",
"doi": null,
"abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/02/mcg2009020014",
"title": "Defining Insight for Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2009/02/mcg2009020014/13rRUwh80JN",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122516",
"title": "Perception of Visual Variables on Tiled Wall-Sized Displays for Information Visualization Applications",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122516/13rRUwwJWFM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2011/12/mco2011120039",
"title": "Cultural Analytics in Large-Scale Visualization Environments",
"doi": null,
"abstractUrl": "/magazine/co/2011/12/mco2011120039/13rRUx0Pqw5",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122899",
"title": "A Visual Analytics Approach to Multiscale Exploration of Environmental Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122899/13rRUxDqS8g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2018/9385/0/938500a651",
"title": "Path Opening for Hyperspectral Crack Detection of Cultural Heritage Paintings",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2018/938500a651/19RSs6pULhm",
"parentPublication": {
"id": "proceedings/sitis/2018/9385/0",
"title": "2018 14th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mcsi/2018/7500/0/750000a102",
"title": "Making a Tactile Painting of the Painting "Capturing Vasil Levski at the Kakrinsko Hanche" for Blind Users",
"doi": null,
"abstractUrl": "/proceedings-article/mcsi/2018/750000a102/1bXcSdqh3Nu",
"parentPublication": {
"id": "proceedings/mcsi/2018/7500/0",
"title": "2018 5th International Conference on Mathematics and Computers in Sciences and Industry (MCSI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ismar-adjunct/2019/4765/0/476500a118",
"title": "Enhancing Rock Painting Tour Experience with Outdoor Augmented Reality",
"doi": null,
"abstractUrl": "/proceedings-article/ismar-adjunct/2019/476500a118/1gysk60HxPW",
"parentPublication": {
"id": "proceedings/ismar-adjunct/2019/4765/0",
"title": "2019 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300b447",
"title": "End-to-End Partial Convolutions Neural Networks for Dunhuang Grottoes Wall-Painting Restoration",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300b447/1i5mLfLkASI",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013121972",
"articleId": "13rRUxYINfa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013121992",
"articleId": "13rRUIIVlcI",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUIIVlcI",
"doi": "10.1109/TVCG.2013.212",
"abstract": "Topic modeling has been widely used for analyzing text document collections. Recently, there have been significant advancements in various topic modeling techniques, particularly in the form of probabilistic graphical modeling. State-of-the-art techniques such as Latent Dirichlet Allocation (LDA) have been successfully applied in visual text analytics. However, most of the widely-used methods based on probabilistic modeling have drawbacks in terms of consistency from multiple runs and empirical convergence. Furthermore, due to the complicatedness in the formulation and the algorithm, LDA cannot easily incorporate various types of user feedback. To tackle this problem, we propose a reliable and flexible visual analytics system for topic modeling called UTOPIAN (User-driven Topic modeling based on Interactive Nonnegative Matrix Factorization). Centered around its semi-supervised formulation, UTOPIAN enables users to interact with the topic modeling method and steer the result in a user-driven manner. We demonstrate the capability of UTOPIAN via several usage scenarios with real-world document corpuses such as InfoVis/VAST paper data set and product review data sets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Topic modeling has been widely used for analyzing text document collections. Recently, there have been significant advancements in various topic modeling techniques, particularly in the form of probabilistic graphical modeling. State-of-the-art techniques such as Latent Dirichlet Allocation (LDA) have been successfully applied in visual text analytics. However, most of the widely-used methods based on probabilistic modeling have drawbacks in terms of consistency from multiple runs and empirical convergence. Furthermore, due to the complicatedness in the formulation and the algorithm, LDA cannot easily incorporate various types of user feedback. To tackle this problem, we propose a reliable and flexible visual analytics system for topic modeling called UTOPIAN (User-driven Topic modeling based on Interactive Nonnegative Matrix Factorization). Centered around its semi-supervised formulation, UTOPIAN enables users to interact with the topic modeling method and steer the result in a user-driven manner. We demonstrate the capability of UTOPIAN via several usage scenarios with real-world document corpuses such as InfoVis/VAST paper data set and product review data sets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Topic modeling has been widely used for analyzing text document collections. Recently, there have been significant advancements in various topic modeling techniques, particularly in the form of probabilistic graphical modeling. State-of-the-art techniques such as Latent Dirichlet Allocation (LDA) have been successfully applied in visual text analytics. However, most of the widely-used methods based on probabilistic modeling have drawbacks in terms of consistency from multiple runs and empirical convergence. Furthermore, due to the complicatedness in the formulation and the algorithm, LDA cannot easily incorporate various types of user feedback. To tackle this problem, we propose a reliable and flexible visual analytics system for topic modeling called UTOPIAN (User-driven Topic modeling based on Interactive Nonnegative Matrix Factorization). Centered around its semi-supervised formulation, UTOPIAN enables users to interact with the topic modeling method and steer the result in a user-driven manner. We demonstrate the capability of UTOPIAN via several usage scenarios with real-world document corpuses such as InfoVis/VAST paper data set and product review data sets.",
"title": "UTOPIAN: User-Driven Topic Modeling Based on Interactive Nonnegative Matrix Factorization",
"normalizedTitle": "UTOPIAN: User-Driven Topic Modeling Based on Interactive Nonnegative Matrix Factorization",
"fno": "ttg2013121992",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Analytical Models",
"Visual Analytics",
"Computational Modeling",
"Interactive States",
"Context Modeling",
"Interactive Clustering",
"Analytical Models",
"Visual Analytics",
"Computational Modeling",
"Interactive States",
"Context Modeling",
"Text Analytics",
"Latent Dirichlet Allocation",
"Nonnegative Matrix Factorization",
"Topic Modeling",
"Visual Analytics"
],
"authors": [
{
"givenName": null,
"surname": "Jaegul Choo",
"fullName": "Jaegul Choo",
"affiliation": "Georgia Inst. of Technol., Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Changhyun Lee",
"fullName": "Changhyun Lee",
"affiliation": "Georgia Inst. of Technol., Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chandan K.",
"surname": "Reddy",
"fullName": "Chandan K. Reddy",
"affiliation": "Wayne State Univ., Detroit, MI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Haesun",
"surname": "Park",
"fullName": "Haesun Park",
"affiliation": "Georgia Inst. of Technol., Atlanta, GA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "1992-2001",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2016/5473/0/07837872",
"title": "L-EnsNMF: Boosted Local Topic Discovery via Ensemble of Nonnegative Matrix Factorization",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2016/07837872/12OmNAWH9xf",
"parentPublication": {
"id": "proceedings/icdm/2016/5473/0",
"title": "2016 IEEE 16th International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019825",
"title": "Progressive Learning of Topic Modeling Parameters: A Visual Analytics Framework",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019825/13rRUwghd9c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122119",
"title": "Interactive Exploration of Surveillance Video through Action Shot Summarization and Trajectory Visualization",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122119/13rRUxC0SOX",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539597",
"title": "TopicLens: Efficient Multi-Level Visual Topic Exploration of Large-Scale Document Collections",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539597/13rRUy0qnLK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08467535",
"title": "Visual Analytics for Topic Model Optimization based on User-Steerable Speculative Execution",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08467535/17D45XeKgtW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2020/07/08666058",
"title": "Affinity Regularized Non-Negative Matrix Factorization for Lifelong Topic Modeling",
"doi": null,
"abstractUrl": "/journal/tk/2020/07/08666058/18l6FUrOG88",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807224",
"title": "Semantic Concept Spaces: Guided Topic Model Refinement using Word-Embedding Projections",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807224/1cG6twVJ2HC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a148",
"title": "An Interactive Visual Analytics System for Incremental Classification Based on Semi-supervised Topic Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a148/1cMF8cnyXfi",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2019/2284/0/08986922",
"title": "TopicSifter: Interactive Search Space Reduction through Targeted Topic Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2019/08986922/1hrMz9LdbzO",
"parentPublication": {
"id": "proceedings/vast/2019/2284/0",
"title": "2019 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09039699",
"title": "ArchiText: Interactive Hierarchical Topic Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09039699/1igS4Rezjr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013121982",
"articleId": "13rRUy2YLT1",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122002",
"articleId": "13rRUxNW1Zo",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRPa",
"name": "ttg2013121992s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013121992s.mp4",
"extension": "mp4",
"size": "11.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNW1Zo",
"doi": "10.1109/TVCG.2013.162",
"abstract": "Analyzing large textual collections has become increasingly challenging given the size of the data available and the rate that more data is being generated. Topic-based text summarization methods coupled with interactive visualizations have presented promising approaches to address the challenge of analyzing large text corpora. As the text corpora and vocabulary grow larger, more topics need to be generated in order to capture the meaningful latent themes and nuances in the corpora. However, it is difficult for most of current topic-based visualizations to represent large number of topics without being cluttered or illegible. To facilitate the representation and navigation of a large number of topics, we propose a visual analytics system - HierarchicalTopic (HT). HT integrates a computational algorithm, Topic Rose Tree, with an interactive visual interface. The Topic Rose Tree constructs a topic hierarchy based on a list of topics. The interactive visual interface is designed to present the topic content as well as temporal evolution of topics in a hierarchical fashion. User interactions are provided for users to make changes to the topic hierarchy based on their mental model of the topic space. To qualitatively evaluate HT, we present a case study that showcases how HierarchicalTopics aid expert users in making sense of a large number of topics and discovering interesting patterns of topic groups. We have also conducted a user study to quantitatively evaluate the effect of hierarchical topic structure. The study results reveal that the HT leads to faster identification of large number of relevant topics. We have also solicited user feedback during the experiments and incorporated some suggestions into the current version of HierarchicalTopics.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Analyzing large textual collections has become increasingly challenging given the size of the data available and the rate that more data is being generated. Topic-based text summarization methods coupled with interactive visualizations have presented promising approaches to address the challenge of analyzing large text corpora. As the text corpora and vocabulary grow larger, more topics need to be generated in order to capture the meaningful latent themes and nuances in the corpora. However, it is difficult for most of current topic-based visualizations to represent large number of topics without being cluttered or illegible. To facilitate the representation and navigation of a large number of topics, we propose a visual analytics system - HierarchicalTopic (HT). HT integrates a computational algorithm, Topic Rose Tree, with an interactive visual interface. The Topic Rose Tree constructs a topic hierarchy based on a list of topics. The interactive visual interface is designed to present the topic content as well as temporal evolution of topics in a hierarchical fashion. User interactions are provided for users to make changes to the topic hierarchy based on their mental model of the topic space. To qualitatively evaluate HT, we present a case study that showcases how HierarchicalTopics aid expert users in making sense of a large number of topics and discovering interesting patterns of topic groups. We have also conducted a user study to quantitatively evaluate the effect of hierarchical topic structure. The study results reveal that the HT leads to faster identification of large number of relevant topics. We have also solicited user feedback during the experiments and incorporated some suggestions into the current version of HierarchicalTopics.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Analyzing large textual collections has become increasingly challenging given the size of the data available and the rate that more data is being generated. Topic-based text summarization methods coupled with interactive visualizations have presented promising approaches to address the challenge of analyzing large text corpora. As the text corpora and vocabulary grow larger, more topics need to be generated in order to capture the meaningful latent themes and nuances in the corpora. However, it is difficult for most of current topic-based visualizations to represent large number of topics without being cluttered or illegible. To facilitate the representation and navigation of a large number of topics, we propose a visual analytics system - HierarchicalTopic (HT). HT integrates a computational algorithm, Topic Rose Tree, with an interactive visual interface. The Topic Rose Tree constructs a topic hierarchy based on a list of topics. The interactive visual interface is designed to present the topic content as well as temporal evolution of topics in a hierarchical fashion. User interactions are provided for users to make changes to the topic hierarchy based on their mental model of the topic space. To qualitatively evaluate HT, we present a case study that showcases how HierarchicalTopics aid expert users in making sense of a large number of topics and discovering interesting patterns of topic groups. We have also conducted a user study to quantitatively evaluate the effect of hierarchical topic structure. The study results reveal that the HT leads to faster identification of large number of relevant topics. We have also solicited user feedback during the experiments and incorporated some suggestions into the current version of HierarchicalTopics.",
"title": "HierarchicalTopics: Visually Exploring Large Text Collections Using Topic Hierarchies",
"normalizedTitle": "HierarchicalTopics: Visually Exploring Large Text Collections Using Topic Hierarchies",
"fno": "ttg2013122002",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Vocabulary",
"Analytical Models",
"Text Mining",
"Computational Modeling",
"Algorithm Design And Analysis",
"Visual Analytics",
"Vocabulary",
"Analytical Models",
"Text Mining",
"Computational Modeling",
"Algorithm Design And Analysis",
"Rose Tree",
"Hierarchical Topic Representation",
"Topic Modeling"
],
"authors": [
{
"givenName": null,
"surname": "Wenwen Dou",
"fullName": "Wenwen Dou",
"affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Li Yu",
"fullName": "Li Yu",
"affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Xiaoyu Wang",
"fullName": "Xiaoyu Wang",
"affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Zhiqiang Ma",
"fullName": "Zhiqiang Ma",
"affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "William",
"surname": "Ribarsky",
"fullName": "William Ribarsky",
"affiliation": "Univ. of North Carolina at Charlotte, Charlotte, NC, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2002-2011",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/spire/2000/0746/0/07460055",
"title": "Experiment Analysis in Newspaper Topic Detection",
"doi": null,
"abstractUrl": "/proceedings-article/spire/2000/07460055/12OmNAndin8",
"parentPublication": {
"id": "proceedings/spire/2000/0746/0",
"title": "String Processing and Information Retrieval, International Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ictai/2011/4596/0/4596a936",
"title": "TopicView: Visually Comparing Topic Models of Text Collections",
"doi": null,
"abstractUrl": "/proceedings-article/ictai/2011/4596a936/12OmNArKSif",
"parentPublication": {
"id": "proceedings/ictai/2011/4596/0",
"title": "2011 IEEE 23rd International Conference on Tools with Artificial Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icbk/2017/3120/0/3120a254",
"title": "Incorporating Entity Correlation Knowledge into Topic Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/icbk/2017/3120a254/12OmNzdoMCc",
"parentPublication": {
"id": "proceedings/icbk/2017/3120/0",
"title": "2017 IEEE International Conference on Big Knowledge (ICBK)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2011/4408/0/4408a101",
"title": "SolarMap: Multifaceted Visual Analytics for Topic Exploration",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2011/4408a101/12OmNzw8j1t",
"parentPublication": {
"id": "proceedings/icdm/2011/4408/0",
"title": "2011 IEEE 11th International Conference on Data Mining",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08019825",
"title": "Progressive Learning of Topic Modeling Parameters: A Visual Analytics Framework",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08019825/13rRUwghd9c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/01/ttg2012010093",
"title": "EventRiver: Visually Exploring Text Collections with Temporal References",
"doi": null,
"abstractUrl": "/journal/tg/2012/01/ttg2012010093/13rRUxly8SS",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539597",
"title": "TopicLens: Efficient Multi-Level Visual Topic Exploration of Large-Scale Document Collections",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539597/13rRUy0qnLK",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669550",
"title": "Text Fingerprinting and Topic Mining in the Prescription Opioid Use Literature",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669550/1A9VGSMpNYY",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/09/09039699",
"title": "ArchiText: Interactive Hierarchical Topic Modeling",
"doi": null,
"abstractUrl": "/journal/tg/2021/09/09039699/1igS4Rezjr2",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis4dh/2021/1370/0/137000a012",
"title": "Uncertainty-aware Topic Modeling Visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vis4dh/2021/137000a012/1yNiG9yU9JS",
"parentPublication": {
"id": "proceedings/vis4dh/2021/1370/0",
"title": "2021 IEEE 6th Workshop on Visualization for the Digital Humanities (VIS4DH)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013121992",
"articleId": "13rRUIIVlcI",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122012",
"articleId": "13rRUyogGAa",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyogGAa",
"doi": "10.1109/TVCG.2013.221",
"abstract": "How do various topics compete for public attention when they are spreading on social media? What roles do opinion leaders play in the rise and fall of competitiveness of various topics? In this study, we propose an expanded topic competition model to characterize the competition for public attention on multiple topics promoted by various opinion leaders on social media. To allow an intuitive understanding of the estimated measures, we present a timeline visualization through a metaphoric interpretation of the results. The visual design features both topical and social aspects of the information diffusion process by compositing ThemeRiver with storyline style visualization. ThemeRiver shows the increase and decrease of competitiveness of each topic. Opinion leaders are drawn as threads that converge or diverge with regard to their roles in influencing the public agenda change over time. To validate the effectiveness of the visual analysis techniques, we report the insights gained on two collections of Tweets: the 2012 United States presidential election and the Occupy Wall Street movement.",
"abstracts": [
{
"abstractType": "Regular",
"content": "How do various topics compete for public attention when they are spreading on social media? What roles do opinion leaders play in the rise and fall of competitiveness of various topics? In this study, we propose an expanded topic competition model to characterize the competition for public attention on multiple topics promoted by various opinion leaders on social media. To allow an intuitive understanding of the estimated measures, we present a timeline visualization through a metaphoric interpretation of the results. The visual design features both topical and social aspects of the information diffusion process by compositing ThemeRiver with storyline style visualization. ThemeRiver shows the increase and decrease of competitiveness of each topic. Opinion leaders are drawn as threads that converge or diverge with regard to their roles in influencing the public agenda change over time. To validate the effectiveness of the visual analysis techniques, we report the insights gained on two collections of Tweets: the 2012 United States presidential election and the Occupy Wall Street movement.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "How do various topics compete for public attention when they are spreading on social media? What roles do opinion leaders play in the rise and fall of competitiveness of various topics? In this study, we propose an expanded topic competition model to characterize the competition for public attention on multiple topics promoted by various opinion leaders on social media. To allow an intuitive understanding of the estimated measures, we present a timeline visualization through a metaphoric interpretation of the results. The visual design features both topical and social aspects of the information diffusion process by compositing ThemeRiver with storyline style visualization. ThemeRiver shows the increase and decrease of competitiveness of each topic. Opinion leaders are drawn as threads that converge or diverge with regard to their roles in influencing the public agenda change over time. To validate the effectiveness of the visual analysis techniques, we report the insights gained on two collections of Tweets: the 2012 United States presidential election and the Occupy Wall Street movement.",
"title": "Visual Analysis of Topic Competition on Social Media",
"normalizedTitle": "Visual Analysis of Topic Competition on Social Media",
"fno": "ttg2013122012",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Data Visualization",
"Mathematical Model",
"Recruitment",
"Social Network Services",
"Information Propagation",
"Visual Analytics",
"Data Visualization",
"Mathematical Model",
"Recruitment",
"Social Network Services",
"Agenda Setting",
"Social Media Visuaization",
"Topic Competition",
"Information Diffusion"
],
"authors": [
{
"givenName": null,
"surname": "Panpan Xu",
"fullName": "Panpan Xu",
"affiliation": "Hong Kong Univ. of Sci. & Technol., Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Yingcai Wu",
"fullName": "Yingcai Wu",
"affiliation": "Microsoft Res. Asia, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Enxun Wei",
"fullName": "Enxun Wei",
"affiliation": "Shanghai Jiao Tong Univ., Shanghai, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Tai-Quan Peng",
"fullName": "Tai-Quan Peng",
"affiliation": "Nanyang Technol. Univ., Singapore, Singapore",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Shixia Liu",
"fullName": "Shixia Liu",
"affiliation": "Microsoft Res. Asia, Beijing, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jonathan J. H.",
"surname": "Zhu",
"fullName": "Jonathan J. H. Zhu",
"affiliation": "City Univ. of Hong Kong, Hong Kong, China",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Huamin Qu",
"fullName": "Huamin Qu",
"affiliation": "Hong Kong Univ. of Sci. & Technol., Hong Kong, China",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2012-2021",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icbk/2017/3120/0/3120a315",
"title": "Modeling Topic Evolution in Social Media Short Texts",
"doi": null,
"abstractUrl": "/proceedings-article/icbk/2017/3120a315/12OmNy2rS0U",
"parentPublication": {
"id": "proceedings/icbk/2017/3120/0",
"title": "2017 IEEE International Conference on Big Knowledge (ICBK)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/aina/2017/6029/0/6029b012",
"title": "Mining Opinion Leaders in Big Social Network",
"doi": null,
"abstractUrl": "/proceedings-article/aina/2017/6029b012/12OmNzUPpq4",
"parentPublication": {
"id": "proceedings/aina/2017/6029/0",
"title": "2017 IEEE 31st International Conference on Advanced Information Networking and Applications (AINA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icsc/2010/4154/0/4154a394",
"title": "Sentiment Mining within Social Media for Topic Identification",
"doi": null,
"abstractUrl": "/proceedings-article/icsc/2010/4154a394/12OmNzayNlc",
"parentPublication": {
"id": "proceedings/icsc/2010/4154/0",
"title": "2010 IEEE Fourth International Conference on Semantic Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/05/mco2013050068",
"title": "Visual Analysis of Social Media Data",
"doi": null,
"abstractUrl": "/magazine/co/2013/05/mco2013050068/13rRUB6Sq3O",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/09/08037991",
"title": "A Visual Analytics Framework for Identifying Topic Drivers in Media Events",
"doi": null,
"abstractUrl": "/journal/tg/2018/09/08037991/13rRUxASuhI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875992",
"title": "EvoRiver: Visual Analysis of Topic Coopetition on Social Media",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875992/13rRUxBa563",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876032",
"title": "OpinionFlow: Visual Analysis of Opinion Diffusion on Social Media",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876032/13rRUxYINfe",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534023",
"title": "SocialOcean: Visual Analysis and Characterization of Social Media Bubbles",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534023/17D45WIXbOL",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2021/3892/0/389200a389",
"title": "Research on The Key Technology of Counselors Using Social Network to Lead The Topic",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2021/389200a389/1t2nr0qAS5O",
"parentPublication": {
"id": "proceedings/icmtma/2021/3892/0",
"title": "2021 13th International Conference on Measuring Technology and Mechatronics Automation (ICMTMA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09552886",
"title": "Real-Time Visual Analysis of High-Volume Social Media Posts",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09552886/1xic6y40Iwg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122002",
"articleId": "13rRUxNW1Zo",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122022",
"articleId": "13rRUwhHcJh",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesX8",
"name": "ttg2013122012s.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122012s.avi",
"extension": "avi",
"size": "39.7 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwhHcJh",
"doi": "10.1109/TVCG.2013.186",
"abstract": "The number of microblog posts published daily has reached a level that hampers the effective retrieval of relevant messages, and the amount of information conveyed through services such as Twitter is still increasing. Analysts require new methods for monitoring their topic of interest, dealing with the data volume and its dynamic nature. It is of particular importance to provide situational awareness for decision making in time-critical tasks. Current tools for monitoring microblogs typically filter messages based on user-defined keyword queries and metadata restrictions. Used on their own, such methods can have drawbacks with respect to filter accuracy and adaptability to changes in trends and topic structure. We suggest ScatterBlogs2, a new approach to let analysts build task-tailored message filters in an interactive and visual manner based on recorded messages of well-understood previous events. These message filters include supervised classification and query creation backed by the statistical distribution of terms and their co-occurrences. The created filter methods can be orchestrated and adapted afterwards for interactive, visual real-time monitoring and analysis of microblog feeds. We demonstrate the feasibility of our approach for analyzing the Twitter stream in emergency management scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The number of microblog posts published daily has reached a level that hampers the effective retrieval of relevant messages, and the amount of information conveyed through services such as Twitter is still increasing. Analysts require new methods for monitoring their topic of interest, dealing with the data volume and its dynamic nature. It is of particular importance to provide situational awareness for decision making in time-critical tasks. Current tools for monitoring microblogs typically filter messages based on user-defined keyword queries and metadata restrictions. Used on their own, such methods can have drawbacks with respect to filter accuracy and adaptability to changes in trends and topic structure. We suggest ScatterBlogs2, a new approach to let analysts build task-tailored message filters in an interactive and visual manner based on recorded messages of well-understood previous events. These message filters include supervised classification and query creation backed by the statistical distribution of terms and their co-occurrences. The created filter methods can be orchestrated and adapted afterwards for interactive, visual real-time monitoring and analysis of microblog feeds. We demonstrate the feasibility of our approach for analyzing the Twitter stream in emergency management scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The number of microblog posts published daily has reached a level that hampers the effective retrieval of relevant messages, and the amount of information conveyed through services such as Twitter is still increasing. Analysts require new methods for monitoring their topic of interest, dealing with the data volume and its dynamic nature. It is of particular importance to provide situational awareness for decision making in time-critical tasks. Current tools for monitoring microblogs typically filter messages based on user-defined keyword queries and metadata restrictions. Used on their own, such methods can have drawbacks with respect to filter accuracy and adaptability to changes in trends and topic structure. We suggest ScatterBlogs2, a new approach to let analysts build task-tailored message filters in an interactive and visual manner based on recorded messages of well-understood previous events. These message filters include supervised classification and query creation backed by the statistical distribution of terms and their co-occurrences. The created filter methods can be orchestrated and adapted afterwards for interactive, visual real-time monitoring and analysis of microblog feeds. We demonstrate the feasibility of our approach for analyzing the Twitter stream in emergency management scenarios.",
"title": "ScatterBlogs2: Real-Time Monitoring of Microblog Messages through User-Guided Filtering",
"normalizedTitle": "ScatterBlogs2: Real-Time Monitoring of Microblog Messages through User-Guided Filtering",
"fno": "ttg2013122022",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Blogs",
"Social Network Services",
"Information Retrieval",
"Twitter",
"Real Time Systems",
"Labeling",
"Spatiotemporal Phenomena",
"Query Construction",
"Blogs",
"Social Network Services",
"Information Retrieval",
"Twitter",
"Real Time Systems",
"Labeling",
"Spatiotemporal Phenomena",
"Text Classification",
"Microblog Analysis",
"Twitter",
"Text Analytics",
"Social Media Monitoring",
"Live Monitoring",
"Visual Analytics",
"Information Visualization",
"Filter Construction"
],
"authors": [
{
"givenName": "Harald",
"surname": "Bosch",
"fullName": "Harald Bosch",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dennis",
"surname": "Thom",
"fullName": "Dennis Thom",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Florian",
"surname": "Heimerl",
"fullName": "Florian Heimerl",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Edwin",
"surname": "Puttmann",
"fullName": "Edwin Puttmann",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steffen",
"surname": "Koch",
"fullName": "Steffen Koch",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Robert",
"surname": "Kruger",
"fullName": "Robert Kruger",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Worner",
"fullName": "Michael Worner",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thomas",
"surname": "Ertl",
"fullName": "Thomas Ertl",
"affiliation": "Visualization & Interactive Syst., Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2022-2031",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cgc/2012/3027/0/06382901",
"title": "Training Opinion Leaders in Microblog: A Game Theory Approach",
"doi": null,
"abstractUrl": "/proceedings-article/cgc/2012/06382901/12OmNBkxstl",
"parentPublication": {
"id": "proceedings/cgc/2012/3027/0",
"title": "2012 International Conference on Cloud and Green Computing (CGC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/msr/2012/1760/0/06224287",
"title": "What does software engineering community microblog about?",
"doi": null,
"abstractUrl": "/proceedings-article/msr/2012/06224287/12OmNwDACtB",
"parentPublication": {
"id": "proceedings/msr/2012/1760/0",
"title": "2012 9th IEEE Working Conference on Mining Software Repositories (MSR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2013/5009/0/5009a453",
"title": "YouFlow Microblog: Following Discussions on an Educational Microblog",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2013/5009a453/12OmNwcCIXd",
"parentPublication": {
"id": "proceedings/icalt/2013/5009/0",
"title": "2013 IEEE 13th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2013/2240/0/06785707",
"title": "Information propagation in microblog networks",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2013/06785707/12OmNxE2mHy",
"parentPublication": {
"id": "proceedings/asonam/2013/2240/0",
"title": "2013 International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icrtccm/2017/4799/0/4799a083",
"title": "Age Forecasting Analysis - Over Microblogs",
"doi": null,
"abstractUrl": "/proceedings-article/icrtccm/2017/4799a083/12OmNxvO085",
"parentPublication": {
"id": "proceedings/icrtccm/2017/4799/0",
"title": "2017 Second International Conference on Recent Trends and Challenges in Computational Models (ICRTCCM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a161",
"title": "Detecting Jihadist Messages on Twitter",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a161/12OmNy4r3Ow",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cscloud/2015/9300/0/9300a231",
"title": "Feature Analysis of Important Nodes in Microblog",
"doi": null,
"abstractUrl": "/proceedings-article/cscloud/2015/9300a231/12OmNyQGS7W",
"parentPublication": {
"id": "proceedings/cscloud/2015/9300/0",
"title": "2015 IEEE 2nd International Conference on Cyber Security and Cloud Computing (CSCloud)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2012/5680/0/06495306",
"title": "A Topic Detection Method for Chinese Microblog",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2012/06495306/12OmNyRPgUd",
"parentPublication": {
"id": "proceedings/isise/2012/5680/0",
"title": "2012 Fourth International Symposium on Information Science and Engineering (ISISE 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/acii/2015/9953/0/07344668",
"title": "Improving emotion classification on Chinese microblog texts with auxiliary cross-domain data",
"doi": null,
"abstractUrl": "/proceedings-article/acii/2015/07344668/12OmNz2kqrI",
"parentPublication": {
"id": "proceedings/acii/2015/9953/0",
"title": "2015 International Conference on Affective Computing and Intelligent Interaction (ACII)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192694",
"title": "An Uncertainty-Aware Approach for Exploratory Microblog Retrieval",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192694/13rRUy2YLYy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122012",
"articleId": "13rRUyogGAa",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122032",
"articleId": "13rRUyuegp5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXnFrL",
"name": "ttg2013122022s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122022s.mp4",
"extension": "mp4",
"size": "20.9 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyuegp5",
"doi": "10.1109/TVCG.2013.223",
"abstract": "Social network analysis (SNA) is becoming increasingly concerned not only with actors and their relations, but also with distinguishing between different types of such entities. For example, social scientists may want to investigate asymmetric relations in organizations with strict chains of command, or incorporate non-actors such as conferences and projects when analyzing coauthorship patterns. Multimodal social networks are those where actors and relations belong to different types, or modes, and multimodal social network analysis (mSNA) is accordingly SNA for such networks. In this paper, we present a design study that we conducted with several social scientist collaborators on how to support mSNA using visual analytics tools. Based on an openended, formative design process, we devised a visual representation called parallel node-link bands (PNLBs) that splits modes into separate bands and renders connections between adjacent ones, similar to the list view in Jigsaw. We then used the tool in a qualitative evaluation involving five social scientists whose feedback informed a second design phase that incorporated additional network metrics. Finally, we conducted a second qualitative evaluation with our social scientist collaborators that provided further insights on the utility of the PNLBs representation and the potential of visual analytics for mSNA.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Social network analysis (SNA) is becoming increasingly concerned not only with actors and their relations, but also with distinguishing between different types of such entities. For example, social scientists may want to investigate asymmetric relations in organizations with strict chains of command, or incorporate non-actors such as conferences and projects when analyzing coauthorship patterns. Multimodal social networks are those where actors and relations belong to different types, or modes, and multimodal social network analysis (mSNA) is accordingly SNA for such networks. In this paper, we present a design study that we conducted with several social scientist collaborators on how to support mSNA using visual analytics tools. Based on an openended, formative design process, we devised a visual representation called parallel node-link bands (PNLBs) that splits modes into separate bands and renders connections between adjacent ones, similar to the list view in Jigsaw. We then used the tool in a qualitative evaluation involving five social scientists whose feedback informed a second design phase that incorporated additional network metrics. Finally, we conducted a second qualitative evaluation with our social scientist collaborators that provided further insights on the utility of the PNLBs representation and the potential of visual analytics for mSNA.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Social network analysis (SNA) is becoming increasingly concerned not only with actors and their relations, but also with distinguishing between different types of such entities. For example, social scientists may want to investigate asymmetric relations in organizations with strict chains of command, or incorporate non-actors such as conferences and projects when analyzing coauthorship patterns. Multimodal social networks are those where actors and relations belong to different types, or modes, and multimodal social network analysis (mSNA) is accordingly SNA for such networks. In this paper, we present a design study that we conducted with several social scientist collaborators on how to support mSNA using visual analytics tools. Based on an openended, formative design process, we devised a visual representation called parallel node-link bands (PNLBs) that splits modes into separate bands and renders connections between adjacent ones, similar to the list view in Jigsaw. We then used the tool in a qualitative evaluation involving five social scientists whose feedback informed a second design phase that incorporated additional network metrics. Finally, we conducted a second qualitative evaluation with our social scientist collaborators that provided further insights on the utility of the PNLBs representation and the potential of visual analytics for mSNA.",
"title": "Visual Analytics for Multimodal Social Network Analysis: A Design Study with Social Scientists",
"normalizedTitle": "Visual Analytics for Multimodal Social Network Analysis: A Design Study with Social Scientists",
"fno": "ttg2013122032",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Social Network Services",
"Visual Analytics",
"Data Visualization",
"Complexity Theory",
"Design Methodology",
"User Centered Design",
"Interaction",
"Social Network Services",
"Visual Analytics",
"Data Visualization",
"Complexity Theory",
"Design Methodology",
"User Centered Design",
"Qualitative Evaluation",
"Design Study",
"User Centered Design",
"Node Link Diagrams",
"Multimodal Graphs"
],
"authors": [
{
"givenName": "Sohaib",
"surname": "Ghani",
"fullName": "Sohaib Ghani",
"affiliation": "Sch. of Electr. & Comput. Eng., Purdue Univ., West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Bum Chul Kwon",
"fullName": "Bum Chul Kwon",
"affiliation": "Sch. of Ind. Eng., Purdue Univ., West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Seungyoon Lee",
"fullName": "Seungyoon Lee",
"affiliation": "Brian Lamb Sch. of Commun., Purdue Univ., West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Ji Soo Yi",
"fullName": "Ji Soo Yi",
"affiliation": "Sch. of Ind. Eng., Purdue Univ., West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Niklas",
"surname": "Elmqvist",
"fullName": "Niklas Elmqvist",
"affiliation": "Sch. of Electr. & Comput. Eng., Purdue Univ., West Lafayette, IN, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2032-2041",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cts/2016/2300/0/07870996",
"title": "Be the Data: Social Meetings with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/cts/2016/07870996/12OmNAnMuEx",
"parentPublication": {
"id": "proceedings/cts/2016/2300/0",
"title": "2016 International Conference on Collaboration Technologies and Systems (CTS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2011/4406/0/4406a273",
"title": "Extraction Distractions: A Comparison of Social Network Model Construction Methods",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2011/4406a273/12OmNBOllh1",
"parentPublication": {
"id": "proceedings/eisic/2011/4406/0",
"title": "European Intelligence and Security Informatics Conference",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2014/2504/0/2504b364",
"title": "Studying Animation for Real-Time Visual Analytics: A Design Study of Social Media Analytics in Emergency Management",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2014/2504b364/12OmNBrlPzK",
"parentPublication": {
"id": "proceedings/hicss/2014/2504/0",
"title": "2014 47th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cse/2009/3823/4/3823e332",
"title": "First Steps to Netviz Nirvana: Evaluating Social Network Analysis with NodeXL",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2009/3823e332/12OmNwoPtjX",
"parentPublication": {
"id": "proceedings/cse/2009/3823/2",
"title": "2009 International Conference on Computational Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2013/04/mcg2013040088",
"title": "Visual Matrix Clustering of Social Networks",
"doi": null,
"abstractUrl": "/magazine/cg/2013/04/mcg2013040088/13rRUx0xPCH",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/04/mcg2012040063",
"title": "The Top 10 Challenges in Extreme-Scale Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2012/04/mcg2012040063/13rRUxC0SGA",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585638",
"title": "E-Map: A Visual Analytics Approach for Exploring Significant Event Evolutions in Social Media",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585638/17D45WrVg7l",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805463",
"title": "VASSL: A Visual Analytics Toolkit for Social Spambot Labeling",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805463/1cG4JtVNK2A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a216",
"title": "Compositional Microservices for Immersive Social Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a216/1cMFalENINq",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ldav/2019/2605/0/08944264",
"title": "VAMD: Visual Analytics for Multimodal Data",
"doi": null,
"abstractUrl": "/proceedings-article/ldav/2019/08944264/1grOFlKsRPO",
"parentPublication": {
"id": "proceedings/ldav/2019/2605/0",
"title": "2019 IEEE 9th Symposium on Large Data Analysis and Visualization (LDAV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122022",
"articleId": "13rRUwhHcJh",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122042",
"articleId": "13rRUxNW1TS",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
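The abstract above describes the parallel node-link bands (PNLBs) representation: each mode of a multimodal network gets its own vertical band, and links are rendered only between nodes in adjacent bands. Below is a minimal Python sketch of such a layout; the mode names, node identifiers, and the `pnlb_layout` helper are hypothetical illustrations, not the authors' implementation.

```python
# Minimal sketch (not the authors' code) of a parallel node-link bands layout:
# one vertical band per mode, links only between adjacent bands.
def pnlb_layout(modes, edges, band_gap=1.0):
    """modes: ordered mapping {mode_name: [node, ...]}.
       edges: [(node_a, node_b), ...] connecting nodes of different modes.
       Returns node positions and the edge segments that PNLBs would draw."""
    pos = {}
    for band_idx, (mode, nodes) in enumerate(modes.items()):
        x = band_idx * band_gap
        for i, node in enumerate(sorted(nodes)):
            pos[node] = (x, i)          # stack nodes vertically inside their band

    mode_of = {n: m for m, ns in modes.items() for n in ns}
    order = {m: i for i, m in enumerate(modes)}
    segments = []
    for a, b in edges:
        # keep only links between adjacent bands, as in the PNLBs design
        if abs(order[mode_of[a]] - order[mode_of[b]]) == 1:
            segments.append((pos[a], pos[b]))
    return pos, segments

# toy multimodal network: people, projects, conferences (hypothetical data)
modes = {"person": ["alice", "bob"], "project": ["p1"], "conference": ["VIS"]}
edges = [("alice", "p1"), ("bob", "p1"), ("p1", "VIS")]
positions, segments = pnlb_layout(modes, edges)
print(positions)
print(segments)
```

The band ordering determines which relations are directly visible; links that skip a band are simply not drawn in this simplified version.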
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxNW1TS",
"doi": "10.1109/TVCG.2013.157",
"abstract": "This paper introduces an approach to exploration and discovery in high-dimensional data that incorporates a user's knowledge and questions to craft sets of projection functions meaningful to them. Unlike most prior work that defines projections based on their statistical properties, our approach creates projection functions that align with user-specified annotations. Therefore, the resulting derived dimensions represent concepts defined by the user's examples. These especially crafted projection functions, or explainers, can help find and explain relationships between the data variables and user-designated concepts. They can organize the data according to these concepts. Sets of explainers can provide multiple perspectives on the data. Our approach considers tradeoffs in choosing these projection functions, including their simplicity, expressive power, alignment with prior knowledge, and diversity. We provide techniques for creating collections of explainers. The methods, based on machine learning optimization frameworks, allow exploring the tradeoffs. We demonstrate our approach on model problems and applications in text analysis.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This paper introduces an approach to exploration and discovery in high-dimensional data that incorporates a user's knowledge and questions to craft sets of projection functions meaningful to them. Unlike most prior work that defines projections based on their statistical properties, our approach creates projection functions that align with user-specified annotations. Therefore, the resulting derived dimensions represent concepts defined by the user's examples. These especially crafted projection functions, or explainers, can help find and explain relationships between the data variables and user-designated concepts. They can organize the data according to these concepts. Sets of explainers can provide multiple perspectives on the data. Our approach considers tradeoffs in choosing these projection functions, including their simplicity, expressive power, alignment with prior knowledge, and diversity. We provide techniques for creating collections of explainers. The methods, based on machine learning optimization frameworks, allow exploring the tradeoffs. We demonstrate our approach on model problems and applications in text analysis.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This paper introduces an approach to exploration and discovery in high-dimensional data that incorporates a user's knowledge and questions to craft sets of projection functions meaningful to them. Unlike most prior work that defines projections based on their statistical properties, our approach creates projection functions that align with user-specified annotations. Therefore, the resulting derived dimensions represent concepts defined by the user's examples. These especially crafted projection functions, or explainers, can help find and explain relationships between the data variables and user-designated concepts. They can organize the data according to these concepts. Sets of explainers can provide multiple perspectives on the data. Our approach considers tradeoffs in choosing these projection functions, including their simplicity, expressive power, alignment with prior knowledge, and diversity. We provide techniques for creating collections of explainers. The methods, based on machine learning optimization frameworks, allow exploring the tradeoffs. We demonstrate our approach on model problems and applications in text analysis.",
"title": "Explainers: Expert Explorations with Crafted Projections",
"normalizedTitle": "Explainers: Expert Explorations with Crafted Projections",
"fno": "ttg2013122042",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Cities And Towns",
"Support Vector Machines",
"Text Mining",
"Optimization",
"Quantization Signal",
"Exploration",
"Cities And Towns",
"Support Vector Machines",
"Text Mining",
"Optimization",
"Quantization Signal",
"Support Vector Machines",
"High Dimensional Spaces"
],
"authors": [
{
"givenName": "Michael",
"surname": "Gleicher",
"fullName": "Michael Gleicher",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin - Madison, Madison, WI, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2042-2051",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/mdm/2013/4973/2/4973b073",
"title": "Location Sharing with Trusted Peers -- Measuring Sensitivity of Location Observations",
"doi": null,
"abstractUrl": "/proceedings-article/mdm/2013/4973b073/12OmNApcuqo",
"parentPublication": {
"id": "proceedings/mdm/2013/4973/2",
"title": "2013 IEEE 14th International Conference on Mobile Data Management",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibmw/2011/1612/0/06112400",
"title": "Protein conformational search with geometric projections",
"doi": null,
"abstractUrl": "/proceedings-article/bibmw/2011/06112400/12OmNBigFmH",
"parentPublication": {
"id": "proceedings/bibmw/2011/1612/0",
"title": "2011 IEEE International Conference on Bioinformatics and Biomedicine Workshops (BIBMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/tools/1997/8383/0/83830048",
"title": "The translator pattern-external functionality with homomorphic mappings",
"doi": null,
"abstractUrl": "/proceedings-article/tools/1997/83830048/12OmNvqEvPI",
"parentPublication": {
"id": "proceedings/tools/1997/8383/0",
"title": "Technology of Object-Oriented Languages, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/arith/1989/8963/0/00072825",
"title": "Cascade: hardware for high/variable precision arithmetic",
"doi": null,
"abstractUrl": "/proceedings-article/arith/1989/00072825/12OmNwudQMD",
"parentPublication": {
"id": "proceedings/arith/1989/8963/0",
"title": "9th Symposium on Computer Arithmetic",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2005/2415/0/24150379",
"title": "Biological Ontology Enhancement with Fuzzy Relations: A Text-Mining Framework",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2005/24150379/12OmNxwENuO",
"parentPublication": {
"id": "proceedings/wi/2005/2415/0",
"title": "Proceedings. The 2005 IEEE/WIC/ACM International Conference on Web Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/dexa/2015/7581/0/07406299",
"title": "Distance Based Queries in Open Street Map",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2015/07406299/12OmNyFCvQ2",
"parentPublication": {
"id": "proceedings/dexa/2015/7581/0",
"title": "2015 26th International Workshop on Database and Expert Systems Applications (DEXA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/fie/2011/468/0/06143050",
"title": "First experiences with eBlocks as an assistive technology for individuals with autistic spectrum condition",
"doi": null,
"abstractUrl": "/proceedings-article/fie/2011/06143050/12OmNyFU77S",
"parentPublication": {
"id": "proceedings/fie/2011/468/0",
"title": "2011 Frontiers in Education Conference (FIE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/1993/4960/0/00395775",
"title": "A block segmentation method for document images with complicated column structures",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/1993/00395775/12OmNyRg4tQ",
"parentPublication": {
"id": "proceedings/icdar/1993/4960/0",
"title": "Proceedings of 2nd International Conference on Document Analysis and Recognition (ICDAR '93)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icedeg/2015/8910/0/07114459",
"title": "Use of geographic information systems with open source solutions, an approach to access edemocracy & egovernment",
"doi": null,
"abstractUrl": "/proceedings-article/icedeg/2015/07114459/12OmNyS6RBq",
"parentPublication": {
"id": "proceedings/icedeg/2015/8910/0",
"title": "2015 Second International Conference on eDemocracy & eGovernment (ICEDEG)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2018/9264/0/926400a392",
"title": "Semi-Supervised Learning with Interactive Label Propagation Guided by Feature Space Projections",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2018/926400a392/17D45WaTkgw",
"parentPublication": {
"id": "proceedings/sibgrapi/2018/9264/0",
"title": "2018 31st SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122032",
"articleId": "13rRUyuegp5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122052",
"articleId": "13rRUxBJhFw",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
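The explainers described above are projection functions fit so that a derived dimension aligns with user-provided example annotations. The sketch below stands in for that idea with a ridge-regularized least-squares fit instead of the paper's SVM-style optimization framework; the function names and toy data are hypothetical.

```python
# Hedged sketch of an "explainer": a linear projection fit to user-labeled
# examples so the derived dimension reflects the user's concept.
import numpy as np

def fit_explainer(X_labeled, labels, ridge=1e-3):
    """X_labeled: (n, d) examples the user annotated; labels: +1 / -1 concept tags.
       Returns a unit-length weight vector defining the derived dimension."""
    X = np.asarray(X_labeled, dtype=float)
    y = np.asarray(labels, dtype=float)
    # ridge-regularized least squares: (X^T X + ridge * I) w = X^T y
    w = np.linalg.solve(X.T @ X + ridge * np.eye(X.shape[1]), X.T @ y)
    return w / np.linalg.norm(w)

def project(X, w):
    return np.asarray(X, dtype=float) @ w   # scores along the user-defined concept

# toy example with hypothetical 3-D data: first two rows belong to the concept
X_lab = [[1.0, 0.2, 0.0], [0.9, 0.1, 0.3], [0.1, 0.8, 0.9], [0.0, 1.0, 0.7]]
w = fit_explainer(X_lab, [+1, +1, -1, -1])
print(project(X_lab, w))   # positive for the first two, negative for the last two
```

A collection of such weight vectors, each fit to a different annotation set, would correspond to the "sets of explainers" that give multiple perspectives on the data.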
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBJhFw",
"doi": "10.1109/TVCG.2013.188",
"abstract": "When high-dimensional data is visualized in a 2D plane by using parametric projection algorithms, users may wish to manipulate the layout of the data points to better reflect their domain knowledge or to explore alternative structures. However, few users are well-versed in the algorithms behind the visualizations, making parameter tweaking more of a guessing game than a series of decisive interactions. Translating user interactions into algorithmic input is a key component of Visual to Parametric Interaction (V2PI) [13]. Instead of adjusting parameters, users directly move data points on the screen, which then updates the underlying statistical model. However, we have found that some data points that are not moved by the user are just as important in the interactions as the data points that are moved. Users frequently move some data points with respect to some other 'unmoved' data points that they consider as spatially contextual. However, in current V2PI interactions, these points are not explicitly identified when directly manipulating the moved points. We design a richer set of interactions that makes this context more explicit, and a new algorithm and sophisticated weighting scheme that incorporates the importance of these unmoved data points into V2PI.",
"abstracts": [
{
"abstractType": "Regular",
"content": "When high-dimensional data is visualized in a 2D plane by using parametric projection algorithms, users may wish to manipulate the layout of the data points to better reflect their domain knowledge or to explore alternative structures. However, few users are well-versed in the algorithms behind the visualizations, making parameter tweaking more of a guessing game than a series of decisive interactions. Translating user interactions into algorithmic input is a key component of Visual to Parametric Interaction (V2PI) [13]. Instead of adjusting parameters, users directly move data points on the screen, which then updates the underlying statistical model. However, we have found that some data points that are not moved by the user are just as important in the interactions as the data points that are moved. Users frequently move some data points with respect to some other 'unmoved' data points that they consider as spatially contextual. However, in current V2PI interactions, these points are not explicitly identified when directly manipulating the moved points. We design a richer set of interactions that makes this context more explicit, and a new algorithm and sophisticated weighting scheme that incorporates the importance of these unmoved data points into V2PI.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "When high-dimensional data is visualized in a 2D plane by using parametric projection algorithms, users may wish to manipulate the layout of the data points to better reflect their domain knowledge or to explore alternative structures. However, few users are well-versed in the algorithms behind the visualizations, making parameter tweaking more of a guessing game than a series of decisive interactions. Translating user interactions into algorithmic input is a key component of Visual to Parametric Interaction (V2PI) [13]. Instead of adjusting parameters, users directly move data points on the screen, which then updates the underlying statistical model. However, we have found that some data points that are not moved by the user are just as important in the interactions as the data points that are moved. Users frequently move some data points with respect to some other 'unmoved' data points that they consider as spatially contextual. However, in current V2PI interactions, these points are not explicitly identified when directly manipulating the moved points. We design a richer set of interactions that makes this context more explicit, and a new algorithm and sophisticated weighting scheme that incorporates the importance of these unmoved data points into V2PI.",
"title": "Semantics of Directly Manipulating Spatializations",
"normalizedTitle": "Semantics of Directly Manipulating Spatializations",
"fno": "ttg2013122052",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Cognitive Science",
"Mathematical Model",
"Algorithm Design And Analysis",
"Semantics",
"Visual Analytics",
"Data Visualization",
"Cognitive Science",
"Mathematical Model",
"Algorithm Design And Analysis",
"Semantics",
"Statistical Models",
"Visual To Parametric Interaction"
],
"authors": [
{
"givenName": null,
"surname": "Xinran Hu",
"fullName": "Xinran Hu",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Lauren",
"surname": "Bradel",
"fullName": "Lauren Bradel",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Dipayan",
"surname": "Maiti",
"fullName": "Dipayan Maiti",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Leanna",
"surname": "House",
"fullName": "Leanna House",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "North",
"fullName": "Chris North",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Scotland",
"surname": "Leman",
"fullName": "Scotland Leman",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2052-2059",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cedem/2016/1042/0/07781926",
"title": "Supporting Cognition in the Face of Political Data and Discourse: A Mental Models Perspective on Designing Information Visualization Systems",
"doi": null,
"abstractUrl": "/proceedings-article/cedem/2016/07781926/12OmNALlcht",
"parentPublication": {
"id": "proceedings/cedem/2016/1042/0",
"title": "2016 Conference for E-Democracy and Open Government (CeDEM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom/2011/2135/0/06120823",
"title": "Semantics-Enhanced Privacy Recommendation for Social Networking Sites",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom/2011/06120823/12OmNwNOaMD",
"parentPublication": {
"id": "proceedings/trustcom/2011/2135/0",
"title": "2011IEEE 10th International Conference on Trust, Security and Privacy in Computing and Communications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdar/2013/4999/0/06628715",
"title": "Bringing Semantics in Word Image Retrieval",
"doi": null,
"abstractUrl": "/proceedings-article/icdar/2013/06628715/12OmNx0RIMg",
"parentPublication": {
"id": "proceedings/icdar/2013/4999/0",
"title": "2013 12th International Conference on Document Analysis and Recognition (ICDAR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lab-rs/2008/3272/0/3272a077",
"title": "Using Cognitive Semantics to Integrate Perception and Motion in a Behavior-Based Robot",
"doi": null,
"abstractUrl": "/proceedings-article/lab-rs/2008/3272a077/12OmNx3q6WW",
"parentPublication": {
"id": "proceedings/lab-rs/2008/3272/0",
"title": "Learning and Adaptive Behaviors for Robotic Systems, ECSIS Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08618426",
"title": "Hierarchical Image Semantics Using Probabilistic Path Propagations for Biomedical Research",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08618426/17D45WwsQ6K",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2018/9288/0/928800b493",
"title": "Development of Mental Model in Understanding Users' Thought Processes for the Evaluation and Functional Enhancement of Clinical Decision Support Systems",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2018/928800b493/18jXDdLQc3C",
"parentPublication": {
"id": "proceedings/icdmw/2018/9288/0",
"title": "2018 IEEE International Conference on Data Mining Workshops (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09020101",
"title": "Characterizing the Quality of Insight by Interactions: A Case Study",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09020101/1hS2LZ6csyQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2020/7303/0/730300a892",
"title": "Smart Contracts Vulnerability Auditing with Multi-semantics",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2020/730300a892/1nkDlG8I9tS",
"parentPublication": {
"id": "proceedings/compsac/2020/7303/0",
"title": "2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2020/9642/0/964200a038",
"title": "Micro-entries: Encouraging Deeper Evaluation of Mental Models Over Time for Interactive Data Systems",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2020/964200a038/1q0FNKyBkE8",
"parentPublication": {
"id": "proceedings/beliv/2020/9642/0",
"title": "2020 IEEE Workshop on Evaluation and Beyond - Methodological Approaches to Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2020/8009/0/800900a060",
"title": "Visual Abstraction of Geographical Point Data with Spatial Autocorrelations",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2020/800900a060/1q7jw7xKEh2",
"parentPublication": {
"id": "proceedings/vast/2020/8009/0",
"title": "2020 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122042",
"articleId": "13rRUxNW1TS",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122060",
"articleId": "13rRUxBa5xi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
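The abstract describes Visual to Parametric Interaction: users drag points on screen, and the underlying statistical model's parameters are updated, with "unmoved" contextual points mattering as much as the moved ones. The following is a minimal sketch assuming a weighted-distance model with nonnegative per-dimension weights; it is illustrative only and not the authors' algorithm or weighting scheme.

```python
# Illustrative sketch of translating repositioned points (plus unmoved context
# points) into updated per-dimension weights of a distance-based model.
import numpy as np
from itertools import combinations

def update_weights(X_anchor, Y_anchor):
    """X_anchor: (n, d) high-dimensional coordinates of the anchor points.
       Y_anchor: (n, 2) their screen positions after the user's manipulation.
       Solves  ||y_i - y_j||^2 ~= sum_k w_k (x_ik - x_jk)^2  for w >= 0."""
    X = np.asarray(X_anchor, float)
    Y = np.asarray(Y_anchor, float)
    rows, targets = [], []
    for i, j in combinations(range(len(X)), 2):
        rows.append((X[i] - X[j]) ** 2)              # per-dimension squared gaps
        targets.append(np.sum((Y[i] - Y[j]) ** 2))   # desired low-dim squared distance
    w, *_ = np.linalg.lstsq(np.array(rows), np.array(targets), rcond=None)
    return np.clip(w, 0.0, None)                     # keep weights nonnegative

# toy example: the user pulled two points apart along what is really dimension 0
X = np.array([[0.0, 1.0], [1.0, 1.0], [0.5, 0.0]])
Y = np.array([[0.0, 0.0], [3.0, 0.0], [1.5, 0.2]])
print(update_weights(X, Y))   # dimension 0 receives most of the weight
```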
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5xi",
"doi": "10.1109/TVCG.2013.190",
"abstract": "High-dimensional data visualization has been attracting much attention. To fully test related software and algorithms, researchers require a diverse pool of data with known and desired features. Test data do not always provide this, or only partially. Here we propose the paradigm WYDIWYGS (What You Draw Is What You Get). Its embodiment, SketchPadND, is a tool that allows users to generate high-dimensional data in the same interface they also use for visualization. This provides for an immersive and direct data generation activity, and furthermore it also enables users to interactively edit and clean existing high-dimensional data from possible artifacts. SketchPadND offers two visualization paradigms, one based on parallel coordinates and the other based on a relatively new framework using an N-D polygon to navigate in high-dimensional space. The first interface allows users to draw arbitrary profiles of probability density functions along each dimension axis and sketch shapes for data density and connections between adjacent dimensions. The second interface embraces the idea of sculpting. Users can carve data at arbitrary orientations and refine them wherever necessary. This guarantees that the data generated is truly high-dimensional. We demonstrate our tool's usefulness in real data visualization scenarios.",
"abstracts": [
{
"abstractType": "Regular",
"content": "High-dimensional data visualization has been attracting much attention. To fully test related software and algorithms, researchers require a diverse pool of data with known and desired features. Test data do not always provide this, or only partially. Here we propose the paradigm WYDIWYGS (What You Draw Is What You Get). Its embodiment, SketchPadND, is a tool that allows users to generate high-dimensional data in the same interface they also use for visualization. This provides for an immersive and direct data generation activity, and furthermore it also enables users to interactively edit and clean existing high-dimensional data from possible artifacts. SketchPadND offers two visualization paradigms, one based on parallel coordinates and the other based on a relatively new framework using an N-D polygon to navigate in high-dimensional space. The first interface allows users to draw arbitrary profiles of probability density functions along each dimension axis and sketch shapes for data density and connections between adjacent dimensions. The second interface embraces the idea of sculpting. Users can carve data at arbitrary orientations and refine them wherever necessary. This guarantees that the data generated is truly high-dimensional. We demonstrate our tool's usefulness in real data visualization scenarios.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "High-dimensional data visualization has been attracting much attention. To fully test related software and algorithms, researchers require a diverse pool of data with known and desired features. Test data do not always provide this, or only partially. Here we propose the paradigm WYDIWYGS (What You Draw Is What You Get). Its embodiment, SketchPadND, is a tool that allows users to generate high-dimensional data in the same interface they also use for visualization. This provides for an immersive and direct data generation activity, and furthermore it also enables users to interactively edit and clean existing high-dimensional data from possible artifacts. SketchPadND offers two visualization paradigms, one based on parallel coordinates and the other based on a relatively new framework using an N-D polygon to navigate in high-dimensional space. The first interface allows users to draw arbitrary profiles of probability density functions along each dimension axis and sketch shapes for data density and connections between adjacent dimensions. The second interface embraces the idea of sculpting. Users can carve data at arbitrary orientations and refine them wherever necessary. This guarantees that the data generated is truly high-dimensional. We demonstrate our tool's usefulness in real data visualization scenarios.",
"title": "SketchPadN-D: WYDIWYG Sculpting and Editing in High-Dimensional Space",
"normalizedTitle": "SketchPadN-D: WYDIWYG Sculpting and Editing in High-Dimensional Space",
"fno": "ttg2013122060",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Shape Analysis",
"Image Color Analysis",
"N D Navigation",
"Data Visualization",
"Shape Analysis",
"Image Color Analysis",
"Multiple Views",
"Synthetic Data Generation",
"Data Editing",
"Data Acquisition And Management",
"Multivariate Data",
"High Dimensional Data",
"Interaction",
"User Interface",
"Parallel Coordinates",
"Scatterplot"
],
"authors": [
{
"givenName": null,
"surname": "Bing Wang",
"fullName": "Bing Wang",
"affiliation": "Comput. Sci. Dept., Stony Brook Univ., Stony Brook, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Puripant",
"surname": "Ruchikachorn",
"fullName": "Puripant Ruchikachorn",
"affiliation": "Comput. Sci. Dept., Stony Brook Univ., Stony Brook, NY, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Klaus",
"surname": "Mueller",
"fullName": "Klaus Mueller",
"affiliation": "Comput. Sci. Dept., Stony Brook Univ., Stony Brook, NY, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2060-2069",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400527",
"title": "SemanticPrism: A multi-aspect view of large high-dimensional data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400527/12OmNxEjY8U",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/itme/2016/3906/0/3906a011",
"title": "A Fast GPU Based High-Quality Three-Dimensional Visualization Method",
"doi": null,
"abstractUrl": "/proceedings-article/itme/2016/3906a011/12OmNy87Qyq",
"parentPublication": {
"id": "proceedings/itme/2016/3906/0",
"title": "2016 8th International Conference on Information Technology in Medicine and Education (ITME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/infvis/2005/9464/0/01532144",
"title": "Two-tone pseudo coloring: compact visualization for one-dimensional data",
"doi": null,
"abstractUrl": "/proceedings-article/infvis/2005/01532144/12OmNzxPTPJ",
"parentPublication": {
"id": "proceedings/infvis/2005/9464/0",
"title": "IEEE Symposium on Information Visualization (InfoVis 05)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/12/ttg2011122053",
"title": "Evaluation of Trend Localization with Multi-Variate Visualizations",
"doi": null,
"abstractUrl": "/journal/tg/2011/12/ttg2011122053/13rRUxC0SEf",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cw/2022/6814/0/681400a122",
"title": "An Extended Scatterplot Selection Technique for Representing Three Numeric Variables",
"doi": null,
"abstractUrl": "/proceedings-article/cw/2022/681400a122/1I6RMxpWlLG",
"parentPublication": {
"id": "proceedings/cw/2022/6814/0",
"title": "2022 International Conference on Cyberworlds (CW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2019/2838/0/283800a106",
"title": "Viewpoint Selection for Shape Comparison of Mode Water Regions in a VR Space",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2019/283800a106/1cMF9J2MPLi",
"parentPublication": {
"id": "proceedings/iv/2019/2838/0",
"title": "2019 23rd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933709",
"title": "Hi-D Maps: An Interactive Visualization Technique for Multi-Dimensional Categorical Data",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933709/1fTgGSW14Q0",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/04/08967136",
"title": "Glyphboard: Visual Exploration of High-Dimensional Data Combining Glyphs with Dimensionality Reduction",
"doi": null,
"abstractUrl": "/journal/tg/2020/04/08967136/1gPjxXgWQM0",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2005/2790/0/01532144",
"title": "Two-tone pseudo coloring: compact visualization for one-dimensional data",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2005/01532144/1h0FwMMLAmk",
"parentPublication": {
"id": "proceedings/ieee-infovis/2005/2790/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2020/5697/0/09086210",
"title": "Representing Multivariate Data by Optimal Colors to Uncover Events of Interest in Time Series Data",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2020/09086210/1kuHmMXxUmA",
"parentPublication": {
"id": "proceedings/pacificvis/2020/5697/0",
"title": "2020 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122052",
"articleId": "13rRUxBJhFw",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122070",
"articleId": "13rRUyYSWsW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRHf",
"name": "ttg2013122060s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122060s.mov",
"extension": "mov",
"size": "48.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
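SketchPadND's first interface lets users draw probability-density profiles along each parallel-coordinate axis and then generates data that follows those profiles. Below is a rough sketch of the sampling step, assuming independent dimensions and inverse-CDF sampling; the actual tool additionally lets users sketch data density and dependencies between adjacent axes, which this simplification omits.

```python
# Rough sketch: turn hand-drawn 1-D density profiles (one per dimension) into
# synthetic high-dimensional samples via inverse-CDF sampling.
import numpy as np

def sample_from_profiles(profiles, n_samples, rng=None):
    """profiles: list of (values, densities) pairs, one per dimension, where
       `densities` is the hand-drawn (non-normalized) profile over `values`."""
    rng = np.random.default_rng(rng)
    columns = []
    for values, density in profiles:
        values = np.asarray(values, float)
        density = np.clip(np.asarray(density, float), 0.0, None)
        cdf = np.cumsum(density)
        cdf /= cdf[-1]                                 # normalize to a proper CDF
        u = rng.random(n_samples)
        columns.append(np.interp(u, cdf, values))      # inverse-CDF sampling
    return np.column_stack(columns)

# toy 3-D example: a peaked profile, a uniform one, and a bimodal one (hypothetical)
grid = np.linspace(0, 1, 50)
profiles = [
    (grid, np.exp(-((grid - 0.3) ** 2) / 0.01)),
    (grid, np.ones_like(grid)),
    (grid, np.exp(-((grid - 0.2) ** 2) / 0.005) + np.exp(-((grid - 0.8) ** 2) / 0.005)),
]
print(sample_from_profiles(profiles, 5, rng=0))
```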
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYSWsW",
"doi": "10.1109/TVCG.2013.220",
"abstract": "Visual exploration and analysis of multidimensional data becomes increasingly difficult with increasing dimensionality. We want to understand the relationships between dimensions of data, but lack flexible techniques for exploration beyond low-order relationships. Current visual techniques for multidimensional data analysis focus on binary conjunctive relationships between dimensions. Recent techniques, such as cross-filtering on an attribute relationship graph, facilitate the exploration of some higher-order conjunctive relationships, but require a great deal of care and precision to do so effectively. This paper provides a detailed analysis of the expressive power of existing visual querying systems and describes a more flexible approach in which users can explore n-ary conjunctive inter- and intra- dimensional relationships by interactively constructing queries as visual hypergraphs. In a hypergraph query, nodes represent subsets of values and hyperedges represent conjunctive relationships. Analysts can dynamically build and modify the query using sequences of simple interactions. The hypergraph serves not only as a query specification, but also as a compact visual representation of the interactive state. Using examples from several domains, focusing on the digital humanities, we describe the design considerations for developing the querying system and incorporating it into visual analysis tools. We analyze query expressiveness with regard to the kinds of questions it can and cannot pose, and describe how it simultaneously expands the expressiveness of and is complemented by cross-filtering.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visual exploration and analysis of multidimensional data becomes increasingly difficult with increasing dimensionality. We want to understand the relationships between dimensions of data, but lack flexible techniques for exploration beyond low-order relationships. Current visual techniques for multidimensional data analysis focus on binary conjunctive relationships between dimensions. Recent techniques, such as cross-filtering on an attribute relationship graph, facilitate the exploration of some higher-order conjunctive relationships, but require a great deal of care and precision to do so effectively. This paper provides a detailed analysis of the expressive power of existing visual querying systems and describes a more flexible approach in which users can explore n-ary conjunctive inter- and intra- dimensional relationships by interactively constructing queries as visual hypergraphs. In a hypergraph query, nodes represent subsets of values and hyperedges represent conjunctive relationships. Analysts can dynamically build and modify the query using sequences of simple interactions. The hypergraph serves not only as a query specification, but also as a compact visual representation of the interactive state. Using examples from several domains, focusing on the digital humanities, we describe the design considerations for developing the querying system and incorporating it into visual analysis tools. We analyze query expressiveness with regard to the kinds of questions it can and cannot pose, and describe how it simultaneously expands the expressiveness of and is complemented by cross-filtering.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visual exploration and analysis of multidimensional data becomes increasingly difficult with increasing dimensionality. We want to understand the relationships between dimensions of data, but lack flexible techniques for exploration beyond low-order relationships. Current visual techniques for multidimensional data analysis focus on binary conjunctive relationships between dimensions. Recent techniques, such as cross-filtering on an attribute relationship graph, facilitate the exploration of some higher-order conjunctive relationships, but require a great deal of care and precision to do so effectively. This paper provides a detailed analysis of the expressive power of existing visual querying systems and describes a more flexible approach in which users can explore n-ary conjunctive inter- and intra- dimensional relationships by interactively constructing queries as visual hypergraphs. In a hypergraph query, nodes represent subsets of values and hyperedges represent conjunctive relationships. Analysts can dynamically build and modify the query using sequences of simple interactions. The hypergraph serves not only as a query specification, but also as a compact visual representation of the interactive state. Using examples from several domains, focusing on the digital humanities, we describe the design considerations for developing the querying system and incorporating it into visual analysis tools. We analyze query expressiveness with regard to the kinds of questions it can and cannot pose, and describe how it simultaneously expands the expressiveness of and is complemented by cross-filtering.",
"title": "Visual Analysis of Higher-Order Conjunctive Relationships in Multidimensional Data Using a Hypergraph Query System",
"normalizedTitle": "Visual Analysis of Higher-Order Conjunctive Relationships in Multidimensional Data Using a Hypergraph Query System",
"fno": "ttg2013122070",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Data Analysis",
"Data Visualization",
"Marine Vehicles",
"Semantics",
"Database Languages",
"Visual Query Language",
"Visual Analytics",
"Data Analysis",
"Data Visualization",
"Marine Vehicles",
"Semantics",
"Database Languages",
"Digital Humanities",
"Graph Search",
"Graph Query Language",
"Multidimensional Data",
"Attribute Relationship Graphs",
"Multivariate Data Analysis",
"Higher Order Conjunctive Queries"
],
"authors": [
{
"givenName": "Rachel",
"surname": "Shadoan",
"fullName": "Rachel Shadoan",
"affiliation": "Akashic Labs. LLC, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "Weaver",
"fullName": "Chris Weaver",
"affiliation": "Sch. of Comput. Sci., Univ. of Oklahoma, Norman, OK, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2070-2079",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/wiw/2016/4771/0/6039a112",
"title": "Double-Hypergraph Based Sentence Ranking for Query-Focused Multi-document Summarizaton",
"doi": null,
"abstractUrl": "/proceedings-article/wiw/2016/6039a112/12OmNC1Gug8",
"parentPublication": {
"id": "proceedings/wiw/2016/4771/0",
"title": "2016 IEEE/WIC/ACM International Conference on Web Intelligence Workshops (WIW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/focs/1998/9172/0/91720706",
"title": "The Complexity of Acyclic Conjunctive Queries",
"doi": null,
"abstractUrl": "/proceedings-article/focs/1998/91720706/12OmNvAiSFU",
"parentPublication": {
"id": "proceedings/focs/1998/9172/0",
"title": "Proceedings 39th Annual Symposium on Foundations of Computer Science (Cat. No.98CB36280)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigcomp/2018/3649/0/364901a739",
"title": "Efficient Searching of Subhypergraph Isomorphism in Hypergraph Databases",
"doi": null,
"abstractUrl": "/proceedings-article/bigcomp/2018/364901a739/12OmNwGZNNR",
"parentPublication": {
"id": "proceedings/bigcomp/2018/3649/0",
"title": "2018 IEEE International Conference on Big Data and Smart Computing (BigComp)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdc/2014/1897/0/1897a035",
"title": "GeoLens: Enabling Interactive Visual Analytics over Large-Scale, Multidimensional Geospatial Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/bdc/2014/1897a035/12OmNzwHvid",
"parentPublication": {
"id": "proceedings/bdc/2014/1897/0",
"title": "2014 IEEE/ACM International Symposium on Big Data Computing (BDC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009060929",
"title": "Conjunctive Visual Forms",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009060929/13rRUxD9gXA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2008/06/ttg2008061141",
"title": "Rolling the Dice: Multidimensional Visual Exploration using Scatterplot Matrix Navigation",
"doi": null,
"abstractUrl": "/journal/tg/2008/06/ttg2008061141/13rRUyuegp1",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2021/0126/0/09669280",
"title": "A Simplex Hypergraph Clustering Method for Detecting Higher-order Modules in Microbial Network",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2021/09669280/1A9WfD2dvkk",
"parentPublication": {
"id": "proceedings/bibm/2021/0126/0",
"title": "2021 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09729550",
"title": "Visual Exploration of Relationships and Structure in Low-Dimensional Embeddings",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09729550/1Bya8LDahDa",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2022/04/09099985",
"title": "LBSN2Vec++: Heterogeneous Hypergraph Embedding for Location-Based Social Networks",
"doi": null,
"abstractUrl": "/journal/tk/2022/04/09099985/1k93iAmLWzS",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09508898",
"title": "Towards Systematic Design Considerations for Visualizing Cross-View Data Relationships",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09508898/1vQzkzRdSWk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122060",
"articleId": "13rRUxBa5xi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122080",
"articleId": "13rRUwI5U2F",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesZ5",
"name": "ttg2013122070s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122070s.mov",
"extension": "mov",
"size": "4.86 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
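In the hypergraph query system described above, query nodes represent subsets of attribute values and hyperedges represent conjunctive relationships among them. The sketch below shows one plausible way to evaluate a single hyperedge over a flat record collection; the record schema and helper names are made up for illustration and are not the paper's implementation.

```python
# Small sketch of hypergraph-query evaluation: a node is a named subset of
# attribute values, a hyperedge is the conjunction of the nodes it touches,
# and a record matches the hyperedge if it satisfies every node.
def make_node(attribute, allowed_values):
    allowed = set(allowed_values)
    return lambda record: record.get(attribute) in allowed

def matches_hyperedge(record, hyperedge_nodes):
    # conjunctive semantics: every node predicate must hold
    return all(node(record) for node in hyperedge_nodes)

def query(records, hyperedge_nodes):
    return [r for r in records if matches_hyperedge(r, hyperedge_nodes)]

# hypothetical record collection
records = [
    {"author": "Weaver", "year": 2013, "venue": "TVCG"},
    {"author": "Shadoan", "year": 2011, "venue": "VAST"},
    {"author": "Weaver", "year": 2009, "venue": "TVCG"},
]
# hyperedge: author in {Weaver} AND venue in {TVCG} AND year in {2013}
edge = [make_node("author", {"Weaver"}),
        make_node("venue", {"TVCG"}),
        make_node("year", {2013})]
print(query(records, edge))   # -> the single 2013 TVCG record
```

Higher-order queries would simply add more nodes (or more hyperedges sharing nodes), which is the expressiveness gain the paper analyzes relative to binary cross-filtering.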
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5U2F",
"doi": "10.1109/TVCG.2013.167",
"abstract": "Many datasets, such as scientific literature collections, contain multiple heterogeneous facets which derive implicit relations, as well as explicit relational references between data items. The exploration of this data is challenging not only because of large data scales but also the complexity of resource structures and semantics. In this paper, we present PivotSlice, an interactive visualization technique which provides efficient faceted browsing as well as flexible capabilities to discover data relationships. With the metaphor of direct manipulation, PivotSlice allows the user to visually and logically construct a series of dynamic queries over the data, based on a multi-focus and multi-scale tabular view that subdivides the entire dataset into several meaningful parts with customized semantics. PivotSlice further facilitates the visual exploration and sensemaking process through features including live search and integration of online data, graphical interaction histories and smoothly animated visual state transitions. We evaluated PivotSlice through a qualitative lab study with university researchers and report the findings from our observations and interviews. We also demonstrate the effectiveness of PivotSlice using a scenario of exploring a repository of information visualization literature.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many datasets, such as scientific literature collections, contain multiple heterogeneous facets which derive implicit relations, as well as explicit relational references between data items. The exploration of this data is challenging not only because of large data scales but also the complexity of resource structures and semantics. In this paper, we present PivotSlice, an interactive visualization technique which provides efficient faceted browsing as well as flexible capabilities to discover data relationships. With the metaphor of direct manipulation, PivotSlice allows the user to visually and logically construct a series of dynamic queries over the data, based on a multi-focus and multi-scale tabular view that subdivides the entire dataset into several meaningful parts with customized semantics. PivotSlice further facilitates the visual exploration and sensemaking process through features including live search and integration of online data, graphical interaction histories and smoothly animated visual state transitions. We evaluated PivotSlice through a qualitative lab study with university researchers and report the findings from our observations and interviews. We also demonstrate the effectiveness of PivotSlice using a scenario of exploring a repository of information visualization literature.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many datasets, such as scientific literature collections, contain multiple heterogeneous facets which derive implicit relations, as well as explicit relational references between data items. The exploration of this data is challenging not only because of large data scales but also the complexity of resource structures and semantics. In this paper, we present PivotSlice, an interactive visualization technique which provides efficient faceted browsing as well as flexible capabilities to discover data relationships. With the metaphor of direct manipulation, PivotSlice allows the user to visually and logically construct a series of dynamic queries over the data, based on a multi-focus and multi-scale tabular view that subdivides the entire dataset into several meaningful parts with customized semantics. PivotSlice further facilitates the visual exploration and sensemaking process through features including live search and integration of online data, graphical interaction histories and smoothly animated visual state transitions. We evaluated PivotSlice through a qualitative lab study with university researchers and report the findings from our observations and interviews. We also demonstrate the effectiveness of PivotSlice using a scenario of exploring a repository of information visualization literature.",
"title": "Interactive Exploration of Implicit and Explicit Relations in Faceted Datasets",
"normalizedTitle": "Interactive Exploration of Implicit and Explicit Relations in Faceted Datasets",
"fno": "ttg2013122080",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Market Research",
"Faceted Searches",
"Information Filters",
"Information Visualization",
"Data Visualization",
"Market Research",
"Faceted Searches",
"Information Filters",
"Visual Analytics",
"Faceted Browsing",
"Network Exploration",
"Dynamic Query",
"Interaction"
],
"authors": [
{
"givenName": null,
"surname": "Jian Zhao",
"fullName": "Jian Zhao",
"affiliation": "Univ. of Toronto, Toronto, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christopher",
"surname": "Collins",
"fullName": "Christopher Collins",
"affiliation": "Univ. of Ontario Inst. of Technol., Oshawa, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Fanny",
"surname": "Chevalier",
"fullName": "Fanny Chevalier",
"affiliation": "Univ. of Toronto, Toronto, ON, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ravin",
"surname": "Balakrishnan",
"fullName": "Ravin Balakrishnan",
"affiliation": "Univ. of Toronto, Toronto, ON, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2080-2089",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icde/2007/0802/0/04221725",
"title": "Updating Recursive XML Views of Relations",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2007/04221725/12OmNBpEeUf",
"parentPublication": {
"id": "proceedings/icde/2007/0802/0",
"title": "2007 IEEE 23rd International Conference on Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2005/2415/0/24150557",
"title": "Multi-Faceted Information Retrieval System for Large Scale Email Archives",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2005/24150557/12OmNzhELfJ",
"parentPublication": {
"id": "proceedings/wi/2005/2415/0",
"title": "Proceedings. The 2005 IEEE/WIC/ACM International Conference on Web Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192685",
"title": "CiteRivers: Visual Analytics of Citation Patterns",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192685/13rRUwd9CG5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/so/2014/05/mso2014050052",
"title": "Programming with Implicit Flows",
"doi": null,
"abstractUrl": "/magazine/so/2014/05/mso2014050052/13rRUwhpBMz",
"parentPublication": {
"id": "mags/so",
"title": "IEEE Software",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122709",
"title": "PivotPaths: Strolling through Faceted Information Spaces",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122709/13rRUxASuGi",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2003/2055/0/01249015",
"title": "Interactive hierarchical dimension ordering, spacing and filtering for exploration of high dimensional datasets",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2003/01249015/18M76LncjTO",
"parentPublication": {
"id": "proceedings/ieee-infovis/2003/2055/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/08/09020101",
"title": "Characterizing the Quality of Insight by Interactions: A Case Study",
"doi": null,
"abstractUrl": "/journal/tg/2021/08/09020101/1hS2LZ6csyQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101621",
"title": "Visualization Systems for Linked Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101621/1kaMBtV8mIg",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222353",
"title": "Implicit Multidimensional Projection of Local Subspaces",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222353/1nTqcxPMEIE",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icnisc/2021/0232/0/023200a536",
"title": "A Numerical Real Time Web Tracking and Scrapping Strategy Applied to Analysing COVID-19 Datasets",
"doi": null,
"abstractUrl": "/proceedings-article/icnisc/2021/023200a536/1yLPr021RjW",
"parentPublication": {
"id": "proceedings/icnisc/2021/0232/0",
"title": "2021 7th Annual International Conference on Network and Information Systems for Computers (ICNISC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122070",
"articleId": "13rRUyYSWsW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122090",
"articleId": "13rRUyoPSP5",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgCI",
"name": "ttg2013122080s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122080s.mp4",
"extension": "mp4",
"size": "40.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
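The PivotSlice record above describes building dynamic queries over faceted publication data by combining facet predicates into slices of the dataset. Purely as an illustration of that general idea (not the authors' system; the records, field names, and predicates below are made up), a minimal Python sketch of filtering a small record set with composable facet predicates:

```python
# Minimal sketch of faceted dynamic queries over publication records.
# Illustrative only; the data and field names are hypothetical, not PivotSlice's API.

records = [
    {"title": "A", "year": 2012, "venue": "InfoVis", "keywords": {"faceted browsing"}},
    {"title": "B", "year": 2013, "venue": "VAST",    "keywords": {"dynamic query"}},
    {"title": "C", "year": 2013, "venue": "InfoVis", "keywords": {"dynamic query", "interaction"}},
]

def facet_filter(items, **predicates):
    """Keep items matching every facet predicate (logical AND across facets)."""
    def match(item):
        return all(pred(item.get(facet)) for facet, pred in predicates.items())
    return [item for item in items if match(item)]

# A "dynamic query": adjust the predicates and re-run to update the visible slice.
slice_2013_infovis = facet_filter(
    records,
    year=lambda y: y == 2013,
    venue=lambda v: v == "InfoVis",
)
print([r["title"] for r in slice_2013_infovis])  # -> ['C']
```

Re-running `facet_filter` with adjusted predicates is the essence of a dynamic query: the slice updates immediately as the constraints change, which is roughly what an interactive faceted view automates.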
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyoPSP5",
"doi": "10.1109/TVCG.2013.213",
"abstract": "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets. In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. We demonstrate the flexibility of our approach by applying it to multiple distinct domains.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets. In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. We demonstrate the flexibility of our approach by applying it to multiple distinct domains.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets. In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. We demonstrate the flexibility of our approach by applying it to multiple distinct domains.",
"title": "VAICo: Visual Analysis for Image Comparison",
"normalizedTitle": "VAICo: Visual Analysis for Image Comparison",
"fno": "ttg2013122090",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Image Color Analysis",
"Visual Analytics",
"Shape Analysis",
"Image Segmentation",
"Focus Context Visualization",
"Data Visualization",
"Image Color Analysis",
"Visual Analytics",
"Shape Analysis",
"Image Segmentation",
"Image Set Comparison",
"Comparative Visualization"
],
"authors": [
{
"givenName": "Johanna",
"surname": "Schmidt",
"fullName": "Johanna Schmidt",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M. Eduard",
"surname": "Groller",
"fullName": "M. Eduard Groller",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Stefan",
"surname": "Bruckner",
"fullName": "Stefan Bruckner",
"affiliation": "Univ. of Bergen, Bergen, Norway",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2090-2099",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/biovis/2011/0003/0/071078livengoo",
"title": "A visual analysis system for metabolomics data",
"doi": null,
"abstractUrl": "/proceedings-article/biovis/2011/071078livengoo/12OmNqN6R57",
"parentPublication": {
"id": "proceedings/biovis/2011/0003/0",
"title": "2011 IEEE Symposium on Biological Data Visualization (BioVis).",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/visual/1991/2245/0/00175797",
"title": "The visual comparison of three sequences",
"doi": null,
"abstractUrl": "/proceedings-article/visual/1991/00175797/12OmNxEjY9n",
"parentPublication": {
"id": "proceedings/visual/1991/2245/0",
"title": "1991 Proceeding Visualization",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/04/mcg2012040026",
"title": "A Graph Algebra for Scalable Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2012/04/mcg2012040026/13rRUILLkpN",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534826",
"title": "Blockwise Human Brain Network Visual Comparison Using NodeTrix Representation",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534826/13rRUwwJWFQ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904461",
"title": "Visual Comparison of Language Model Adaptation",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904461/1H1gkbJeuas",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2018/6861/0/08802392",
"title": "Visual Analysis for Subgroups in a Dynamic Network",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2018/08802392/1cJ6YlaGtAA",
"parentPublication": {
"id": "proceedings/vast/2018/6861/0",
"title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933778",
"title": "Visual Analysis of the Time Management of Learning Multiple Courses in Online Learning Environment",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933778/1fTgG02CBgs",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09246250",
"title": "Co-Bridges: Pair-wise Visual Connection and Comparison for Multi-item Data Streams",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09246250/1olE35lxD8c",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmcce/2020/2314/0/231400b742",
"title": "An Improved Image Entropy Algorithm Suitable for Digital Painting Style",
"doi": null,
"abstractUrl": "/proceedings-article/icmcce/2020/231400b742/1tzyxOBcrrq",
"parentPublication": {
"id": "proceedings/icmcce/2020/2314/0",
"title": "2020 5th International Conference on Mechanical, Control and Computer Engineering (ICMCCE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09490333",
"title": "A Visual Analytics Approach for Structural Differences Among Graphs via Deep Learning",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09490333/1vmGUJFABLW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122080",
"articleId": "13rRUwI5U2F",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122100",
"articleId": "13rRUxjQyvi",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
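The VAICo record above mentions identifying local changes across a set of images and embedding them in a hierarchy via cluster analysis. The following is only a rough sketch of that general recipe under simplifying assumptions (synthetic images, a hand-picked variance threshold, and SciPy's stock agglomerative clustering), not the method from the paper:

```python
# Rough sketch: find pixels that vary across an image set and group them hierarchically.
# Illustrative only; data, threshold, and clustering choices are made up.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
images = rng.random((5, 64, 64))            # five same-sized grayscale images
variability = images.std(axis=0)            # per-pixel variation across the set

ys, xs = np.nonzero(variability > 0.35)     # "local changes": high-variance pixels
points = np.column_stack([xs, ys]).astype(float)

# Agglomerative clustering embeds the change locations in a hierarchy;
# cutting the dendrogram at a distance yields discrete difference regions.
tree = linkage(points, method="ward")
labels = fcluster(tree, t=10.0, criterion="distance")
print(f"{len(points)} changed pixels grouped into {labels.max()} regions")
```

The hierarchy (the `tree` above) is what allows drill-down from coarse difference regions to individual variations, which is the interaction pattern the abstract alludes to.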
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxjQyvi",
"doi": "10.1109/TVCG.2013.181",
"abstract": "Spectral clustering is a powerful and versatile technique, whose broad range of applications includes 3D image analysis. However, its practical use often involves a tedious and time-consuming process of tuning parameters and making application-specific choices. In the absence of training data with labeled clusters, help from a human analyst is required to decide the number of clusters, to determine whether hierarchical clustering is needed, and to define the appropriate distance measures, parameters of the underlying graph, and type of graph Laplacian. We propose to simplify this process via an open-box approach, in which an interactive system visualizes the involved mathematical quantities, suggests parameter values, and provides immediate feedback to support the required decisions. Our framework focuses on applications in 3D image analysis, and links the abstract high-dimensional feature space used in spectral clustering to the three-dimensional data space. This provides a better understanding of the technique, and helps the analyst predict how well specific parameter settings will generalize to similar tasks. In addition, our system supports filtering outliers and labeling the final clusters in such a way that user actions can be recorded and transferred to different data in which the same structures are to be found. Our system supports a wide range of inputs, including triangular meshes, regular grids, and point clouds. We use our system to develop segmentation protocols in chest CT and brain MRI that are then successfully applied to other datasets in an automated manner.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spectral clustering is a powerful and versatile technique, whose broad range of applications includes 3D image analysis. However, its practical use often involves a tedious and time-consuming process of tuning parameters and making application-specific choices. In the absence of training data with labeled clusters, help from a human analyst is required to decide the number of clusters, to determine whether hierarchical clustering is needed, and to define the appropriate distance measures, parameters of the underlying graph, and type of graph Laplacian. We propose to simplify this process via an open-box approach, in which an interactive system visualizes the involved mathematical quantities, suggests parameter values, and provides immediate feedback to support the required decisions. Our framework focuses on applications in 3D image analysis, and links the abstract high-dimensional feature space used in spectral clustering to the three-dimensional data space. This provides a better understanding of the technique, and helps the analyst predict how well specific parameter settings will generalize to similar tasks. In addition, our system supports filtering outliers and labeling the final clusters in such a way that user actions can be recorded and transferred to different data in which the same structures are to be found. Our system supports a wide range of inputs, including triangular meshes, regular grids, and point clouds. We use our system to develop segmentation protocols in chest CT and brain MRI that are then successfully applied to other datasets in an automated manner.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spectral clustering is a powerful and versatile technique, whose broad range of applications includes 3D image analysis. However, its practical use often involves a tedious and time-consuming process of tuning parameters and making application-specific choices. In the absence of training data with labeled clusters, help from a human analyst is required to decide the number of clusters, to determine whether hierarchical clustering is needed, and to define the appropriate distance measures, parameters of the underlying graph, and type of graph Laplacian. We propose to simplify this process via an open-box approach, in which an interactive system visualizes the involved mathematical quantities, suggests parameter values, and provides immediate feedback to support the required decisions. Our framework focuses on applications in 3D image analysis, and links the abstract high-dimensional feature space used in spectral clustering to the three-dimensional data space. This provides a better understanding of the technique, and helps the analyst predict how well specific parameter settings will generalize to similar tasks. In addition, our system supports filtering outliers and labeling the final clusters in such a way that user actions can be recorded and transferred to different data in which the same structures are to be found. Our system supports a wide range of inputs, including triangular meshes, regular grids, and point clouds. We use our system to develop segmentation protocols in chest CT and brain MRI that are then successfully applied to other datasets in an automated manner.",
"title": "Open-Box Spectral Clustering: Applications to Medical Image Analysis",
"normalizedTitle": "Open-Box Spectral Clustering: Applications to Medical Image Analysis",
"fno": "ttg2013122100",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Image Segmentation",
"Three Dimensional Displays",
"Eigenvalues And Eigenfunctions",
"Laplace Equations",
"Image Analysis",
"Data Visualization",
"Clustering",
"Linked Views",
"Image Segmentation",
"Three Dimensional Displays",
"Eigenvalues And Eigenfunctions",
"Laplace Equations",
"Image Analysis",
"Data Visualization",
"Clustering",
"Programming With Example",
"Image Segmentation",
"Spectral Clustering",
"High Dimensional Embeddings"
],
"authors": [
{
"givenName": "Thomas",
"surname": "Schultz",
"fullName": "Thomas Schultz",
"affiliation": "Univ. of Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gordon L.",
"surname": "Kindlmann",
"fullName": "Gordon L. Kindlmann",
"affiliation": "Univ. of Chicago, Chicago, IL, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2100-2108",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icdm/2017/3835/0/3835a949",
"title": "Fast Compressive Spectral Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2017/3835a949/12OmNC3Xhju",
"parentPublication": {
"id": "proceedings/icdm/2017/3835/0",
"title": "2017 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ic3/2015/7947/0/07346659",
"title": "Dimension reduction using spectral methods in FANNY for fuzzy clustering of graphs",
"doi": null,
"abstractUrl": "/proceedings-article/ic3/2015/07346659/12OmNyxFKdB",
"parentPublication": {
"id": "proceedings/ic3/2015/7947/0",
"title": "2015 Eighth International Conference on Contemporary Computing (IC3)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/08/06701205",
"title": "Segmentation of 3D Meshes Usingp-Spectral Clustering",
"doi": null,
"abstractUrl": "/journal/tp/2014/08/06701205/13rRUzphDz6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/micai/2016/7735/0/773500a069",
"title": "A Study on Pattern-Based Spectral Clustering Methods in DWN",
"doi": null,
"abstractUrl": "/proceedings-article/micai/2016/773500a069/17D45VTRorA",
"parentPublication": {
"id": "proceedings/micai/2016/7735/0",
"title": "2016 Fifteenth Mexican International Conference on Artificial Intelligence (MICAI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2022/01/08778699",
"title": "Fast Compressive Spectral Clustering for Large-Scale Sparse Graph",
"doi": null,
"abstractUrl": "/journal/bd/2022/01/08778699/1A8c2XDewyA",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2020/8041/0/09204117",
"title": "k-eNSC: k-estimation for Normalized Spectral Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2020/09204117/1nmi8yrOjMQ",
"parentPublication": {
"id": "proceedings/iscv/2020/8041/0",
"title": "2020 International Conference on Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2021/06/09263338",
"title": "Regularized Spectral Clustering With Entropy Perturbation",
"doi": null,
"abstractUrl": "/journal/bd/2021/06/09263338/1oReEPfsM6s",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313125",
"title": "Fast Approximate Spectral Clustering via Adaptive Filtering of Random Graph Signals",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313125/1qmfSLj61fW",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdia/2020/2232/0/223200a257",
"title": "DBSCAN Is Semi-Spectral Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/bigdia/2020/223200a257/1stvyZFcIYU",
"parentPublication": {
"id": "proceedings/bigdia/2020/2232/0",
"title": "2020 6th International Conference on Big Data and Information Analytics (BigDIA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2023/02/09495158",
"title": "Fast Optimization of Spectral Embedding and Improved Spectral Rotation",
"doi": null,
"abstractUrl": "/journal/tk/2023/02/09495158/1vyjgdBlrRS",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122090",
"articleId": "13rRUyoPSP5",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122109",
"articleId": "13rRUx0xPIJ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRRL",
"name": "ttg2013122100s.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122100s.avi",
"extension": "avi",
"size": "37.2 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
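The record above concerns spectral clustering, whose textbook form is: build an affinity graph, form the graph Laplacian, embed the points into its smallest eigenvectors, and run k-means in that embedding. A minimal NumPy sketch of that standard recipe follows; it is generic spectral clustering, not the interactive open-box system described in the abstract, and the Gaussian-affinity parameter `sigma` is an arbitrary choice:

```python
# Textbook spectral clustering sketch: affinity graph -> Laplacian -> eigenvectors -> k-means.
import numpy as np

def spectral_clustering(X, k, sigma=1.0, iters=50, seed=0):
    d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)   # pairwise squared distances
    W = np.exp(-d2 / (2 * sigma ** 2))                     # Gaussian affinity matrix
    np.fill_diagonal(W, 0.0)
    D = np.diag(W.sum(axis=1))
    L = D - W                                              # unnormalized graph Laplacian
    _, vecs = np.linalg.eigh(L)
    E = vecs[:, :k]                                        # embed into k smallest eigenvectors

    # Plain k-means (Lloyd's algorithm) on the spectral embedding.
    rng = np.random.default_rng(seed)
    centers = E[rng.choice(len(E), k, replace=False)]
    for _ in range(iters):
        labels = ((E[:, None, :] - centers[None, :, :]) ** 2).sum(-1).argmin(1)
        centers = np.array([E[labels == c].mean(axis=0) if np.any(labels == c) else centers[c]
                            for c in range(k)])
    return labels

# Two well-separated blobs should come back as two clusters.
rng = np.random.default_rng(3)
pts = np.vstack([rng.normal(size=(30, 2)), rng.normal(size=(30, 2)) + 8])
labels = spectral_clustering(pts, k=2)
print(labels[:5], labels[-5:])   # the two blobs receive different labels
```

The tunable pieces here (affinity measure, `sigma`, type of Laplacian, number of clusters) are exactly the decisions the abstract says the open-box approach exposes to the analyst.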
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUx0xPIJ",
"doi": "10.1109/TVCG.2013.207",
"abstract": "Traditional sketch-based image or video search systems rely on machine learning concepts as their core technology. However, in many applications, machine learning alone is impractical since videos may not be semantically annotated sufficiently, there may be a lack of suitable training data, and the search requirements of the user may frequently change for different tasks. In this work, we develop a visual analytics systems that overcomes the shortcomings of the traditional approach. We make use of a sketch-based interface to enable users to specify search requirement in a flexible manner without depending on semantic annotation. We employ active machine learning to train different analytical models for different types of search requirements. We use visualization to facilitate knowledge discovery at the different stages of visual analytics. This includes visualizing the parameter space of the trained model, visualizing the search space to support interactive browsing, visualizing candidature search results to support rapid interaction for active learning while minimizing watching videos, and visualizing aggregated information of the search results. We demonstrate the system for searching spatiotemporal attributes from sports video to identify key instances of the team and player performance.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Traditional sketch-based image or video search systems rely on machine learning concepts as their core technology. However, in many applications, machine learning alone is impractical since videos may not be semantically annotated sufficiently, there may be a lack of suitable training data, and the search requirements of the user may frequently change for different tasks. In this work, we develop a visual analytics systems that overcomes the shortcomings of the traditional approach. We make use of a sketch-based interface to enable users to specify search requirement in a flexible manner without depending on semantic annotation. We employ active machine learning to train different analytical models for different types of search requirements. We use visualization to facilitate knowledge discovery at the different stages of visual analytics. This includes visualizing the parameter space of the trained model, visualizing the search space to support interactive browsing, visualizing candidature search results to support rapid interaction for active learning while minimizing watching videos, and visualizing aggregated information of the search results. We demonstrate the system for searching spatiotemporal attributes from sports video to identify key instances of the team and player performance.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Traditional sketch-based image or video search systems rely on machine learning concepts as their core technology. However, in many applications, machine learning alone is impractical since videos may not be semantically annotated sufficiently, there may be a lack of suitable training data, and the search requirements of the user may frequently change for different tasks. In this work, we develop a visual analytics systems that overcomes the shortcomings of the traditional approach. We make use of a sketch-based interface to enable users to specify search requirement in a flexible manner without depending on semantic annotation. We employ active machine learning to train different analytical models for different types of search requirements. We use visualization to facilitate knowledge discovery at the different stages of visual analytics. This includes visualizing the parameter space of the trained model, visualizing the search space to support interactive browsing, visualizing candidature search results to support rapid interaction for active learning while minimizing watching videos, and visualizing aggregated information of the search results. We demonstrate the system for searching spatiotemporal attributes from sports video to identify key instances of the team and player performance.",
"title": "Transformation of an Uncertain Video Search Pipeline to a Sketch-Based Visual Analytics Loop",
"normalizedTitle": "Transformation of an Uncertain Video Search Pipeline to a Sketch-Based Visual Analytics Loop",
"fno": "ttg2013122109",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Visual Analytics",
"Analytical Models",
"Computational Modeling",
"Machine Learning",
"Multimedia Communication",
"Machine Learning",
"Data Visualization",
"Visual Analytics",
"Analytical Models",
"Computational Modeling",
"Machine Learning",
"Multimedia Communication",
"Multimedia Visualization",
"Visual Knowledge Discovery",
"Data Clustering"
],
"authors": [
{
"givenName": "Philip A.",
"surname": "Legg",
"fullName": "Philip A. Legg",
"affiliation": "Dept. of Comput. Sci., Swansea Univ., Swansea, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "David H. S.",
"surname": "Chung",
"fullName": "David H. S. Chung",
"affiliation": "Dept. of Comput. Sci., Swansea Univ., Swansea, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Matthew L.",
"surname": "Parry",
"fullName": "Matthew L. Parry",
"affiliation": "Dept. of Comput. Sci., Swansea Univ., Swansea, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rhodri",
"surname": "Bown",
"fullName": "Rhodri Bown",
"affiliation": "Welsh Rugby Union, Cardiff, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mark W.",
"surname": "Jones",
"fullName": "Mark W. Jones",
"affiliation": "Dept. of Comput. Sci., Swansea Univ., Swansea, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Iwan W.",
"surname": "Griffiths",
"fullName": "Iwan W. Griffiths",
"affiliation": "Dept. of Eng., Swansea Univ., Swansea, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Min Chen",
"fullName": "Min Chen",
"affiliation": "e-Res. Centre, Univ. of Oxford, Oxford, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2109-2118",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042522",
"title": "A sketch+fisheye interface for visual analytics of large time-series",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042522/12OmNvAiSiA",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2018/6420/0/642000i014",
"title": "Learning Deep Sketch Abstraction",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2018/642000i014/17D45WLdYQJ",
"parentPublication": {
"id": "proceedings/cvpr/2018/6420/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2022/0883/0/088300c928",
"title": "A Sketch-based Index for Correlated Dataset Search",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2022/088300c928/1FwFEeACbFm",
"parentPublication": {
"id": "proceedings/icde/2022/0883/0",
"title": "2022 IEEE 38th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sec/2022/8611/0/861100a175",
"title": "Preva: Protecting Inference Privacy through Policy-based Video-frame Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/sec/2022/861100a175/1JC1gxUcmK4",
"parentPublication": {
"id": "proceedings/sec/2022/8611/0",
"title": "2022 IEEE/ACM 7th Symposium on Edge Computing (SEC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlui/2019/4064/0/10075563",
"title": "Shall we play? – Extending the Visual Analytics Design Space through Gameful Design Concepts",
"doi": null,
"abstractUrl": "/proceedings-article/mlui/2019/10075563/1LIRvkrCyGc",
"parentPublication": {
"id": "proceedings/mlui/2019/4064/0",
"title": "2019 IEEE Workshop on Machine Learning from User Interaction for Visualization and Analytics (MLUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/mlui/2018/4063/0/10075562",
"title": "A Bidirectional Pipeline for Semantic Interaction",
"doi": null,
"abstractUrl": "/proceedings-article/mlui/2018/10075562/1LIRzmx4WUo",
"parentPublication": {
"id": "proceedings/mlui/2018/4063/0",
"title": "2018 IEEE Workshop on Machine Learning from User Interaction for Visualization and Analytics (MLUI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/04/08739141",
"title": "Personalized Sketch-Based Brushing in Scatterplots",
"doi": null,
"abstractUrl": "/magazine/cg/2019/04/08739141/1aXM9T7Z0xq",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08812988",
"title": "Explaining Vulnerabilities to Adversarial Machine Learning through Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08812988/1cOhCfAgaZO",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09246282",
"title": "P6: A Declarative Language for Integrating Machine Learning in Visual Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09246282/1olDLxl43Qc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09377843",
"title": "Sketch and Scale Geo-distributed tSNE and UMAP",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09377843/1s64Nc3lk9W",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122100",
"articleId": "13rRUxjQyvi",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122119",
"articleId": "13rRUxC0SOX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
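The sketch-based video search record above relies on active machine learning: the system surfaces the candidates it is least certain about, the analyst labels them, and the model is retrained. Below is a toy uncertainty-sampling loop illustrating only that loop structure; the nearest-centroid scorer and the synthetic data are stand-ins, not the paper's models:

```python
# Toy active-learning loop: query the least certain item, get a label, retrain.
import numpy as np

rng = np.random.default_rng(1)
X = np.vstack([rng.normal(0, 1, (50, 2)), rng.normal(4, 1, (50, 2))])
true_labels = np.array([0] * 50 + [1] * 50)          # oracle (stands in for the analyst)

labeled = {0: [0], 1: [99]}                          # start with one example per class

def predict_margin(x):
    """Prediction plus certainty = gap between distances to the two class centroids."""
    c0 = X[labeled[0]].mean(axis=0)
    c1 = X[labeled[1]].mean(axis=0)
    d0, d1 = np.linalg.norm(x - c0), np.linalg.norm(x - c1)
    return (0 if d0 < d1 else 1), abs(d0 - d1)

for _ in range(10):                                  # ten rounds of querying
    unlabeled = [i for i in range(len(X)) if i not in labeled[0] + labeled[1]]
    margins = [predict_margin(X[i])[1] for i in unlabeled]
    query = unlabeled[int(np.argmin(margins))]       # most uncertain candidate
    labeled[true_labels[query]].append(query)        # "analyst" provides the label

accuracy = np.mean([predict_margin(x)[0] == y for x, y in zip(X, true_labels)])
print(f"accuracy after 10 queries: {accuracy:.2f}")
```

In a visual analytics setting, the `query` step is where candidate results would be shown to the analyst for rapid confirmation, so labeling effort concentrates on the cases the model cannot resolve on its own.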
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxC0SOX",
"doi": "10.1109/TVCG.2013.168",
"abstract": "We propose a novel video visual analytics system for interactive exploration of surveillance video data. Our approach consists of providing analysts with various views of information related to moving objects in a video. To do this we first extract each object's movement path. We visualize each movement by (a) creating a single action shot image (a still image that coalesces multiple frames), (b) plotting its trajectory in a space-time cube and (c) displaying an overall timeline view of all the movements. The action shots provide a still view of the moving object while the path view presents movement properties such as speed and location. We also provide tools for spatial and temporal filtering based on regions of interest. This allows analysts to filter out large amounts of movement activities while the action shot representation summarizes the content of each movement. We incorporated this multi-part visual representation of moving objects in sViSIT, a tool to facilitate browsing through the video content by interactive querying and retrieval of data. Based on our interaction with security personnel who routinely interact with surveillance video data, we identified some of the most common tasks performed. This resulted in designing a user study to measure time-to-completion of the various tasks. These generally required searching for specific events of interest (targets) in videos. Fourteen different tasks were designed and a total of 120 min of surveillance video were recorded (indoor and outdoor locations recording movements of people and vehicles). The time-to-completion of these tasks were compared against a manual fast forward video browsing guided with movement detection. We demonstrate how our system can facilitate lengthy video exploration and significantly reduce browsing time to find events of interest. Reports from expert users identify positive aspects of our approach which we summarize in our recommendations for future video visual analytics systems.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel video visual analytics system for interactive exploration of surveillance video data. Our approach consists of providing analysts with various views of information related to moving objects in a video. To do this we first extract each object's movement path. We visualize each movement by (a) creating a single action shot image (a still image that coalesces multiple frames), (b) plotting its trajectory in a space-time cube and (c) displaying an overall timeline view of all the movements. The action shots provide a still view of the moving object while the path view presents movement properties such as speed and location. We also provide tools for spatial and temporal filtering based on regions of interest. This allows analysts to filter out large amounts of movement activities while the action shot representation summarizes the content of each movement. We incorporated this multi-part visual representation of moving objects in sViSIT, a tool to facilitate browsing through the video content by interactive querying and retrieval of data. Based on our interaction with security personnel who routinely interact with surveillance video data, we identified some of the most common tasks performed. This resulted in designing a user study to measure time-to-completion of the various tasks. These generally required searching for specific events of interest (targets) in videos. Fourteen different tasks were designed and a total of 120 min of surveillance video were recorded (indoor and outdoor locations recording movements of people and vehicles). The time-to-completion of these tasks were compared against a manual fast forward video browsing guided with movement detection. We demonstrate how our system can facilitate lengthy video exploration and significantly reduce browsing time to find events of interest. Reports from expert users identify positive aspects of our approach which we summarize in our recommendations for future video visual analytics systems.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel video visual analytics system for interactive exploration of surveillance video data. Our approach consists of providing analysts with various views of information related to moving objects in a video. To do this we first extract each object's movement path. We visualize each movement by (a) creating a single action shot image (a still image that coalesces multiple frames), (b) plotting its trajectory in a space-time cube and (c) displaying an overall timeline view of all the movements. The action shots provide a still view of the moving object while the path view presents movement properties such as speed and location. We also provide tools for spatial and temporal filtering based on regions of interest. This allows analysts to filter out large amounts of movement activities while the action shot representation summarizes the content of each movement. We incorporated this multi-part visual representation of moving objects in sViSIT, a tool to facilitate browsing through the video content by interactive querying and retrieval of data. Based on our interaction with security personnel who routinely interact with surveillance video data, we identified some of the most common tasks performed. This resulted in designing a user study to measure time-to-completion of the various tasks. These generally required searching for specific events of interest (targets) in videos. Fourteen different tasks were designed and a total of 120 min of surveillance video were recorded (indoor and outdoor locations recording movements of people and vehicles). The time-to-completion of these tasks were compared against a manual fast forward video browsing guided with movement detection. We demonstrate how our system can facilitate lengthy video exploration and significantly reduce browsing time to find events of interest. Reports from expert users identify positive aspects of our approach which we summarize in our recommendations for future video visual analytics systems.",
"title": "Interactive Exploration of Surveillance Video through Action Shot Summarization and Trajectory Visualization",
"normalizedTitle": "Interactive Exploration of Surveillance Video through Action Shot Summarization and Trajectory Visualization",
"fno": "ttg2013122119",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Interactive States",
"Image Segmentation",
"Data Visualization",
"Visual Analytics",
"Surveillance",
"Tracking",
"Navigation",
"Video Summarization",
"Interactive States",
"Image Segmentation",
"Data Visualization",
"Visual Analytics",
"Surveillance",
"Tracking",
"Navigation",
"Video Browsing And Exploration",
"Video Visual Analytics",
"Surveillance Video",
"Video Visualization"
],
"authors": [
{
"givenName": "Amir H.",
"surname": "Meghdadi",
"fullName": "Amir H. Meghdadi",
"affiliation": "Dept. of Comput. Sci., Univ. of Manitoba, Winnipeg, MB, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Pourang",
"surname": "Irani",
"fullName": "Pourang Irani",
"affiliation": "Dept. of Comput. Sci., Univ. of Manitoba, Winnipeg, MB, Canada",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2119-2128",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icpr/2012/2216/0/06460562",
"title": "Video summarization using simple action patterns",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460562/12OmNBfZSmr",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/avss/2016/3811/0/07738018",
"title": "Video summarization of surveillance cameras",
"doi": null,
"abstractUrl": "/proceedings-article/avss/2016/07738018/12OmNC3FGfx",
"parentPublication": {
"id": "proceedings/avss/2016/3811/0",
"title": "2016 13th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2013/1604/0/06618440",
"title": "Abnormal action warning on encrypted-coded surveillance video for home safety",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2013/06618440/12OmNxvwoUL",
"parentPublication": {
"id": "proceedings/icmew/2013/1604/0",
"title": "2013 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmew/2015/7079/0/07169855",
"title": "Coherent event-based surveillance video synopsis using trajectory clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icmew/2015/07169855/12OmNyYm2F4",
"parentPublication": {
"id": "proceedings/icmew/2015/7079/0",
"title": "2015 IEEE International Conference on Multimedia & Expo Workshops (ICMEW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2011/0063/0/06130487",
"title": "A surveillance video analysis and storage scheme for scalable synopsis browsing",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2011/06130487/12OmNzvz6G4",
"parentPublication": {
"id": "proceedings/iccvw/2011/0063/0",
"title": "2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacvw/2022/5824/0/582400a122",
"title": "GabriellaV2: Towards better generalization in surveillance videos for Action Detection",
"doi": null,
"abstractUrl": "/proceedings-article/wacvw/2022/582400a122/1B12tFgF1pm",
"parentPublication": {
"id": "proceedings/wacvw/2022/5824/0",
"title": "2022 IEEE/CVF Winter Conference on Applications of Computer Vision Workshops (WACVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/td/2019/12/08734005",
"title": "ISEE: An Intelligent Scene Exploration and Evaluation Platform for Large-Scale Visual Surveillance",
"doi": null,
"abstractUrl": "/journal/td/2019/12/08734005/1aKu0HupDji",
"parentPublication": {
"id": "trans/td",
"title": "IEEE Transactions on Parallel & Distributed Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2019/9552/0/955200a568",
"title": "Localization Guided Fight Action Detection in Surveillance Videos",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2019/955200a568/1cdOT45YODm",
"parentPublication": {
"id": "proceedings/icme/2019/9552/0",
"title": "2019 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccgrid/2021/9586/0/958600a386",
"title": "VRefine: Refining Massive Surveillance Videos for Efficient Store and Fast Analyzing",
"doi": null,
"abstractUrl": "/proceedings-article/ccgrid/2021/958600a386/1vK0xNgx7Es",
"parentPublication": {
"id": "proceedings/ccgrid/2021/9586/0",
"title": "2021 IEEE/ACM 21st International Symposium on Cluster, Cloud and Internet Computing (CCGrid)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sibgrapi/2021/2354/0/235400a176",
"title": "A System for Visual Analysis of Objects Behavior in Surveillance Videos",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2021/235400a176/1zurq5mjP0c",
"parentPublication": {
"id": "proceedings/sibgrapi/2021/2354/0",
"title": "2021 34th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122109",
"articleId": "13rRUx0xPIJ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122129",
"articleId": "13rRUxBa5rW",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesR8",
"name": "ttg2013122119s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122119s.mp4",
"extension": "mp4",
"size": "14.5 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
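The surveillance record above describes "action shot" images that coalesce a moving object's appearances from multiple frames into a single still, plus per-frame trajectories for a space-time cube. A simplified illustration on synthetic frames follows; the median-background subtraction and the fixed threshold are assumptions for the sketch, and this is not the sViSIT implementation:

```python
# Rough "action shot" sketch: composite the moving pixels from several frames onto one still.
import numpy as np

T, H, W = 8, 48, 64
frames = np.zeros((T, H, W))
for t in range(T):                      # a bright 4x4 "object" moving left to right
    frames[t, 20:24, 5 + 7 * t: 9 + 7 * t] = 1.0

background = np.median(frames, axis=0)              # static scene estimate
foreground = np.abs(frames - background) > 0.5      # per-frame motion masks

# Action shot: wherever any frame saw motion, keep that frame's pixel value,
# so the object appears once per time step in a single composite image.
action_shot = background.copy()
for t in range(T):
    action_shot[foreground[t]] = frames[t][foreground[t]]

# Trajectory for a space-time cube: one (x, y, t) centroid per frame.
trajectory = [(float(np.argwhere(foreground[t])[:, 1].mean()),
               float(np.argwhere(foreground[t])[:, 0].mean()), t)
              for t in range(T) if foreground[t].any()]
print(len(trajectory), "trajectory points;", int(foreground.any(axis=0).sum()), "motion pixels")
```

Spatial filtering by a region of interest would simply discard trajectory points whose (x, y) fall outside the region, and temporal filtering would discard points by their t value.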
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxBa5rW",
"doi": "10.1109/TVCG.2013.194",
"abstract": "We introduce a visual analytics method to analyze eye movement data recorded for dynamic stimuli such as video or animated graphics. The focus lies on the analysis of data of several viewers to identify trends in the general viewing behavior, including time sequences of attentional synchrony and objects with strong attentional focus. By using a space-time cube visualization in combination with clustering, the dynamic stimuli and associated eye gazes can be analyzed in a static 3D representation. Shot-based, spatiotemporal clustering of the data generates potential areas of interest that can be filtered interactively. We also facilitate data drill-down: the gaze points are shown with density-based color mapping and individual scan paths as lines in the space-time cube. The analytical process is supported by multiple coordinated views that allow the user to focus on different aspects of spatial and temporal information in eye gaze data. Common eye-tracking visualization techniques are extended to incorporate the spatiotemporal characteristics of the data. For example, heat maps are extended to motion-compensated heat maps and trajectories of scan paths are included in the space-time visualization. Our visual analytics approach is assessed in a qualitative users study with expert users, which showed the usefulness of the approach and uncovered that the experts applied different analysis strategies supported by the system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We introduce a visual analytics method to analyze eye movement data recorded for dynamic stimuli such as video or animated graphics. The focus lies on the analysis of data of several viewers to identify trends in the general viewing behavior, including time sequences of attentional synchrony and objects with strong attentional focus. By using a space-time cube visualization in combination with clustering, the dynamic stimuli and associated eye gazes can be analyzed in a static 3D representation. Shot-based, spatiotemporal clustering of the data generates potential areas of interest that can be filtered interactively. We also facilitate data drill-down: the gaze points are shown with density-based color mapping and individual scan paths as lines in the space-time cube. The analytical process is supported by multiple coordinated views that allow the user to focus on different aspects of spatial and temporal information in eye gaze data. Common eye-tracking visualization techniques are extended to incorporate the spatiotemporal characteristics of the data. For example, heat maps are extended to motion-compensated heat maps and trajectories of scan paths are included in the space-time visualization. Our visual analytics approach is assessed in a qualitative users study with expert users, which showed the usefulness of the approach and uncovered that the experts applied different analysis strategies supported by the system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We introduce a visual analytics method to analyze eye movement data recorded for dynamic stimuli such as video or animated graphics. The focus lies on the analysis of data of several viewers to identify trends in the general viewing behavior, including time sequences of attentional synchrony and objects with strong attentional focus. By using a space-time cube visualization in combination with clustering, the dynamic stimuli and associated eye gazes can be analyzed in a static 3D representation. Shot-based, spatiotemporal clustering of the data generates potential areas of interest that can be filtered interactively. We also facilitate data drill-down: the gaze points are shown with density-based color mapping and individual scan paths as lines in the space-time cube. The analytical process is supported by multiple coordinated views that allow the user to focus on different aspects of spatial and temporal information in eye gaze data. Common eye-tracking visualization techniques are extended to incorporate the spatiotemporal characteristics of the data. For example, heat maps are extended to motion-compensated heat maps and trajectories of scan paths are included in the space-time visualization. Our visual analytics approach is assessed in a qualitative users study with expert users, which showed the usefulness of the approach and uncovered that the experts applied different analysis strategies supported by the system.",
"title": "Space-Time Visual Analytics of Eye-Tracking Data for Dynamic Stimuli",
"normalizedTitle": "Space-Time Visual Analytics of Eye-Tracking Data for Dynamic Stimuli",
"fno": "ttg2013122129",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Tracking",
"Spatiotemporal Phenomena",
"Visual Analytics",
"Clustering Algorithms",
"Space Time Codes",
"Context Awareness",
"Spatiotemporal Clustering",
"Data Visualization",
"Tracking",
"Spatiotemporal Phenomena",
"Visual Analytics",
"Clustering Algorithms",
"Space Time Codes",
"Context Awareness",
"Motion Compensated Heat Map",
"Eye Tracking",
"Space Time Cube",
"Dynamic Areas Of Interest"
],
"authors": [
{
"givenName": "Kuno",
"surname": "Kurzhals",
"fullName": "Kuno Kurzhals",
"affiliation": "Visualization Res. Center (VISUS), Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Daniel",
"surname": "Weiskopf",
"fullName": "Daniel Weiskopf",
"affiliation": "Visualization Res. Center (VISUS), Univ. of Stuttgart, Stuttgart, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2129-2138",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/etvis/2016/4731/0/07851160",
"title": "Hilbert attention maps for visualizing spatiotemporal gaze data",
"doi": null,
"abstractUrl": "/proceedings-article/etvis/2016/07851160/12OmNzVoBuv",
"parentPublication": {
"id": "proceedings/etvis/2016/4731/0",
"title": "2016 IEEE Second Workshop on Eye Tracking and Visualization (ETVIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194851",
"title": "Gaze Stripes: Image-Based Visualization of Eye Tracking Data",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194851/13rRUIJuxvk",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/03/07845707",
"title": "A Data Model and Task Space for Data of Interest (DOI) Eye-Tracking Analyses",
"doi": null,
"abstractUrl": "/journal/tg/2018/03/07845707/13rRUILLkDY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cs/2015/05/mcs2015050064",
"title": "Eye Tracking in Computer-Based Visualization",
"doi": null,
"abstractUrl": "/magazine/cs/2015/05/mcs2015050064/13rRUxjyXbd",
"parentPublication": {
"id": "mags/cs",
"title": "Computing in Science & Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/04/mcg2015040064",
"title": "Eye Tracking for Personal Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2015/04/mcg2015040064/13rRUyft7x8",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07539297",
"title": "Visual Analytics for Mobile Eye Tracking",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07539297/13rRUyv53Fx",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2022/9062/0/09956312",
"title": "A Joint Cascaded Framework for Simultaneous Eye State, Eye Center, and Gaze Estimation",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2022/09956312/1IHq8em8jug",
"parentPublication": {
"id": "proceedings/icpr/2022/9062/0",
"title": "2022 26th International Conference on Pattern Recognition (ICPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2019/3485/0/348500a199",
"title": "Stimuli-Based Gaze Analytics to Enhance Motivation and Learning in MOOCs",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2019/348500a199/1cYi3XxE62Y",
"parentPublication": {
"id": "proceedings/icalt/2019/3485/2161-377X",
"title": "2019 IEEE 19th International Conference on Advanced Learning Technologies (ICALT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/emip/2019/2243/0/224300a037",
"title": "Design of an Executable Specification Language Using Eye Tracking",
"doi": null,
"abstractUrl": "/proceedings-article/emip/2019/224300a037/1dlvML7SkCs",
"parentPublication": {
"id": "proceedings/emip/2019/2243/0",
"title": "2019 IEEE/ACM 6th International Workshop on Eye Movements in Programming (EMIP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2019/3293/0/329300j823",
"title": "Neuro-Inspired Eye Tracking With Eye Movement Dynamics",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2019/329300j823/1gyrA973n3O",
"parentPublication": {
"id": "proceedings/cvpr/2019/3293/0",
"title": "2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122119",
"articleId": "13rRUxC0SOX",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122139",
"articleId": "13rRUyfbwqH",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyfbwqH",
"doi": "10.1109/TVCG.2013.132",
"abstract": "We describe and demonstrate an extensible framework that supports data exploration and provenance in the context of Human Terrain Analysis (HTA). Working closely with defence analysts we extract requirements and a list of features that characterise data analysed at the end of the HTA chain. From these, we select an appropriate non-classified data source with analogous features, and model it as a set of facets. We develop ProveML, an XML-based extension of the Open Provenance Model, using these facets and augment it with the structures necessary to record the provenance of data, analytical process and interpretations. Through an iterative process, we develop and refine a prototype system for Human Terrain Visual Analytics (HTVA), and demonstrate means of storing, browsing and recalling analytical provenance and process through analytic bookmarks in ProveML. We show how these bookmarks can be combined to form narratives that link back to the live data. Throughout the process, we demonstrate that through structured workshops, rapid prototyping and structured communication with intelligence analysts we are able to establish requirements, and design schema, techniques and tools that meet the requirements of the intelligence community. We use the needs and reactions of defence analysts in defining and steering the methods to validate the framework.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We describe and demonstrate an extensible framework that supports data exploration and provenance in the context of Human Terrain Analysis (HTA). Working closely with defence analysts we extract requirements and a list of features that characterise data analysed at the end of the HTA chain. From these, we select an appropriate non-classified data source with analogous features, and model it as a set of facets. We develop ProveML, an XML-based extension of the Open Provenance Model, using these facets and augment it with the structures necessary to record the provenance of data, analytical process and interpretations. Through an iterative process, we develop and refine a prototype system for Human Terrain Visual Analytics (HTVA), and demonstrate means of storing, browsing and recalling analytical provenance and process through analytic bookmarks in ProveML. We show how these bookmarks can be combined to form narratives that link back to the live data. Throughout the process, we demonstrate that through structured workshops, rapid prototyping and structured communication with intelligence analysts we are able to establish requirements, and design schema, techniques and tools that meet the requirements of the intelligence community. We use the needs and reactions of defence analysts in defining and steering the methods to validate the framework.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We describe and demonstrate an extensible framework that supports data exploration and provenance in the context of Human Terrain Analysis (HTA). Working closely with defence analysts we extract requirements and a list of features that characterise data analysed at the end of the HTA chain. From these, we select an appropriate non-classified data source with analogous features, and model it as a set of facets. We develop ProveML, an XML-based extension of the Open Provenance Model, using these facets and augment it with the structures necessary to record the provenance of data, analytical process and interpretations. Through an iterative process, we develop and refine a prototype system for Human Terrain Visual Analytics (HTVA), and demonstrate means of storing, browsing and recalling analytical provenance and process through analytic bookmarks in ProveML. We show how these bookmarks can be combined to form narratives that link back to the live data. Throughout the process, we demonstrate that through structured workshops, rapid prototyping and structured communication with intelligence analysts we are able to establish requirements, and design schema, techniques and tools that meet the requirements of the intelligence community. We use the needs and reactions of defence analysts in defining and steering the methods to validate the framework.",
"title": "An Extensible Framework for Provenance in Human Terrain Visual Analytics",
"normalizedTitle": "An Extensible Framework for Provenance in Human Terrain Visual Analytics",
"fno": "ttg2013122139",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Human Factors",
"Terrain Mapping",
"Data Visualization",
"Context Awareness",
"Visual Analytics",
"Bookmarks",
"Human Factors",
"Terrain Mapping",
"Data Visualization",
"Context Awareness",
"Visual Analytics",
"Narratives",
"Human Terrain Analysis",
"Provenance",
"Framework"
],
"authors": [
{
"givenName": "Rick",
"surname": "Walker",
"fullName": "Rick Walker",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aiden",
"surname": "Slingsby",
"fullName": "Aiden Slingsby",
"affiliation": "giCentre, City Univ. London, London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jason",
"surname": "Dykes",
"fullName": "Jason Dykes",
"affiliation": "giCentre, City Univ. London, London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Kai Xu",
"fullName": "Kai Xu",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jo",
"surname": "Wood",
"fullName": "Jo Wood",
"affiliation": "giCentre, City Univ. London, London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Phong H.",
"surname": "Nguyen",
"fullName": "Phong H. Nguyen",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Derek",
"surname": "Stephens",
"fullName": "Derek Stephens",
"affiliation": "Loughborough Univ., Loughborough, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "B. L. William",
"surname": "Wong",
"fullName": "B. L. William Wong",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Yongjun Zheng",
"fullName": "Yongjun Zheng",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2139-2148",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icde/2017/6543/0/6543b373",
"title": "POLYTICS: Provenance-Based Analytics of Data-Centric Applications",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2017/6543b373/12OmNyaGeJ6",
"parentPublication": {
"id": "proceedings/icde/2017/6543/0",
"title": "2017 IEEE 33rd International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/04/ttg2011040440",
"title": "Forecasting Hotspots—A Predictive Analytics Approach",
"doi": null,
"abstractUrl": "/journal/tg/2011/04/ttg2011040440/13rRUwdrdSv",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/07/mco2013070030",
"title": "Visual Analytics Support for Intelligence Analysis",
"doi": null,
"abstractUrl": "/magazine/co/2013/07/mco2013070030/13rRUxD9h0P",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192714",
"title": "Characterizing Provenance in Visualization and Data Analysis: An Organizational Framework of Provenance Types and Purposes",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192714/13rRUxOdD2F",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534019",
"title": "Multiple Workspaces in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534019/17D45W9KVIu",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bdva/2018/9194/0/08534022",
"title": "Revealing the Invisible: Visual Analytics and Explanatory Storytelling for Advanced Team Sport Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/bdva/2018/08534022/17D45WODasQ",
"parentPublication": {
"id": "proceedings/bdva/2018/9194/0",
"title": "2018 International Symposium on Big Data Visual and Immersive Analytics (BDVA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08788592",
"title": "Analytic Provenance in Practice: The Role of Provenance in Real-World Visualization and Data Analysis Environments",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08788592/1cfqCMPtgRy",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a045",
"title": "A Case Study of Using Analytic Provenance to Reconstruct User Trust in a Guided Visual Analytics System",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a045/1yQB6KjO9oI",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2021/1817/0/181700a040",
"title": "Beyond Visual Analytics: Human-Machine Teaming for AI-Driven Data Sensemaking",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2021/181700a040/1yQB6SxBJ0A",
"parentPublication": {
"id": "proceedings/trex/2021/1817/0",
"title": "2021 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09652041",
"title": "Provectories: Embedding-based Analysis of Interaction Provenance Data",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09652041/1zmuReh8VZ6",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122129",
"articleId": "13rRUxBa5rW",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122149",
"articleId": "13rRUILtJzz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRJ5",
"name": "ttg2013122139s.txt",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122139s.txt",
"extension": "txt",
"size": "25 B",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWRJ4",
"name": "ttg2013122139s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122139s.mov",
"extension": "mov",
"size": "36.6 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILtJzz",
"doi": "10.1109/TVCG.2013.226",
"abstract": "As increasing volumes of urban data are captured and become available, new opportunities arise for data-driven analysis that can lead to improvements in the lives of citizens through evidence-based decision making and policies. In this paper, we focus on a particularly important urban data set: taxi trips. Taxis are valuable sensors and information associated with taxi trips can provide unprecedented insight into many different aspects of city life, from economic activity and human behavior to mobility patterns. But analyzing these data presents many challenges. The data are complex, containing geographical and temporal components in addition to multiple variables associated with each trip. Consequently, it is hard to specify exploratory queries and to perform comparative analyses (e.g., compare different regions over time). This problem is compounded due to the size of the data-there are on average 500,000 taxi trips each day in NYC. We propose a new model that allows users to visually query taxi trips. Besides standard analytics queries, the model supports origin-destination queries that enable the study of mobility across the city. We show that this model is able to express a wide range of spatio-temporal queries, and it is also flexible in that not only can queries be composed but also different aggregations and visual representations can be applied, allowing users to explore and compare results. We have built a scalable system that implements this model which supports interactive response times; makes use of an adaptive level-of-detail rendering strategy to generate clutter-free visualization for large results; and shows hidden details to the users in a summary through the use of overlay heat maps. We present a series of case studies motivated by traffic engineers and economists that show how our model and system enable domain experts to perform tasks that were previously unattainable for them.",
"abstracts": [
{
"abstractType": "Regular",
"content": "As increasing volumes of urban data are captured and become available, new opportunities arise for data-driven analysis that can lead to improvements in the lives of citizens through evidence-based decision making and policies. In this paper, we focus on a particularly important urban data set: taxi trips. Taxis are valuable sensors and information associated with taxi trips can provide unprecedented insight into many different aspects of city life, from economic activity and human behavior to mobility patterns. But analyzing these data presents many challenges. The data are complex, containing geographical and temporal components in addition to multiple variables associated with each trip. Consequently, it is hard to specify exploratory queries and to perform comparative analyses (e.g., compare different regions over time). This problem is compounded due to the size of the data-there are on average 500,000 taxi trips each day in NYC. We propose a new model that allows users to visually query taxi trips. Besides standard analytics queries, the model supports origin-destination queries that enable the study of mobility across the city. We show that this model is able to express a wide range of spatio-temporal queries, and it is also flexible in that not only can queries be composed but also different aggregations and visual representations can be applied, allowing users to explore and compare results. We have built a scalable system that implements this model which supports interactive response times; makes use of an adaptive level-of-detail rendering strategy to generate clutter-free visualization for large results; and shows hidden details to the users in a summary through the use of overlay heat maps. We present a series of case studies motivated by traffic engineers and economists that show how our model and system enable domain experts to perform tasks that were previously unattainable for them.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "As increasing volumes of urban data are captured and become available, new opportunities arise for data-driven analysis that can lead to improvements in the lives of citizens through evidence-based decision making and policies. In this paper, we focus on a particularly important urban data set: taxi trips. Taxis are valuable sensors and information associated with taxi trips can provide unprecedented insight into many different aspects of city life, from economic activity and human behavior to mobility patterns. But analyzing these data presents many challenges. The data are complex, containing geographical and temporal components in addition to multiple variables associated with each trip. Consequently, it is hard to specify exploratory queries and to perform comparative analyses (e.g., compare different regions over time). This problem is compounded due to the size of the data-there are on average 500,000 taxi trips each day in NYC. We propose a new model that allows users to visually query taxi trips. Besides standard analytics queries, the model supports origin-destination queries that enable the study of mobility across the city. We show that this model is able to express a wide range of spatio-temporal queries, and it is also flexible in that not only can queries be composed but also different aggregations and visual representations can be applied, allowing users to explore and compare results. We have built a scalable system that implements this model which supports interactive response times; makes use of an adaptive level-of-detail rendering strategy to generate clutter-free visualization for large results; and shows hidden details to the users in a summary through the use of overlay heat maps. We present a series of case studies motivated by traffic engineers and economists that show how our model and system enable domain experts to perform tasks that were previously unattainable for them.",
"title": "Visual Exploration of Big Spatio-Temporal Urban Data: A Study of New York City Taxi Trips",
"normalizedTitle": "Visual Exploration of Big Spatio-Temporal Urban Data: A Study of New York City Taxi Trips",
"fno": "ttg2013122149",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Cities And Towns",
"Data Visualization",
"Data Models",
"Analytical Models",
"Time Factors",
"Mathematical Model",
"NYC Taxis",
"Visual Analytics",
"Cities And Towns",
"Data Visualization",
"Data Models",
"Analytical Models",
"Time Factors",
"Mathematical Model",
"Visual Exploration",
"Spatio Temporal Queries",
"Urban Data"
],
"authors": [
{
"givenName": "Nivan",
"surname": "Ferreira",
"fullName": "Nivan Ferreira",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jorge",
"surname": "Poco",
"fullName": "Jorge Poco",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huy T.",
"surname": "Vo",
"fullName": "Huy T. Vo",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Juliana",
"surname": "Freire",
"fullName": "Juliana Freire",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Claudio T.",
"surname": "Silva",
"fullName": "Claudio T. Silva",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2149-2158",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/big-data/2015/9926/0/07363837",
"title": "A scalable approach for data-driven taxi ride-sharing simulation",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363837/12OmNrFBQ03",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07364113",
"title": "Taxi trip time prediction using similar trips and road network data",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07364113/12OmNrMZpzH",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ms/2016/2625/0/2625a057",
"title": "Big Data Mobile Services for New York City Taxi Riders and Drivers",
"doi": null,
"abstractUrl": "/proceedings-article/ms/2016/2625a057/12OmNwD1pXR",
"parentPublication": {
"id": "proceedings/ms/2016/2625/0",
"title": "2016 IEEE International Conference on Mobile Services (MS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2014/2874/0/2874a137",
"title": "Visualizing Hidden Themes of Taxi Movement with Semantic Transformation",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2014/2874a137/12OmNwI8ccX",
"parentPublication": {
"id": "proceedings/pacificvis/2014/2874/0",
"title": "2014 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bigdata-congress/2014/5057/0/06906763",
"title": "High-Performance Spatial Query Processing on Big Taxi Trip Data Using GPGPUs",
"doi": null,
"abstractUrl": "/proceedings-article/bigdata-congress/2014/06906763/12OmNx38vVE",
"parentPublication": {
"id": "proceedings/bigdata-congress/2014/5057/0",
"title": "2014 IEEE International Congress on Big Data (BigData Congress)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2016/9005/0/07840904",
"title": "Big data computation of taxi movement in New York City",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2016/07840904/12OmNxwncfV",
"parentPublication": {
"id": "proceedings/big-data/2016/9005/0",
"title": "2016 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2013/1293/0/06691775",
"title": "A big data driven model for taxi drivers' airport pick-up decisions in New York City",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2013/06691775/12OmNyS6REg",
"parentPublication": {
"id": "proceedings/big-data/2013/1293/0",
"title": "2013 IEEE International Conference on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-ithingscpscom/2013/5046/0/06682189",
"title": "How Long a Passenger Waits for a Vacant Taxi -- Large-Scale Taxi Trace Mining for Smart Cities",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-ithingscpscom/2013/06682189/12OmNzFdt7d",
"parentPublication": {
"id": "proceedings/greencom-ithingscpscom/2013/5046/0",
"title": "2013 IEEE International Conference on Green Computing and Communications (GreenCom) and IEEE Internet of Things(iThings) and IEEE Cyber, Physical and Social Computing(CPSCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07534822",
"title": "SemanticTraj: A New Approach to Interacting with Massive Taxi Trajectories",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07534822/13rRUygT7sI",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2019/9226/0/922600a174",
"title": "Visual Analytics of Taxi Trajectory Data via Topical Sub-trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2019/922600a174/1cMF7meccAo",
"parentPublication": {
"id": "proceedings/pacificvis/2019/9226/0",
"title": "2019 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122139",
"articleId": "13rRUyfbwqH",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122159",
"articleId": "13rRUNvgz9O",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet3X",
"name": "ttg2013122149s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122149s.mp4",
"extension": "mp4",
"size": "34.4 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUNvgz9O",
"doi": "10.1109/TVCG.2013.228",
"abstract": "In this work, we present an interactive system for visual analysis of urban traffic congestion based on GPS trajectories. For these trajectories we develop strategies to extract and derive traffic jam information. After cleaning the trajectories, they are matched to a road network. Subsequently, traffic speed on each road segment is computed and traffic jam events are automatically detected. Spatially and temporally related events are concatenated in, so-called, traffic jam propagation graphs. These graphs form a high-level description of a traffic jam and its propagation in time and space. Our system provides multiple views for visually exploring and analyzing the traffic condition of a large city as a whole, on the level of propagation graphs, and on road segment level. Case studies with 24 days of taxi GPS trajectories collected in Beijing demonstrate the effectiveness of our system.",
"abstracts": [
{
"abstractType": "Regular",
"content": "In this work, we present an interactive system for visual analysis of urban traffic congestion based on GPS trajectories. For these trajectories we develop strategies to extract and derive traffic jam information. After cleaning the trajectories, they are matched to a road network. Subsequently, traffic speed on each road segment is computed and traffic jam events are automatically detected. Spatially and temporally related events are concatenated in, so-called, traffic jam propagation graphs. These graphs form a high-level description of a traffic jam and its propagation in time and space. Our system provides multiple views for visually exploring and analyzing the traffic condition of a large city as a whole, on the level of propagation graphs, and on road segment level. Case studies with 24 days of taxi GPS trajectories collected in Beijing demonstrate the effectiveness of our system.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "In this work, we present an interactive system for visual analysis of urban traffic congestion based on GPS trajectories. For these trajectories we develop strategies to extract and derive traffic jam information. After cleaning the trajectories, they are matched to a road network. Subsequently, traffic speed on each road segment is computed and traffic jam events are automatically detected. Spatially and temporally related events are concatenated in, so-called, traffic jam propagation graphs. These graphs form a high-level description of a traffic jam and its propagation in time and space. Our system provides multiple views for visually exploring and analyzing the traffic condition of a large city as a whole, on the level of propagation graphs, and on road segment level. Case studies with 24 days of taxi GPS trajectories collected in Beijing demonstrate the effectiveness of our system.",
"title": "Visual Traffic Jam Analysis Based on Trajectory Data",
"normalizedTitle": "Visual Traffic Jam Analysis Based on Trajectory Data",
"fno": "ttg2013122159",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Analysis",
"Data Visualisation",
"Global Positioning System",
"Interactive Systems",
"Pattern Matching",
"Traffic Information Systems",
"Beijing",
"Taxi GPS Trajectories",
"Visual Exploration",
"High Level Traffic Jam Description",
"Traffic Jam Propagation Graphs",
"Spatially Related Events",
"Temporally Related Events",
"Automatic Traffic Jam Event Detection",
"Road Segment",
"Traffic Speed",
"Trajectory Matching",
"Road Network",
"Traffic Jam Information Extraction",
"GPS Trajectories",
"Interactive System",
"Visual Urban Traffic Congestion Analysis",
"Trajectory Data",
"Visual Traffic Jam Analysis",
"Road Traffic",
"Trajectory",
"Urban Areas",
"Global Positioning System",
"Data Visualization",
"Cities And Towns",
"Data Mining",
"Traffic Control",
"Road Traffic",
"Trajectory",
"Urban Areas",
"Global Positioning System",
"Data Visualization",
"Cities And Towns",
"Data Mining",
"Traffic Control",
"Traffic Jam Propagation",
"Traffic Visualization"
],
"authors": [
{
"givenName": "Zuchao",
"surname": "Wang",
"fullName": "Zuchao Wang",
"affiliation": "Key Laboratory of Machine Perception (Ministry of Education), and School of EECS, Peking University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Min",
"surname": "Lu",
"fullName": "Min Lu",
"affiliation": "Key Laboratory of Machine Perception (Ministry of Education), and School of EECS, Peking University, and Center for Computational Science and Engineering, Peking University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Xiaoru",
"surname": "Yuan",
"fullName": "Xiaoru Yuan",
"affiliation": "Key Laboratory of Machine Perception (Ministry of Education), and School of EECS, Peking University, and Center for Computational Science and Engineering, Peking University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Junping",
"surname": "Zhang",
"fullName": "Junping Zhang",
"affiliation": "Shanghai Key Laboratory of Intelligent Information Processing, and School of Computer Science, Fudan University",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Huub",
"surname": "Van De Wetering",
"fullName": "Huub Van De Wetering",
"affiliation": "Department of Mathematics and Computer Science, Technische Universiteit Eindhoven",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2159-2168",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmtma/2009/3583/3/3583c479",
"title": "Analysis of the Relation between Highway Horizontal Curve and Traffic Safety",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583c479/12OmNAR1b1u",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccima/2007/3050/1/30500061",
"title": "Avoiding Traffic Jam Using Ant Colony Optimization - A Novel Approach",
"doi": null,
"abstractUrl": "/proceedings-article/iccima/2007/30500061/12OmNAlNiIG",
"parentPublication": {
"id": "proceedings/iccima/2007/3050/1",
"title": "Computational Intelligence and Multimedia Applications, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/sitis/2009/3959/0/3959a257",
"title": "Characterizing Traffic Density and Its Evolution through Moving Object Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/sitis/2009/3959a257/12OmNCctfiQ",
"parentPublication": {
"id": "proceedings/sitis/2009/3959/0",
"title": "2009 Fifth International Conference on Signal Image Technology and Internet Based Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/isise/2009/6325/0/05447295",
"title": "Study on the Traffic Impact Analysis of the Second Ring Road in Beijing",
"doi": null,
"abstractUrl": "/proceedings-article/isise/2009/05447295/12OmNyKJigL",
"parentPublication": {
"id": "proceedings/isise/2009/6325/0",
"title": "2009 Second International Symposium on Information Science and Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2012/4685/0/4685a142",
"title": "NEAT: Road Network Aware Trajectory Clustering",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2012/4685a142/12OmNz5JBOu",
"parentPublication": {
"id": "proceedings/icdcs/2012/4685/0",
"title": "2012 IEEE 32nd International Conference on Distributed Computing Systems",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cenics/2009/3832/0/3832a056",
"title": "Parking Traffic Jam Forecast System",
"doi": null,
"abstractUrl": "/proceedings-article/cenics/2009/3832a056/12OmNzayNzS",
"parentPublication": {
"id": "proceedings/cenics/2009/3832/0",
"title": "Advances in Circuits, Electronics and Micro-electronics, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/bd/2021/06/08798705",
"title": "Citywide Traffic Volume Inference with Surveillance Camera Records",
"doi": null,
"abstractUrl": "/journal/bd/2021/06/08798705/1cumMzvtNU4",
"parentPublication": {
"id": "trans/bd",
"title": "IEEE Transactions on Big Data",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2020/2903/0/09101794",
"title": "Traffic Incident Detection: A Trajectory-based Approach",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2020/09101794/1kaMD7AW7n2",
"parentPublication": {
"id": "proceedings/icde/2020/2903/0",
"title": "2020 IEEE 36th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cibda/2020/9837/0/983700a037",
"title": "Traffic Jam Prediction Based on Analysis of Residents Spatial Activities",
"doi": null,
"abstractUrl": "/proceedings-article/cibda/2020/983700a037/1lO1MlrryYU",
"parentPublication": {
"id": "proceedings/cibda/2020/9837/0",
"title": "2020 International Conference on Computer Information and Big Data Applications (CIBDA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2020/6251/0/09378068",
"title": "Real-time Traffic Jam Detection and Congestion Reduction Using Streaming Graph Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2020/09378068/1s64920VxSM",
"parentPublication": {
"id": "proceedings/big-data/2020/6251/0",
"title": "2020 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122149",
"articleId": "13rRUILtJzz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122169",
"articleId": "13rRUxYrbUG",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgRC",
"name": "ttg2013122159s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122159s.mp4",
"extension": "mp4",
"size": "18.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxYrbUG",
"doi": "10.1109/TVCG.2013.193",
"abstract": "We suggest a methodology for analyzing movement behaviors of individuals moving in a group. Group movement is analyzed at two levels of granularity: the group as a whole and the individuals it comprises. For analyzing the relative positions and movements of the individuals with respect to the rest of the group, we apply space transformation, in which the trajectories of the individuals are converted from geographical space to an abstract 'group space'. The group space reference system is defined by both the position of the group center, which is taken as the coordinate origin, and the direction of the group's movement. Based on the individuals' positions mapped onto the group space, we can compare the behaviors of different individuals, determine their roles and/or ranks within the groups, and, possibly, understand how group movement is organized. The utility of the methodology has been evaluated by applying it to a set of real data concerning movements of wild social animals and discussing the results with experts in animal ethology.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We suggest a methodology for analyzing movement behaviors of individuals moving in a group. Group movement is analyzed at two levels of granularity: the group as a whole and the individuals it comprises. For analyzing the relative positions and movements of the individuals with respect to the rest of the group, we apply space transformation, in which the trajectories of the individuals are converted from geographical space to an abstract 'group space'. The group space reference system is defined by both the position of the group center, which is taken as the coordinate origin, and the direction of the group's movement. Based on the individuals' positions mapped onto the group space, we can compare the behaviors of different individuals, determine their roles and/or ranks within the groups, and, possibly, understand how group movement is organized. The utility of the methodology has been evaluated by applying it to a set of real data concerning movements of wild social animals and discussing the results with experts in animal ethology.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We suggest a methodology for analyzing movement behaviors of individuals moving in a group. Group movement is analyzed at two levels of granularity: the group as a whole and the individuals it comprises. For analyzing the relative positions and movements of the individuals with respect to the rest of the group, we apply space transformation, in which the trajectories of the individuals are converted from geographical space to an abstract 'group space'. The group space reference system is defined by both the position of the group center, which is taken as the coordinate origin, and the direction of the group's movement. Based on the individuals' positions mapped onto the group space, we can compare the behaviors of different individuals, determine their roles and/or ranks within the groups, and, possibly, understand how group movement is organized. The utility of the methodology has been evaluated by applying it to a set of real data concerning movements of wild social animals and discussing the results with experts in animal ethology.",
"title": "Space Transformation for Understanding Group Movement",
"normalizedTitle": "Space Transformation for Understanding Group Movement",
"fno": "ttg2013122169",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Trajectory",
"Visual Analytics",
"Market Research",
"Behavioral Science",
"Data Models",
"Movement Data",
"Trajectory",
"Visual Analytics",
"Market Research",
"Behavioral Science",
"Data Models",
"Collective Movement",
"Visual Analytics"
],
"authors": [
{
"givenName": "Natalia",
"surname": "Andrienko",
"fullName": "Natalia Andrienko",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Gennady",
"surname": "Andrienko",
"fullName": "Gennady Andrienko",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Louise",
"surname": "Barrett",
"fullName": "Louise Barrett",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marcus",
"surname": "Dostie",
"fullName": "Marcus Dostie",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Henzi",
"fullName": "Peter Henzi",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2169-2178",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/sibgrapi/2017/2219/0/2219a103",
"title": "Visual Analysis of Predictive Suffix Trees for Discovering Movement Patterns and Behaviors",
"doi": null,
"abstractUrl": "/proceedings-article/sibgrapi/2017/2219a103/12OmNA0MZ4B",
"parentPublication": {
"id": "proceedings/sibgrapi/2017/2219/0",
"title": "2017 30th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2015/9325/0/9325a196",
"title": "Discovering Loose Group Movement Patterns from Animal Trajectories",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2015/9325a196/12OmNBZpH8o",
"parentPublication": {
"id": "proceedings/e-science/2015/9325/0",
"title": "2015 IEEE 11th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icisa/2013/0602/0/06579385",
"title": "The Measurement of Group Arousal via Movement Synchronization",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579385/12OmNqyDjnF",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/big-data/2015/9926/0/07363802",
"title": "Visual analysis of bi-directional movement behavior",
"doi": null,
"abstractUrl": "/proceedings-article/big-data/2015/07363802/12OmNwpoFAQ",
"parentPublication": {
"id": "proceedings/big-data/2015/9926/0",
"title": "2015 IEEE International Conference on Big Data (Big Data)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/greencom-cpscom/2010/4331/0/4331a612",
"title": "A Methodology to Predicate Human-Being's Movement Based on Movement Group",
"doi": null,
"abstractUrl": "/proceedings-article/greencom-cpscom/2010/4331a612/12OmNzYeAWJ",
"parentPublication": {
"id": "proceedings/greencom-cpscom/2010/4331/0",
"title": "IEEE-ACM International Conference on Green Computing and Communications and International Conference on Cyber, Physical and Social Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2011/02/ttg2011020205",
"title": "Spatial Generalization and Aggregation of Massive Movement Data",
"doi": null,
"abstractUrl": "/journal/tg/2011/02/ttg2011020205/13rRUwfI0Q5",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122889",
"title": "Visual Analytics Methodology for Eye Movement Studies",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122889/13rRUxjyX3Z",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585564",
"title": "Interactive Visual Analytics Application for Spatiotemporal Movement Data VAST Challenge 2017 Mini-Challenge 1: Award for Actionable and Detailed Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585564/17D45VsBU7R",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440823",
"title": "MotionRugs: Visualizing Collective Trends in Space and Time",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440823/17D45W2WyxW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/bibm/2020/6215/0/09313104",
"title": "Anxiety Detection with Nonlinear Group Correlation Fusion of Electroencephalogram and Eye Movement",
"doi": null,
"abstractUrl": "/proceedings-article/bibm/2020/09313104/1qmfPNp0Vmo",
"parentPublication": {
"id": "proceedings/bibm/2020/6215/0",
"title": "2020 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122159",
"articleId": "13rRUNvgz9O",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122179",
"articleId": "13rRUyeTVi3",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyeTVi3",
"doi": "10.1109/TVCG.2013.224",
"abstract": "We propose a novel approach of distance-based spatial clustering and contribute a heuristic computation of input parameters for guiding users in the search of interesting cluster constellations. We thereby combine computational geometry with interactive visualization into one coherent framework. Our approach entails displaying the results of the heuristics to users, as shown in Figure 1, providing a setting from which to start the exploration and data analysis. Addition interaction capabilities are available containing visual feedback for exploring further clustering options and is able to cope with noise in the data. We evaluate, and show the benefits of our approach on a sophisticated artificial dataset and demonstrate its usefulness on real-world data.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We propose a novel approach of distance-based spatial clustering and contribute a heuristic computation of input parameters for guiding users in the search of interesting cluster constellations. We thereby combine computational geometry with interactive visualization into one coherent framework. Our approach entails displaying the results of the heuristics to users, as shown in Figure 1, providing a setting from which to start the exploration and data analysis. Addition interaction capabilities are available containing visual feedback for exploring further clustering options and is able to cope with noise in the data. We evaluate, and show the benefits of our approach on a sophisticated artificial dataset and demonstrate its usefulness on real-world data.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We propose a novel approach of distance-based spatial clustering and contribute a heuristic computation of input parameters for guiding users in the search of interesting cluster constellations. We thereby combine computational geometry with interactive visualization into one coherent framework. Our approach entails displaying the results of the heuristics to users, as shown in Figure 1, providing a setting from which to start the exploration and data analysis. Addition interaction capabilities are available containing visual feedback for exploring further clustering options and is able to cope with noise in the data. We evaluate, and show the benefits of our approach on a sophisticated artificial dataset and demonstrate its usefulness on real-world data.",
"title": "Visual Analytics for Spatial Clustering: Using a Heuristic Approach for Guided Exploration",
"normalizedTitle": "Visual Analytics for Spatial Clustering: Using a Heuristic Approach for Guided Exploration",
"fno": "ttg2013122179",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Shape Analysis",
"Clustering Algorithms",
"Noise Measurement",
"Visual Analytics",
"Data Visualization",
"Image Color Analysis",
"Heuristic Algorithms",
"I Interactive Visual Clustering",
"Shape Analysis",
"Clustering Algorithms",
"Noise Measurement",
"Visual Analytics",
"Data Visualization",
"Image Color Analysis",
"Heuristic Algorithms",
"K Order A Alpha Shapes",
"Heuristic Based Spatial Clustering"
],
"authors": [
{
"givenName": "Eli",
"surname": "Packer",
"fullName": "Eli Packer",
"affiliation": "IBM Res. Haifa Lab., Haifa, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Bak",
"fullName": "Peter Bak",
"affiliation": "IBM Res. Haifa Lab., Haifa, Israel",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Mikko",
"surname": "Nikkila",
"fullName": "Mikko Nikkila",
"affiliation": "Univ. of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Valentin",
"surname": "Polishchuk",
"fullName": "Valentin Polishchuk",
"affiliation": "Univ. of Helsinki, Helsinki, Finland",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harold J.",
"surname": "Ship",
"fullName": "Harold J. Ship",
"affiliation": "IBM Res. Haifa Lab., Haifa, Israel",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2179-2188",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042521",
"title": "Visual analytics for the exploration of multiparametric cancer imaging",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042521/12OmNAGNCeQ",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400514",
"title": "Big data exploration through visual analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400514/12OmNC3XhwY",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iri/2014/5880/0/07051972",
"title": "Detecting geo-spatial weather clusters using dynamic heuristic subspaces",
"doi": null,
"abstractUrl": "/proceedings-article/iri/2014/07051972/12OmNz2kqfM",
"parentPublication": {
"id": "proceedings/iri/2014/5880/0",
"title": "2014 IEEE International Conference on Information Reuse and Integration (IRI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2012/04/mcg2012040026",
"title": "A Graph Algebra for Scalable Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2012/04/mcg2012040026/13rRUILLkpN",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122207",
"title": "The Impact of Physical Navigation on Spatial Organization for Sensemaking",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122207/13rRUwI5TQZ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1363",
"title": "High-Dimensional Visual Analytics: Interactive Exploration Guided by Pairwise Views of Point Distributions",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1363/13rRUx0xPIs",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2012/12/ttg2012122899",
"title": "A Visual Analytics Approach to Multiscale Exploration of Environmental Time Series",
"doi": null,
"abstractUrl": "/journal/tg/2012/12/ttg2012122899/13rRUxDqS8g",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876049",
"title": "Progressive Visual Analytics: User-Driven Visual Exploration of In-Progress Analytics",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876049/13rRUyogGAd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192717",
"title": "Reducing Snapshots to Points: A Visual Analytics Approach to Dynamic Network Exploration",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192717/13rRUyp7tWY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585677",
"title": "VAST Mini-Challenge 1",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585677/17D45WK5Aox",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122169",
"articleId": "13rRUxYrbUG",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122189",
"articleId": "13rRUwdrdSz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRIK",
"name": "ttg2013122179s.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122179s.wmv",
"extension": "wmv",
"size": "7.08 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwdrdSz",
"doi": "10.1109/TVCG.2013.197",
"abstract": "Maintaining an awareness of collaborators' actions is critical during collaborative work, including during collaborative visualization activities. Particularly when collaborators are located at a distance, it is important to know what everyone is working on in order to avoid duplication of effort, share relevant results in a timely manner and build upon each other's results. Can a person's brushing actions provide an indication of their queries and interests in a data set? Can these actions be revealed to a collaborator without substantially disrupting their own independent work? We designed a study to answer these questions in the context of distributed collaborative visualization of tabular data. Participants in our study worked independently to answer questions about a tabular data set, while simultaneously viewing brushing actions of a fictitious collaborator, shown directly within a shared workspace. We compared three methods of presenting the collaborator's actions: brushing & linking (i.e. highlighting exactly what the collaborator would see), selection (i.e. showing only a selected item), and persistent selection (i.e. showing only selected items but having them persist for some time). Our results demonstrated that persistent selection enabled some awareness of the collaborator's activities while causing minimal interference with independent work. Other techniques were less effective at providing awareness, and brushing & linking caused substantial interference. These findings suggest promise for the idea of exploiting natural brushing actions to provide awareness in collaborative work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Maintaining an awareness of collaborators' actions is critical during collaborative work, including during collaborative visualization activities. Particularly when collaborators are located at a distance, it is important to know what everyone is working on in order to avoid duplication of effort, share relevant results in a timely manner and build upon each other's results. Can a person's brushing actions provide an indication of their queries and interests in a data set? Can these actions be revealed to a collaborator without substantially disrupting their own independent work? We designed a study to answer these questions in the context of distributed collaborative visualization of tabular data. Participants in our study worked independently to answer questions about a tabular data set, while simultaneously viewing brushing actions of a fictitious collaborator, shown directly within a shared workspace. We compared three methods of presenting the collaborator's actions: brushing & linking (i.e. highlighting exactly what the collaborator would see), selection (i.e. showing only a selected item), and persistent selection (i.e. showing only selected items but having them persist for some time). Our results demonstrated that persistent selection enabled some awareness of the collaborator's activities while causing minimal interference with independent work. Other techniques were less effective at providing awareness, and brushing & linking caused substantial interference. These findings suggest promise for the idea of exploiting natural brushing actions to provide awareness in collaborative work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Maintaining an awareness of collaborators' actions is critical during collaborative work, including during collaborative visualization activities. Particularly when collaborators are located at a distance, it is important to know what everyone is working on in order to avoid duplication of effort, share relevant results in a timely manner and build upon each other's results. Can a person's brushing actions provide an indication of their queries and interests in a data set? Can these actions be revealed to a collaborator without substantially disrupting their own independent work? We designed a study to answer these questions in the context of distributed collaborative visualization of tabular data. Participants in our study worked independently to answer questions about a tabular data set, while simultaneously viewing brushing actions of a fictitious collaborator, shown directly within a shared workspace. We compared three methods of presenting the collaborator's actions: brushing & linking (i.e. highlighting exactly what the collaborator would see), selection (i.e. showing only a selected item), and persistent selection (i.e. showing only selected items but having them persist for some time). Our results demonstrated that persistent selection enabled some awareness of the collaborator's activities while causing minimal interference with independent work. Other techniques were less effective at providing awareness, and brushing & linking caused substantial interference. These findings suggest promise for the idea of exploiting natural brushing actions to provide awareness in collaborative work.",
"title": "Supporting Awareness through Collaborative Brushing and Linking of Tabular Data",
"normalizedTitle": "Supporting Awareness through Collaborative Brushing and Linking of Tabular Data",
"fno": "ttg2013122189",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Context Awareness",
"Collaborative Work",
"Linked Views",
"Data Visualization",
"Context Awareness",
"Collaborative Work",
"User Study",
"Collaboration",
"Awareness",
"Attentionally Ambient Visualization",
"Brushing And Linking"
],
"authors": [
{
"givenName": "Amir Hossein",
"surname": "Hajizadeh",
"fullName": "Amir Hossein Hajizadeh",
"affiliation": "Univ. of Victoria, Victoria, BC, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Melanie",
"surname": "Tory",
"fullName": "Melanie Tory",
"affiliation": "Univ. of Victoria, Victoria, BC, Canada",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Rock",
"surname": "Leung",
"fullName": "Rock Leung",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2189-2197",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icons/2007/2807/0/04196335",
"title": "Supporting Social Awareness with 3D Collaborative Virtual Environments and Mobile Devices: VirasMobile",
"doi": null,
"abstractUrl": "/proceedings-article/icons/2007/04196335/12OmNBoNroj",
"parentPublication": {
"id": "proceedings/icons/2007/2807/0",
"title": "Second International Conference on Systems (ICONS'07)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/colcom/2006/0428/0/04207531",
"title": "A Framework for Inter-referential Awareness in Collaborative Environments",
"doi": null,
"abstractUrl": "/proceedings-article/colcom/2006/04207531/12OmNC4eSra",
"parentPublication": {
"id": "proceedings/colcom/2006/0428/0",
"title": "International Conference on Collaborative Computing: Networking, Applications and Worksharing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/user/2012/1859/0/06226580",
"title": "Evaluating awareness information in distributed collaborative editing by software-engineers",
"doi": null,
"abstractUrl": "/proceedings-article/user/2012/06226580/12OmNCcKQes",
"parentPublication": {
"id": "proceedings/user/2012/1859/0",
"title": "2012 First International Workshop on User Evaluation for Software Engineering Researchers (USER 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/criwg/2000/0828/0/08280112",
"title": "Group Awareness Support in Collaborative Writing Systems",
"doi": null,
"abstractUrl": "/proceedings-article/criwg/2000/08280112/12OmNrNh0sK",
"parentPublication": {
"id": "proceedings/criwg/2000/0828/0",
"title": "Groupware, International Workshop on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icalt/2011/4346/0/4346a071",
"title": "A Shared Rationale Space for Supporting Knowledge Awareness in Collaborative Learning Activities: An Empirical Study",
"doi": null,
"abstractUrl": "/proceedings-article/icalt/2011/4346a071/12OmNxwENJw",
"parentPublication": {
"id": "proceedings/icalt/2011/4346/0",
"title": "Advanced Learning Technologies, IEEE International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/collaboratecom/2013/92/0/06680016",
"title": "Enterprise 2.0 in action: Potentials for improvement of awareness support in enterprises",
"doi": null,
"abstractUrl": "/proceedings-article/collaboratecom/2013/06680016/12OmNz61dIg",
"parentPublication": {
"id": "proceedings/collaboratecom/2013/92/0",
"title": "2013 9th International Conference on Collaborative Computing: Networking, Applications and Worksharing (CollaborateCom)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ieee-infovis/2002/1751/0/17510127",
"title": "Angular Brushing of Extended Parallel Coordinates",
"doi": null,
"abstractUrl": "/proceedings-article/ieee-infovis/2002/17510127/12OmNzYNNf3",
"parentPublication": {
"id": "proceedings/ieee-infovis/2002/1751/0",
"title": "Information Visualization, IEEE Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/05/mcg2009050034",
"title": "Supporting Exploration Awareness in Information Visualization",
"doi": null,
"abstractUrl": "/magazine/cg/2009/05/mcg2009050034/13rRUxBrGjn",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2018/01/08017621",
"title": "MyBrush: Brushing and Linking with Personal Agency",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017621/13rRUxD9gXN",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/clei/2018/0437/0/043700a021",
"title": "Quality Assessment of Awareness Support in Agile Collaborative Tools",
"doi": null,
"abstractUrl": "/proceedings-article/clei/2018/043700a021/1cdOZ9XLJcc",
"parentPublication": {
"id": "proceedings/clei/2018/0437/0",
"title": "2018 XLIV Latin American Computer Conference (CLEI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122179",
"articleId": "13rRUyeTVi3",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122198",
"articleId": "13rRUwInvsQ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInvsQ",
"doi": "10.1109/TVCG.2013.164",
"abstract": "We present a system that lets analysts use paid crowd workers to explore data sets and helps analysts interactively examine and build upon workers' insights. We take advantage of the fact that, for many types of data, independent crowd workers can readily perform basic analysis tasks like examining views and generating explanations for trends and patterns. However, workers operating in parallel can often generate redundant explanations. Moreover, because workers have different competencies and domain knowledge, some responses are likely to be more plausible than others. To efficiently utilize the crowd's work, analysts must be able to quickly identify and consolidate redundant responses and determine which explanations are the most plausible. In this paper, we demonstrate several crowd-assisted techniques to help analysts make better use of crowdsourced explanations: (1) We explore crowd-assisted strategies that utilize multiple workers to detect redundant explanations. We introduce color clustering with representative selection-a strategy in which multiple workers cluster explanations and we automatically select the most-representative result-and show that it generates clusterings that are as good as those produced by experts. (2) We capture explanation provenance by introducing highlighting tasks and capturing workers' browsing behavior via an embedded web browser, and refine that provenance information via source-review tasks. We expose this information in an explanation-management interface that allows analysts to interactively filter and sort responses, select the most plausible explanations, and decide which to explore further.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present a system that lets analysts use paid crowd workers to explore data sets and helps analysts interactively examine and build upon workers' insights. We take advantage of the fact that, for many types of data, independent crowd workers can readily perform basic analysis tasks like examining views and generating explanations for trends and patterns. However, workers operating in parallel can often generate redundant explanations. Moreover, because workers have different competencies and domain knowledge, some responses are likely to be more plausible than others. To efficiently utilize the crowd's work, analysts must be able to quickly identify and consolidate redundant responses and determine which explanations are the most plausible. In this paper, we demonstrate several crowd-assisted techniques to help analysts make better use of crowdsourced explanations: (1) We explore crowd-assisted strategies that utilize multiple workers to detect redundant explanations. We introduce color clustering with representative selection-a strategy in which multiple workers cluster explanations and we automatically select the most-representative result-and show that it generates clusterings that are as good as those produced by experts. (2) We capture explanation provenance by introducing highlighting tasks and capturing workers' browsing behavior via an embedded web browser, and refine that provenance information via source-review tasks. We expose this information in an explanation-management interface that allows analysts to interactively filter and sort responses, select the most plausible explanations, and decide which to explore further.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present a system that lets analysts use paid crowd workers to explore data sets and helps analysts interactively examine and build upon workers' insights. We take advantage of the fact that, for many types of data, independent crowd workers can readily perform basic analysis tasks like examining views and generating explanations for trends and patterns. However, workers operating in parallel can often generate redundant explanations. Moreover, because workers have different competencies and domain knowledge, some responses are likely to be more plausible than others. To efficiently utilize the crowd's work, analysts must be able to quickly identify and consolidate redundant responses and determine which explanations are the most plausible. In this paper, we demonstrate several crowd-assisted techniques to help analysts make better use of crowdsourced explanations: (1) We explore crowd-assisted strategies that utilize multiple workers to detect redundant explanations. We introduce color clustering with representative selection-a strategy in which multiple workers cluster explanations and we automatically select the most-representative result-and show that it generates clusterings that are as good as those produced by experts. (2) We capture explanation provenance by introducing highlighting tasks and capturing workers' browsing behavior via an embedded web browser, and refine that provenance information via source-review tasks. We expose this information in an explanation-management interface that allows analysts to interactively filter and sort responses, select the most plausible explanations, and decide which to explore further.",
"title": "Identifying Redundancy and Exposing Provenance in Crowdsourced Data Analysis",
"normalizedTitle": "Identifying Redundancy and Exposing Provenance in Crowdsourced Data Analysis",
"fno": "ttg2013122198",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Market Research",
"Redundancy",
"Data Analysis",
"Social Network Services",
"Image Color Analysis",
"Clustering Algorithms",
"Crowdsourcing",
"Market Research",
"Redundancy",
"Data Analysis",
"Social Network Services",
"Image Color Analysis",
"Clustering Algorithms",
"Social Data Analysis"
],
"authors": [
{
"givenName": "Wesley",
"surname": "Willett",
"fullName": "Wesley Willett",
"affiliation": "INRIA, Sophia-Antipolis, France",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shiry",
"surname": "Ginosar",
"fullName": "Shiry Ginosar",
"affiliation": "UC Berkeley, Berkeley, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Avital",
"surname": "Steinitz",
"fullName": "Avital Steinitz",
"affiliation": "UC Berkeley, Berkeley, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bjorn",
"surname": "Hartmann",
"fullName": "Bjorn Hartmann",
"affiliation": "UC Berkeley, Berkeley, CA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Maneesh",
"surname": "Agrawala",
"fullName": "Maneesh Agrawala",
"affiliation": "UC Berkeley, Berkeley, CA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2198-2206",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/cse/2014/7981/0/7981b514",
"title": "A Pilot Crowdsourced City Governance System: CITY FEED",
"doi": null,
"abstractUrl": "/proceedings-article/cse/2014/7981b514/12OmNrIJqqc",
"parentPublication": {
"id": "proceedings/cse/2014/7981/0",
"title": "2014 IEEE 17th International Conference on Computational Science and Engineering (CSE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdcs/2015/7214/0/7214a726",
"title": "Towards Redundancy-Aware Data Utility Maximization in Crowdsourced Sensing with Smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/icdcs/2015/7214a726/12OmNx9nGM3",
"parentPublication": {
"id": "proceedings/icdcs/2015/7214/0",
"title": "2015 IEEE 35th International Conference on Distributed Computing Systems (ICDCS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2015/6683/0/6683b169",
"title": "How to Collect Segmentations for Biomedical Images? A Benchmark Evaluating the Performance of Experts, Crowdsourced Non-experts, and Algorithms",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2015/6683b169/12OmNyqiaQD",
"parentPublication": {
"id": "proceedings/wacv/2015/6683/0",
"title": "2015 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpp/2015/7587/0/7587a899",
"title": "Towards Redundancy-Aware Data Utility Maximization in Crowdsourced Sensing with Smartphones",
"doi": null,
"abstractUrl": "/proceedings-article/icpp/2015/7587a899/12OmNyugyJP",
"parentPublication": {
"id": "proceedings/icpp/2015/7587/0",
"title": "2015 44th International Conference on Parallel Processing (ICPP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/compsac/2017/0367/1/0367a075",
"title": "Who Should Be Selected to Perform a Task in Crowdsourced Testing?",
"doi": null,
"abstractUrl": "/proceedings-article/compsac/2017/0367a075/12OmNyvY9Ag",
"parentPublication": {
"id": "proceedings/compsac/2017/0367/1",
"title": "2017 IEEE 41st Annual Computer Software and Applications Conference (COMPSAC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/csi-se/2018/5733/0/573301a028",
"title": "A Hybrid Simulation Model for Crowdsourced Software Development",
"doi": null,
"abstractUrl": "/proceedings-article/csi-se/2018/573301a028/13bd1gzWkQW",
"parentPublication": {
"id": "proceedings/csi-se/2018/5733/0",
"title": "2018 IEEE/ACM 5th International Workshop on Crowd Sourcing in Software Engineering (CSI-SE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ica/2018/8180/0/08460133",
"title": "Adaptive Selection of Working Conditions for Crowdsourced Tasks",
"doi": null,
"abstractUrl": "/proceedings-article/ica/2018/08460133/13xI8B0j9pd",
"parentPublication": {
"id": "proceedings/ica/2018/8180/0",
"title": "2018 IEEE International Conference on Agents (ICA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tk/2020/07/08663457",
"title": "A Review of Judgment Analysis Algorithms for Crowdsourced Opinions",
"doi": null,
"abstractUrl": "/journal/tk/2020/07/08663457/18exkCGCv5u",
"parentPublication": {
"id": "trans/tk",
"title": "IEEE Transactions on Knowledge & Data Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tq/5555/01/09762803",
"title": "MsDroid: Identifying Malicious Snippets for Android Malware Detection",
"doi": null,
"abstractUrl": "/journal/tq/5555/01/09762803/1CRro0PikU0",
"parentPublication": {
"id": "trans/tq",
"title": "IEEE Transactions on Dependable and Secure Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ts/2021/06/08721154",
"title": "Characterizing Crowds to Better Optimize Worker Recommendation in Crowdsourced Testing",
"doi": null,
"abstractUrl": "/journal/ts/2021/06/08721154/1mq8pEDllQc",
"parentPublication": {
"id": "trans/ts",
"title": "IEEE Transactions on Software Engineering",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122189",
"articleId": "13rRUwdrdSz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122207",
"articleId": "13rRUwI5TQZ",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwI5TQZ",
"doi": "10.1109/TVCG.2013.205",
"abstract": "Spatial organization has been proposed as a compelling approach to externalizing the sensemaking process. However, there are two ways in which space can be provided to the user: by creating a physical workspace that the user can interact with directly, such as can be provided by a large, high-resolution display, or through the use of a virtual workspace that the user navigates using virtual navigation techniques such as zoom and pan. In this study we explicitly examined the use of spatial sensemaking techniques within these two environments. The results demonstrate that these two approaches to providing sensemaking space are not equivalent, and that the greater embodiment afforded by the physical workspace changes how the space is perceived and used, leading to increased externalization of the sensemaking process.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Spatial organization has been proposed as a compelling approach to externalizing the sensemaking process. However, there are two ways in which space can be provided to the user: by creating a physical workspace that the user can interact with directly, such as can be provided by a large, high-resolution display, or through the use of a virtual workspace that the user navigates using virtual navigation techniques such as zoom and pan. In this study we explicitly examined the use of spatial sensemaking techniques within these two environments. The results demonstrate that these two approaches to providing sensemaking space are not equivalent, and that the greater embodiment afforded by the physical workspace changes how the space is perceived and used, leading to increased externalization of the sensemaking process.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Spatial organization has been proposed as a compelling approach to externalizing the sensemaking process. However, there are two ways in which space can be provided to the user: by creating a physical workspace that the user can interact with directly, such as can be provided by a large, high-resolution display, or through the use of a virtual workspace that the user navigates using virtual navigation techniques such as zoom and pan. In this study we explicitly examined the use of spatial sensemaking techniques within these two environments. The results demonstrate that these two approaches to providing sensemaking space are not equivalent, and that the greater embodiment afforded by the physical workspace changes how the space is perceived and used, leading to increased externalization of the sensemaking process.",
"title": "The Impact of Physical Navigation on Spatial Organization for Sensemaking",
"normalizedTitle": "The Impact of Physical Navigation on Spatial Organization for Sensemaking",
"fno": "ttg2013122207",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Navigation",
"Visual Analytics",
"Browsers",
"Image Color Analysis",
"Embodiment Large",
"Navigation",
"Visual Analytics",
"Browsers",
"Image Color Analysis",
"High Resolution Displays",
"Sensemaking",
"Visual Analytics",
"Physical Navigation"
],
"authors": [
{
"givenName": "Christopher",
"surname": "Andrews",
"fullName": "Christopher Andrews",
"affiliation": "Middlebury Coll., USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "North",
"fullName": "Chris North",
"affiliation": "Virginia Tech, Blacksburg, VA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2207-2216",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2012/4752/0/06400559",
"title": "Analyst's Workspace: An embodied sensemaking environment for large, high-resolution displays",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400559/12OmNwF0BJt",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400558",
"title": "SocialNetSense: Supporting sensemaking of social and structural features in networks with interactive visualization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400558/12OmNxdm4ya",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2016/5661/0/07883515",
"title": "SenseMap: Supporting browser-based online sensemaking through analytic provenance",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2016/07883515/12OmNz2kqj9",
"parentPublication": {
"id": "proceedings/vast/2016/5661/0",
"title": "2016 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/eisic/2015/8657/0/8657a177",
"title": "Guidelines for Sensemaking in Intelligence Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/eisic/2015/8657a177/12OmNzTYBR1",
"parentPublication": {
"id": "proceedings/eisic/2015/8657/0",
"title": "2015 European Intelligence and Security Informatics Conference (EISIC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194834",
"title": "SensePath: Understanding the Sensemaking Process Through Analytic Provenance",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194834/13rRUEgarnM",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585484",
"title": "CRICTO: Supporting Sensemaking through Crowdsourced Information Schematization",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585484/17D45Wc1ILV",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09894094",
"title": "Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09894094/1GIqpC6j7na",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2018/6861/0/08802424",
"title": "The Effect of Semantic Interaction on Foraging in Text Analysis",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2018/08802424/1cJ6XAJz7gc",
"parentPublication": {
"id": "proceedings/vast/2018/6861/0",
"title": "2018 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2019/06/08889811",
"title": "Provenance Analysis for Sensemaking",
"doi": null,
"abstractUrl": "/magazine/cg/2019/06/08889811/1eBul1FAEIE",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2021/3335/0/333500a181",
"title": "Narrative Sensemaking: Strategies for Narrative Maps Construction",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2021/333500a181/1yXuj3PJXRm",
"parentPublication": {
"id": "proceedings/vis/2021/3335/0",
"title": "2021 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122198",
"articleId": "13rRUwInvsQ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122217",
"articleId": "13rRUxlgy3I",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgGi",
"name": "ttg2013122207s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122207s.zip",
"extension": "zip",
"size": "416 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxlgy3I",
"doi": "10.1109/TVCG.2013.211",
"abstract": "This research aims to develop design guidelines for systems that support investigators and analysts in the exploration and assembly of evidence and inferences. We focus here on the problem of identifying candidate 'influencers' within a community of practice. To better understand this problem and its related cognitive and interaction needs, we conducted a user study using a system called INVISQUE (INteractive Visual Search and QUery Environment) loaded with content from the ACM Digital Library. INVISQUE supports search and manipulation of results over a freeform infinite 'canvas'. The study focuses on the representations user create and their reasoning process. It also draws on some pre-established theories and frameworks related to sense-making and cognitive work in general, which we apply as a 'theoretical lenses' to consider findings and articulate solutions. Analysing the user-study data in the light of these provides some understanding of how the high-level problem of identifying key players within a domain can translate into lower-level questions and interactions. This, in turn, has informed our understanding of representation and functionality needs at a level of description which abstracts away from the specifics of the problem at hand to the class of problems of interest. We consider the study outcomes from the perspective of implications for design.",
"abstracts": [
{
"abstractType": "Regular",
"content": "This research aims to develop design guidelines for systems that support investigators and analysts in the exploration and assembly of evidence and inferences. We focus here on the problem of identifying candidate 'influencers' within a community of practice. To better understand this problem and its related cognitive and interaction needs, we conducted a user study using a system called INVISQUE (INteractive Visual Search and QUery Environment) loaded with content from the ACM Digital Library. INVISQUE supports search and manipulation of results over a freeform infinite 'canvas'. The study focuses on the representations user create and their reasoning process. It also draws on some pre-established theories and frameworks related to sense-making and cognitive work in general, which we apply as a 'theoretical lenses' to consider findings and articulate solutions. Analysing the user-study data in the light of these provides some understanding of how the high-level problem of identifying key players within a domain can translate into lower-level questions and interactions. This, in turn, has informed our understanding of representation and functionality needs at a level of description which abstracts away from the specifics of the problem at hand to the class of problems of interest. We consider the study outcomes from the perspective of implications for design.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "This research aims to develop design guidelines for systems that support investigators and analysts in the exploration and assembly of evidence and inferences. We focus here on the problem of identifying candidate 'influencers' within a community of practice. To better understand this problem and its related cognitive and interaction needs, we conducted a user study using a system called INVISQUE (INteractive Visual Search and QUery Environment) loaded with content from the ACM Digital Library. INVISQUE supports search and manipulation of results over a freeform infinite 'canvas'. The study focuses on the representations user create and their reasoning process. It also draws on some pre-established theories and frameworks related to sense-making and cognitive work in general, which we apply as a 'theoretical lenses' to consider findings and articulate solutions. Analysing the user-study data in the light of these provides some understanding of how the high-level problem of identifying key players within a domain can translate into lower-level questions and interactions. This, in turn, has informed our understanding of representation and functionality needs at a level of description which abstracts away from the specifics of the problem at hand to the class of problems of interest. We consider the study outcomes from the perspective of implications for design.",
"title": "Using Interactive Visual Reasoning to Support Sense-Making: Implications for Design",
"normalizedTitle": "Using Interactive Visual Reasoning to Support Sense-Making: Implications for Design",
"fno": "ttg2013122217",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"User Interfaces",
"Design Methodology",
"Query Processing",
"Interaction",
"Visual Analytics",
"User Interfaces",
"Design Methodology",
"Query Processing",
"Interface Design",
"Visual Analytics",
"Sense Making",
"Dataframe Mode",
"Evaluation",
"Reasoning",
"Analysis"
],
"authors": [
{
"givenName": "Neesha",
"surname": "Kodagoda",
"fullName": "Neesha Kodagoda",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Simon",
"surname": "Attfield",
"fullName": "Simon Attfield",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "B. L. William",
"surname": "Wong",
"fullName": "B. L. William Wong",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Chris",
"surname": "Rooney",
"fullName": "Chris Rooney",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Sharmin",
"surname": "Choudhury",
"fullName": "Sharmin Choudhury",
"affiliation": "Middlesex Univ., London, UK",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2217-2226",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/hicss/2013/4892/0/4892b495",
"title": "A Role for Reasoning in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892b495/12OmNqJ8tq4",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2013/4892/0/4892c416",
"title": "Visual Analytics for Public Health: Supporting Knowledge Construction and Decision-Making",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2013/4892c416/12OmNrJiCNq",
"parentPublication": {
"id": "proceedings/hicss/2013/4892/0",
"title": "2013 46th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2011/9618/0/05718616",
"title": "Pair Analytics: Capturing Reasoning Processes in Collaborative Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2011/05718616/12OmNvAiShB",
"parentPublication": {
"id": "proceedings/hicss/2011/9618/0",
"title": "2011 44th Hawaii International Conference on System Sciences",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ems/2008/3325/0/3325a424",
"title": "Making History Happen: Spatiotemporal Data Visualization for Historians",
"doi": null,
"abstractUrl": "/proceedings-article/ems/2008/3325a424/12OmNyUWR35",
"parentPublication": {
"id": "proceedings/ems/2008/3325/0",
"title": "Computer Modeling and Simulation, UKSIM European Symposium on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2009/06/ttg2009061335",
"title": "Interactive Visual Optimization and Analysis for RFID Benchmarking",
"doi": null,
"abstractUrl": "/journal/tg/2009/06/ttg2009061335/13rRUNvgziz",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2006/06/v1386",
"title": "Generating Graphs for Visual Analytics through Interactive Sketching",
"doi": null,
"abstractUrl": "/journal/tg/2006/06/v1386/13rRUwcS1CP",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192676",
"title": "VAiRoma: A Visual Analytics System for Making Sense of Places, Times, and Events in Roman History",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192676/13rRUxDqS8l",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2017/3163/0/08585665",
"title": "The Anchoring Effect in Decision-Making with Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2017/08585665/17D45WZZ7CL",
"parentPublication": {
"id": "proceedings/vast/2017/3163/0",
"title": "2017 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2020/03/09082751",
"title": "Putting the “I” in Interaction: Interactive Interfaces Personalized to Individuals",
"doi": null,
"abstractUrl": "/magazine/cg/2020/03/09082751/1jrRWZgvV72",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09462502",
"title": "Collaborative Sense-Making in Genomic Research: The Role of Visualisation",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09462502/1uDSzIxXFwA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122207",
"articleId": "13rRUwI5TQZ",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122227",
"articleId": "13rRUwjXZSd",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwjXZSd",
"doi": "10.1109/TVCG.2013.200",
"abstract": "Electronic Health Records (EHRs) have emerged as a cost-effective data source for conducting medical research. The difficulty in using EHRs for research purposes, however, is that both patient selection and record analysis must be conducted across very large, and typically very noisy datasets. Our previous work introduced EventFlow, a visualization tool that transforms an entire dataset of temporal event records into an aggregated display, allowing researchers to analyze population-level patterns and trends. As datasets become larger and more varied, however, it becomes increasingly difficult to provide a succinct, summarizing display. This paper presents a series of user-driven data simplifications that allow researchers to pare event records down to their core elements. Furthermore, we present a novel metric for measuring visual complexity, and a language for codifying disjoint strategies into an overarching simplification framework. These simplifications were used by real-world researchers to gain new and valuable insights from initially overwhelming datasets.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Electronic Health Records (EHRs) have emerged as a cost-effective data source for conducting medical research. The difficulty in using EHRs for research purposes, however, is that both patient selection and record analysis must be conducted across very large, and typically very noisy datasets. Our previous work introduced EventFlow, a visualization tool that transforms an entire dataset of temporal event records into an aggregated display, allowing researchers to analyze population-level patterns and trends. As datasets become larger and more varied, however, it becomes increasingly difficult to provide a succinct, summarizing display. This paper presents a series of user-driven data simplifications that allow researchers to pare event records down to their core elements. Furthermore, we present a novel metric for measuring visual complexity, and a language for codifying disjoint strategies into an overarching simplification framework. These simplifications were used by real-world researchers to gain new and valuable insights from initially overwhelming datasets.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Electronic Health Records (EHRs) have emerged as a cost-effective data source for conducting medical research. The difficulty in using EHRs for research purposes, however, is that both patient selection and record analysis must be conducted across very large, and typically very noisy datasets. Our previous work introduced EventFlow, a visualization tool that transforms an entire dataset of temporal event records into an aggregated display, allowing researchers to analyze population-level patterns and trends. As datasets become larger and more varied, however, it becomes increasingly difficult to provide a succinct, summarizing display. This paper presents a series of user-driven data simplifications that allow researchers to pare event records down to their core elements. Furthermore, we present a novel metric for measuring visual complexity, and a language for codifying disjoint strategies into an overarching simplification framework. These simplifications were used by real-world researchers to gain new and valuable insights from initially overwhelming datasets.",
"title": "Temporal Event Sequence Simplification",
"normalizedTitle": "Temporal Event Sequence Simplification",
"fno": "ttg2013122227",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Complexity Theory",
"Electronic Medical Records",
"Data Mining",
"Data Visualization",
"Market Research",
"Electronic Heath Records",
"Complexity Theory",
"Electronic Medical Records",
"Data Mining",
"Data Visualization",
"Market Research",
"Temporal Query",
"Event Sequences",
"Simplification"
],
"authors": [
{
"givenName": "Megan",
"surname": "Monroe",
"fullName": "Megan Monroe",
"affiliation": "Univ. of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Rongjian Lan",
"fullName": "Rongjian Lan",
"affiliation": "Univ. of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": null,
"surname": "Hanseung Lee",
"fullName": "Hanseung Lee",
"affiliation": "Univ. of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Catherine",
"surname": "Plaisant",
"fullName": "Catherine Plaisant",
"affiliation": "Univ. of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Ben",
"surname": "Shneiderman",
"fullName": "Ben Shneiderman",
"affiliation": "Univ. of Maryland, College Park, MD, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2227-2236",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/ichi/2018/5377/0/537701a461",
"title": "Mining Temporal Patterns from Sequential Healthcare Data",
"doi": null,
"abstractUrl": "/proceedings-article/ichi/2018/537701a461/12OmNwudQUA",
"parentPublication": {
"id": "proceedings/ichi/2018/5377/0",
"title": "2018 IEEE International Conference on Healthcare Informatics (ICHI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0/07847074",
"title": "MTPGraph: A Data-Driven Approach to Predict Medical Risk Based on Temporal Profile Graph",
"doi": null,
"abstractUrl": "/proceedings-article/trustcom-bigdatase-i-spa/2016/07847074/12OmNxw5Bcv",
"parentPublication": {
"id": "proceedings/trustcom-bigdatase-i-spa/2016/3205/0",
"title": "2016 IEEE Trustcom/BigDataSE/ISPA",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hicss/2014/2504/0/2504a656",
"title": "Meaningful Use of Electronic Health Records for Physician Collaboration: A Patient Centered Health Care Perspective",
"doi": null,
"abstractUrl": "/proceedings-article/hicss/2014/2504a656/12OmNzayNmc",
"parentPublication": {
"id": "proceedings/hicss/2014/2504/0",
"title": "2014 47th Hawaii International Conference on System Sciences (HICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06875996",
"title": "DecisionFlow: Visual Analytics for High-Dimensional Temporal Event Sequence Data",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06875996/13rRUxE04tA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2012/12/mco2012120073",
"title": "Factorizing Event Sequences",
"doi": null,
"abstractUrl": "/magazine/co/2012/12/mco2012120073/13rRUyft7yh",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccict/2022/7224/0/722400a037",
"title": "A Review on security schemes for Electronic Health Records",
"doi": null,
"abstractUrl": "/proceedings-article/ccict/2022/722400a037/1HpE1MNGQWA",
"parentPublication": {
"id": "proceedings/ccict/2022/7224/0",
"title": "2022 Fifth International Conference on Computational Intelligence and Communication Technologies (CCICT)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/springsim/2020/370/0/09185464",
"title": "Handling the Missing Data Problem in Electronic Health Records for Cancer Prediction",
"doi": null,
"abstractUrl": "/proceedings-article/springsim/2020/09185464/1mP61mbo3mM",
"parentPublication": {
"id": "proceedings/springsim/2020/370/0",
"title": "2020 Spring Simulation Conference (SpringSim)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/blockchain/2020/0495/0/049500a456",
"title": "Secured Inter-Healthcare Patient Health Records Exchange Architecture",
"doi": null,
"abstractUrl": "/proceedings-article/blockchain/2020/049500a456/1pttSXtqMne",
"parentPublication": {
"id": "proceedings/blockchain/2020/0495/0",
"title": "2020 IEEE International Conference on Blockchain (Blockchain)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a412",
"title": "BiteNet: Bidirectional Temporal Encoder Network to Predict Medical Outcomes",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a412/1r54FlmJyVi",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2021/2067/0/206700a014",
"title": "Interactive Cohort Analysis and Hypothesis Discovery by Exploring Temporal Patterns in Population-Level Health Records",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2021/206700a014/1z0yjD3x1VC",
"parentPublication": {
"id": "proceedings/vahc/2021/2067/0",
"title": "2021 IEEE Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122217",
"articleId": "13rRUxlgy3I",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122237",
"articleId": "13rRUxAAT0R",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxAAT0R",
"doi": "10.1109/TVCG.2013.222",
"abstract": "Model selection in time series analysis is a challenging task for domain experts in many application areas such as epidemiology, economy, or environmental sciences. The methodology used for this task demands a close combination of human judgement and automated computation. However, statistical software tools do not adequately support this combination through interactive visual interfaces. We propose a Visual Analytics process to guide domain experts in this task. For this purpose, we developed the TiMoVA prototype that implements this process based on user stories and iterative expert feedback on user experience. The prototype was evaluated by usage scenarios with an example dataset from epidemiology and interviews with two external domain experts in statistics. The insights from the experts' feedback and the usage scenarios show that TiMoVA is able to support domain experts in model selection tasks through interactive visual interfaces with short feedback cycles.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Model selection in time series analysis is a challenging task for domain experts in many application areas such as epidemiology, economy, or environmental sciences. The methodology used for this task demands a close combination of human judgement and automated computation. However, statistical software tools do not adequately support this combination through interactive visual interfaces. We propose a Visual Analytics process to guide domain experts in this task. For this purpose, we developed the TiMoVA prototype that implements this process based on user stories and iterative expert feedback on user experience. The prototype was evaluated by usage scenarios with an example dataset from epidemiology and interviews with two external domain experts in statistics. The insights from the experts' feedback and the usage scenarios show that TiMoVA is able to support domain experts in model selection tasks through interactive visual interfaces with short feedback cycles.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Model selection in time series analysis is a challenging task for domain experts in many application areas such as epidemiology, economy, or environmental sciences. The methodology used for this task demands a close combination of human judgement and automated computation. However, statistical software tools do not adequately support this combination through interactive visual interfaces. We propose a Visual Analytics process to guide domain experts in this task. For this purpose, we developed the TiMoVA prototype that implements this process based on user stories and iterative expert feedback on user experience. The prototype was evaluated by usage scenarios with an example dataset from epidemiology and interviews with two external domain experts in statistics. The insights from the experts' feedback and the usage scenarios show that TiMoVA is able to support domain experts in model selection tasks through interactive visual interfaces with short feedback cycles.",
"title": "Visual Analytics for Model Selection in Time Series Analysis",
"normalizedTitle": "Visual Analytics for Model Selection in Time Series Analysis",
"fno": "ttg2013122237",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Time Series Analysis",
"Analytical Models",
"Autoregressive Processes",
"Mathematical Model",
"Data Models",
"Time Series Analysis",
"Analytical Models",
"Autoregressive Processes",
"Mathematical Model",
"Data Models",
"Coordinated Multiple Views",
"Visual Analytics",
"Model Selection",
"Visual Interaction"
],
"authors": [
{
"givenName": "Markus",
"surname": "Bogl",
"fullName": "Markus Bogl",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Aigner",
"fullName": "Wolfgang Aigner",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Peter",
"surname": "Filzmoser",
"fullName": "Peter Filzmoser",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Lammarsch",
"fullName": "Tim Lammarsch",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Silvia",
"surname": "Miksch",
"fullName": "Silvia Miksch",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Rind",
"fullName": "Alexander Rind",
"affiliation": "Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2237-2246",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/dexa/2011/0982/0/06059835",
"title": "Using R, WEKA and RapidMiner in Time Series Analysis of Sensor Data for Structural Health Monitoring",
"doi": null,
"abstractUrl": "/proceedings-article/dexa/2011/06059835/12OmNvnwVoH",
"parentPublication": {
"id": "proceedings/dexa/2011/0982/0",
"title": "2011 22nd International Workshop on Database and Expert Systems Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2015/8493/0/8493b441",
"title": "Learning the Number of Autoregressive Mixtures in Time Series Using the Gap Statistics",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2015/8493b441/12OmNy9Prhy",
"parentPublication": {
"id": "proceedings/icdmw/2015/8493/0",
"title": "2015 IEEE International Conference on Data Mining Workshop (ICDMW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192719",
"title": "Visual Analytics for Development and Evaluation of Order Selection Criteria for Autoregressive Processes",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192719/13rRUwfZBVo",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2017/01/07536111",
"title": "Visplause: Visual Data Quality Assessment of Many Time Series Using Plausibility Checks",
"doi": null,
"abstractUrl": "/journal/tg/2017/01/07536111/13rRUxD9h5b",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2018/7325/0/732500a342",
"title": "Visual Analytics Interface for Time Series Data Based on Trajectory Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2018/732500a342/17D45WODasq",
"parentPublication": {
"id": "proceedings/wi/2018/7325/0",
"title": "2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2018/7202/0/720200a251",
"title": "Visual Analytics for Decomposing Temporal Event Series of Production Lines",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2018/720200a251/17D45WcjjRK",
"parentPublication": {
"id": "proceedings/iv/2018/7202/0",
"title": "2018 22nd International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08807264",
"title": "Tac-Simur: Tactic-based Simulative Visual Analytics of Table Tennis",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08807264/1cG6vo24hRC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/ccgrid/2020/6095/0/09139621",
"title": "Tracking scientific simulation using online time-series modelling",
"doi": null,
"abstractUrl": "/proceedings-article/ccgrid/2020/09139621/1lsspXaXKHS",
"parentPublication": {
"id": "proceedings/ccgrid/2020/6095/0",
"title": "2020 20th IEEE/ACM International Symposium on Cluster, Cloud and Internet Computing (CCGRID)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09224197",
"title": "MultiSegVA: Using Visual Analytics to Segment Biologging Time Series on Multiple Scales",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09224197/1nV6Z3fZjUY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/trex/2020/8514/0/851400a016",
"title": "Towards Trust-Augmented Visual Analytics for Data-Driven Energy Modeling",
"doi": null,
"abstractUrl": "/proceedings-article/trex/2020/851400a016/1pXm2UrVXMc",
"parentPublication": {
"id": "proceedings/trex/2020/8514/0",
"title": "2020 IEEE Workshop on TRust and EXpertise in Visual Analytics (TREX)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122227",
"articleId": "13rRUwjXZSd",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122247",
"articleId": "13rRUxZ0o1A",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYesRx",
"name": "ttg2013122237s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122237s.zip",
"extension": "zip",
"size": "879 kB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTYesRw",
"name": "ttg2013122237s.mov",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122237s.mov",
"extension": "mov",
"size": "20.3 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
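The TiMoVA record above describes supporting domain experts in ARIMA-style model selection with short feedback cycles. As a rough stand-in for the computation such a tool drives (not TiMoVA's actual implementation, which is interactive and visual), the sketch below fits a few candidate ARIMA orders with statsmodels and compares them by information criteria; the series and candidate orders are placeholders.

```python
# Sketch: compare candidate ARIMA orders by information criteria.
# Not TiMoVA itself; just the model-selection loop its interface supports.
import numpy as np
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA

def rank_arima_orders(series: pd.Series, candidate_orders):
    """Fit each (p, d, q) candidate and return a table sorted by AIC."""
    rows = []
    for order in candidate_orders:
        try:
            result = ARIMA(series, order=order).fit()
            rows.append({"order": order, "aic": result.aic, "bic": result.bic})
        except Exception as exc:  # some orders may fail to converge
            rows.append({"order": order, "aic": None, "bic": None, "error": str(exc)})
    return pd.DataFrame(rows).sort_values("aic", na_position="last")

if __name__ == "__main__":
    # Placeholder series; an epidemiological case-count series would go here.
    rng = np.random.default_rng(0)
    y = pd.Series(rng.normal(size=200)).cumsum()
    print(rank_arima_orders(y, [(1, 1, 0), (0, 1, 1), (1, 1, 1), (2, 1, 2)]))
```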
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxZ0o1A",
"doi": "10.1109/TVCG.2013.206",
"abstract": "Time-oriented data play an essential role in many Visual Analytics scenarios such as extracting medical insights from collections of electronic health records or identifying emerging problems and vulnerabilities in network traffic. However, many software libraries for Visual Analytics treat time as a flat numerical data type and insufficiently tackle the complexity of the time domain such as calendar granularities and intervals. Therefore, developers of advanced Visual Analytics designs need to implement temporal foundations in their application code over and over again. We present TimeBench, a software library that provides foundational data structures and algorithms for time-oriented data in Visual Analytics. Its expressiveness and developer accessibility have been evaluated through application examples demonstrating a variety of challenges with time-oriented data and long-term developer studies conducted in the scope of research and student projects.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Time-oriented data play an essential role in many Visual Analytics scenarios such as extracting medical insights from collections of electronic health records or identifying emerging problems and vulnerabilities in network traffic. However, many software libraries for Visual Analytics treat time as a flat numerical data type and insufficiently tackle the complexity of the time domain such as calendar granularities and intervals. Therefore, developers of advanced Visual Analytics designs need to implement temporal foundations in their application code over and over again. We present TimeBench, a software library that provides foundational data structures and algorithms for time-oriented data in Visual Analytics. Its expressiveness and developer accessibility have been evaluated through application examples demonstrating a variety of challenges with time-oriented data and long-term developer studies conducted in the scope of research and student projects.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Time-oriented data play an essential role in many Visual Analytics scenarios such as extracting medical insights from collections of electronic health records or identifying emerging problems and vulnerabilities in network traffic. However, many software libraries for Visual Analytics treat time as a flat numerical data type and insufficiently tackle the complexity of the time domain such as calendar granularities and intervals. Therefore, developers of advanced Visual Analytics designs need to implement temporal foundations in their application code over and over again. We present TimeBench, a software library that provides foundational data structures and algorithms for time-oriented data in Visual Analytics. Its expressiveness and developer accessibility have been evaluated through application examples demonstrating a variety of challenges with time-oriented data and long-term developer studies conducted in the scope of research and student projects.",
"title": "TimeBench: A Data Model and Software Library for Visual Analytics of Time-Oriented Data",
"normalizedTitle": "TimeBench: A Data Model and Software Library for Visual Analytics of Time-Oriented Data",
"fno": "ttg2013122247",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Data Models",
"Data Structures",
"Data Visualization",
"Time Domain Analysis",
"Time",
"Visual Analytics",
"Data Models",
"Data Structures",
"Data Visualization",
"Time Domain Analysis",
"Temporal Data",
"Visual Analytics",
"Information Visualization",
"Toolkits",
"Software Infrastructure"
],
"authors": [
{
"givenName": "Alexander",
"surname": "Rind",
"fullName": "Alexander Rind",
"affiliation": "Inst. of Software Technol. & Interactive Syst., Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tim",
"surname": "Lammarsch",
"fullName": "Tim Lammarsch",
"affiliation": "Inst. of Software Technol. & Interactive Syst., Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Aigner",
"fullName": "Wolfgang Aigner",
"affiliation": "Inst. of Software Technol. & Interactive Syst., Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bilal",
"surname": "Alsallakh",
"fullName": "Bilal Alsallakh",
"affiliation": "Inst. of Software Technol. & Interactive Syst., Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Silvia",
"surname": "Miksch",
"fullName": "Silvia Miksch",
"affiliation": "Inst. of Software Technol. & Interactive Syst., Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2247-2256",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/vast/2014/6227/0/07042498",
"title": "TimeGraph: A data management framework for visual analytics of large multivariate time-oriented networks",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042498/12OmNBlXs3i",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2012/4752/0/06400514",
"title": "Big data exploration through visual analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2012/06400514/12OmNC3XhwY",
"parentPublication": {
"id": "proceedings/vast/2012/4752/0",
"title": "2012 IEEE Conference on Visual Analytics Science and Technology (VAST 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vast/2014/6227/0/07042477",
"title": "Feature-driven visual analytics of soccer data",
"doi": null,
"abstractUrl": "/proceedings-article/vast/2014/07042477/12OmNxcMSiK",
"parentPublication": {
"id": "proceedings/vast/2014/6227/0",
"title": "2014 IEEE Conference on Visual Analytics Science and Technology (VAST)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/hpcc/2016/4297/0/07828556",
"title": "Agile Visual Analytics in Data Science Systems",
"doi": null,
"abstractUrl": "/proceedings-article/hpcc/2016/07828556/12OmNyPQ4Qw",
"parentPublication": {
"id": "proceedings/hpcc/2016/4297/0",
"title": "2016 IEEE 18th International Conference on High-Performance Computing and Communications, IEEE 14th International Conference on Smart City, and IEEE 2nd International Conference on Data Science and Systems (HPCC/SmartCity/DSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/pacificvis/2014/2874/0/2874a352",
"title": "Visualization for Visual Analytics: Micro-visualization, Abstraction, and Physical Appeal",
"doi": null,
"abstractUrl": "/proceedings-article/pacificvis/2014/2874a352/12OmNySG3Oy",
"parentPublication": {
"id": "proceedings/pacificvis/2014/2874/0",
"title": "2014 IEEE Pacific Visualization Symposium (PacificVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdmw/2012/4925/0/4925a894",
"title": "Cubix: A Visual Analytics Tool for Conceptual and Semantic Data",
"doi": null,
"abstractUrl": "/proceedings-article/icdmw/2012/4925a894/12OmNz5JCf2",
"parentPublication": {
"id": "proceedings/icdmw/2012/4925/0",
"title": "2012 IEEE 12th International Conference on Data Mining Workshops",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2015/06/mcg2015060069",
"title": "Visual Analytics for MOOC Data",
"doi": null,
"abstractUrl": "/magazine/cg/2015/06/mcg2015060069/13rRUwInvMP",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/co/2013/08/mco2013080090",
"title": "Bixplorer: Visual Analytics with Biclusters",
"doi": null,
"abstractUrl": "/magazine/co/2013/08/mco2013080090/13rRUwcAqvs",
"parentPublication": {
"id": "mags/co",
"title": "Computer",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2009/02/mcg2009020084",
"title": "Demystifying Visual Analytics",
"doi": null,
"abstractUrl": "/magazine/cg/2009/02/mcg2009020084/13rRUy3gn3z",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2019/2423/0/08945032",
"title": "Towards a Structural Framework for Explicit Domain Knowledge in Visual Analytics",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2019/08945032/1grQj8QowOA",
"parentPublication": {
"id": "proceedings/vahc/2019/2423/0",
"title": "2019 IEEE Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122237",
"articleId": "13rRUxAAT0R",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122257",
"articleId": "13rRUwInuWu",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRUv",
"name": "ttg2013122247s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122247s.mp4",
"extension": "mp4",
"size": "5.66 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
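TimeBench, per the record above, is a Java software library whose contribution is treating calendar granularities and intervals as first-class temporal structures rather than flat numbers. The snippet below is only a conceptual Python sketch of that idea, not TimeBench's actual API; the class and function names are invented for illustration.

```python
# Conceptual sketch (not TimeBench's Java API): temporal elements that carry
# an explicit calendar granularity instead of being flat numeric timestamps.
from dataclasses import dataclass
from datetime import date, timedelta

@dataclass(frozen=True)
class Interval:
    start: date
    end: date          # inclusive
    granularity: str   # e.g. "day", "week", "month"

    def contains(self, other: "Interval") -> bool:
        return self.start <= other.start and other.end <= self.end

def week_of(d: date) -> Interval:
    """Map an instant to the calendar week (Mon-Sun) that contains it."""
    monday = d - timedelta(days=d.weekday())
    return Interval(monday, monday + timedelta(days=6), "week")

# Usage: the same instant can be viewed at different granularities.
day = Interval(date(2013, 12, 2), date(2013, 12, 2), "day")
print(week_of(date(2013, 12, 2)).contains(day))  # True
```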
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInuWu",
"doi": "10.1109/TVCG.2013.178",
"abstract": "We present MotionExplorer, an exploratory search and analysis system for sequences of human motion in large motion capture data collections. This special type of multivariate time series data is relevant in many research fields including medicine, sports and animation. Key tasks in working with motion data include analysis of motion states and transitions, and synthesis of motion vectors by interpolation and combination. In the practice of research and application of human motion data, challenges exist in providing visual summaries and drill-down functionality for handling large motion data collections. We find that this domain can benefit from appropriate visual retrieval and analysis support to handle these tasks in presence of large motion data. To address this need, we developed MotionExplorer together with domain experts as an exploratory search system based on interactive aggregation and visualization of motion states as a basis for data navigation, exploration, and search. Based on an overview-first type visualization, users are able to search for interesting sub-sequences of motion based on a query-by-example metaphor, and explore search results by details on demand. We developed MotionExplorer in close collaboration with the targeted users who are researchers working on human motion synthesis and analysis, including a summative field study. Additionally, we conducted a laboratory design study to substantially improve MotionExplorer towards an intuitive, usable and robust design. MotionExplorer enables the search in human motion capture data with only a few mouse clicks. The researchers unanimously confirm that the system can efficiently support their work.",
"abstracts": [
{
"abstractType": "Regular",
"content": "We present MotionExplorer, an exploratory search and analysis system for sequences of human motion in large motion capture data collections. This special type of multivariate time series data is relevant in many research fields including medicine, sports and animation. Key tasks in working with motion data include analysis of motion states and transitions, and synthesis of motion vectors by interpolation and combination. In the practice of research and application of human motion data, challenges exist in providing visual summaries and drill-down functionality for handling large motion data collections. We find that this domain can benefit from appropriate visual retrieval and analysis support to handle these tasks in presence of large motion data. To address this need, we developed MotionExplorer together with domain experts as an exploratory search system based on interactive aggregation and visualization of motion states as a basis for data navigation, exploration, and search. Based on an overview-first type visualization, users are able to search for interesting sub-sequences of motion based on a query-by-example metaphor, and explore search results by details on demand. We developed MotionExplorer in close collaboration with the targeted users who are researchers working on human motion synthesis and analysis, including a summative field study. Additionally, we conducted a laboratory design study to substantially improve MotionExplorer towards an intuitive, usable and robust design. MotionExplorer enables the search in human motion capture data with only a few mouse clicks. The researchers unanimously confirm that the system can efficiently support their work.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "We present MotionExplorer, an exploratory search and analysis system for sequences of human motion in large motion capture data collections. This special type of multivariate time series data is relevant in many research fields including medicine, sports and animation. Key tasks in working with motion data include analysis of motion states and transitions, and synthesis of motion vectors by interpolation and combination. In the practice of research and application of human motion data, challenges exist in providing visual summaries and drill-down functionality for handling large motion data collections. We find that this domain can benefit from appropriate visual retrieval and analysis support to handle these tasks in presence of large motion data. To address this need, we developed MotionExplorer together with domain experts as an exploratory search system based on interactive aggregation and visualization of motion states as a basis for data navigation, exploration, and search. Based on an overview-first type visualization, users are able to search for interesting sub-sequences of motion based on a query-by-example metaphor, and explore search results by details on demand. We developed MotionExplorer in close collaboration with the targeted users who are researchers working on human motion synthesis and analysis, including a summative field study. Additionally, we conducted a laboratory design study to substantially improve MotionExplorer towards an intuitive, usable and robust design. MotionExplorer enables the search in human motion capture data with only a few mouse clicks. The researchers unanimously confirm that the system can efficiently support their work.",
"title": "MotionExplorer: Exploratory Search in Human Motion Capture Data Based on Hierarchical Aggregation",
"normalizedTitle": "MotionExplorer: Exploratory Search in Human Motion Capture Data Based on Hierarchical Aggregation",
"fno": "ttg2013122257",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Visual Analytics",
"Data Visualization",
"Time Series Analysis",
"Databases",
"Data Collection",
"Data Aggregation",
"Visual Analytics",
"Data Visualization",
"Time Series Analysis",
"Databases",
"Data Collection",
"Cluster Glyph",
"Visual Analytics",
"Exploratory Search",
"Multivariate Time Series",
"Motion Capture Data"
],
"authors": [
{
"givenName": "Jurgen",
"surname": "Bernard",
"fullName": "Jurgen Bernard",
"affiliation": "Fraunhofer Inst. for Comput. Graphics Res. Darmstadt, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nils",
"surname": "Wilhelm",
"fullName": "Nils Wilhelm",
"affiliation": "Fraunhofer Inst. for Comput. Graphics Res. Darmstadt, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Bjorn",
"surname": "Kruger",
"fullName": "Bjorn Kruger",
"affiliation": "Inst. of Comput. Sci., Univ. Bonn, Bonn, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Thorsten",
"surname": "May",
"fullName": "Thorsten May",
"affiliation": "Fraunhofer Inst. for Comput. Graphics Res. Darmstadt, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Tobias",
"surname": "Schreck",
"fullName": "Tobias Schreck",
"affiliation": "Data Anal. & Visualization Group, Univ. Konstanz, Konstanz, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Jorn",
"surname": "Kohlhammer",
"fullName": "Jorn Kohlhammer",
"affiliation": "Fraunhofer Inst. for Comput. Graphics Res. Darmstadt, Darmstadt, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2257-2266",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iv/2015/7568/0/7568a300",
"title": "A Visualization-Analytics-Interaction Workflow Framework for Exploratory and Explanatory Search on Geo-located Search Data Using the Meme Media Digital Dashboard",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2015/7568a300/12OmNqGA5ep",
"parentPublication": {
"id": "proceedings/iv/2015/7568/0",
"title": "2015 19th International Conference on Information Visualisation (iV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2004/8603/2/01394504",
"title": "Feature preserving motion compression based on hierarchical curve simplification",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2004/01394504/12OmNxX3upu",
"parentPublication": {
"id": "proceedings/icme/2004/8603/2",
"title": "2004 IEEE International Conference on Multimedia and Expo (ICME)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iv/2016/8942/0/8942a261",
"title": "A Need for Exploratory Visual Analytics in Big Data Research and for Open Science",
"doi": null,
"abstractUrl": "/proceedings-article/iv/2016/8942a261/12OmNz4SOrY",
"parentPublication": {
"id": "proceedings/iv/2016/8942/0",
"title": "2016 20th International Conference Information Visualisation (IV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icme/2007/1016/0/04285073",
"title": "Content-Based Cross Search for Human Motion Data using Time-Varying Mesh and Motion Capture Data",
"doi": null,
"abstractUrl": "/proceedings-article/icme/2007/04285073/12OmNzJbQTy",
"parentPublication": {
"id": "proceedings/icme/2007/1016/0",
"title": "2007 International Conference on Multimedia & Expo",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/biovis/2011/0003/0/063069vehlow2",
"title": "iHAT: Interactive hierarchical aggregation table",
"doi": null,
"abstractUrl": "/proceedings-article/biovis/2011/063069vehlow2/12OmNzZWbBO",
"parentPublication": {
"id": "proceedings/biovis/2011/0003/0",
"title": "2011 IEEE Symposium on Biological Data Visualization (BioVis).",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2014/12/06876022",
"title": "The Effects of Interactive Latency on Exploratory Visual Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2014/12/06876022/13rRUxYINfd",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/beliv/2018/6884/0/08634103",
"title": "The Garden of Forking Paths in Visualization: A Design Space for Reliable Exploratory Visual Analytics : Position Paper",
"doi": null,
"abstractUrl": "/proceedings-article/beliv/2018/08634103/17D45VsBTXJ",
"parentPublication": {
"id": "proceedings/beliv/2018/6884/0",
"title": "2018 IEEE Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icde/2019/7474/0/747400b666",
"title": "An Indexing Framework for Efficient Visual Exploratory Subgraph Search in Graph Databases",
"doi": null,
"abstractUrl": "/proceedings-article/icde/2019/747400b666/1aDSS2TysF2",
"parentPublication": {
"id": "proceedings/icde/2019/7474/0",
"title": "2019 IEEE 35th International Conference on Data Engineering (ICDE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08809920",
"title": "Motion Browser: Visualizing and Understanding Complex Upper Limb Movement Under Obstetrical Brachial Plexus Injuries",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08809920/1cHEmIfGtgc",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vahc/2021/2067/0/206700a019",
"title": "Enabling Longitudinal Exploratory Analysis of Clinical COVID Data",
"doi": null,
"abstractUrl": "/proceedings-article/vahc/2021/206700a019/1z0yjcoWoE0",
"parentPublication": {
"id": "proceedings/vahc/2021/2067/0",
"title": "2021 IEEE Workshop on Visual Analytics in Healthcare (VAHC)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122247",
"articleId": "13rRUxZ0o1A",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122267",
"articleId": "13rRUILtJzA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgC9",
"name": "ttg2013122257s.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122257s.wmv",
"extension": "wmv",
"size": "5.91 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgCa",
"name": "ttg2013122257s2.wmv",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122257s2.wmv",
"extension": "wmv",
"size": "1.84 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
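MotionExplorer's abstract centers on interactive aggregation of motion states into a hierarchy that supports overview-first exploration and drill-down. The sketch below is a minimal stand-in for that aggregation step, hierarchically clustering pose feature vectors with SciPy and cutting the dendrogram at several levels of detail; the feature matrix is synthetic and the system's actual pipeline is considerably richer.

```python
# Sketch of the aggregation idea: cluster pose vectors hierarchically and
# cut the tree at coarser or finer levels to obtain motion-state overviews.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(42)
# Placeholder "poses": 300 frames x 30 joint-angle features.
poses = rng.normal(size=(300, 30))

tree = linkage(poses, method="ward")           # agglomerative clustering
for k in (4, 8, 16):                           # overview -> finer detail
    labels = fcluster(tree, t=k, criterion="maxclust")
    sizes = np.bincount(labels)[1:]            # cluster sizes (labels start at 1)
    print(f"{k:>2} motion states, sizes: {sizes.tolist()}")
```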
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUILtJzA",
"doi": "10.1109/TVCG.2013.198",
"abstract": "The visual analysis of dynamic networks is a challenging task. In this paper, we introduce a new approach supporting the discovery of substructures sharing a similar trend over time by combining computation, visualization and interaction. With existing techniques, their discovery would be a tedious endeavor because of the number of nodes, edges as well as time points to be compared. First, on the basis of the supergraph, we therefore group nodes and edges according to their associated attributes that are changing over time. Second, the supergraph is visualized to provide an overview of the groups of nodes and edges with similar behavior over time in terms of their associated attributes. Third, we provide specific interactions to explore and refine the temporal clustering, allowing the user to further steer the analysis of the dynamic network. We demonstrate our approach by the visual analysis of a large wireless mesh network.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The visual analysis of dynamic networks is a challenging task. In this paper, we introduce a new approach supporting the discovery of substructures sharing a similar trend over time by combining computation, visualization and interaction. With existing techniques, their discovery would be a tedious endeavor because of the number of nodes, edges as well as time points to be compared. First, on the basis of the supergraph, we therefore group nodes and edges according to their associated attributes that are changing over time. Second, the supergraph is visualized to provide an overview of the groups of nodes and edges with similar behavior over time in terms of their associated attributes. Third, we provide specific interactions to explore and refine the temporal clustering, allowing the user to further steer the analysis of the dynamic network. We demonstrate our approach by the visual analysis of a large wireless mesh network.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The visual analysis of dynamic networks is a challenging task. In this paper, we introduce a new approach supporting the discovery of substructures sharing a similar trend over time by combining computation, visualization and interaction. With existing techniques, their discovery would be a tedious endeavor because of the number of nodes, edges as well as time points to be compared. First, on the basis of the supergraph, we therefore group nodes and edges according to their associated attributes that are changing over time. Second, the supergraph is visualized to provide an overview of the groups of nodes and edges with similar behavior over time in terms of their associated attributes. Third, we provide specific interactions to explore and refine the temporal clustering, allowing the user to further steer the analysis of the dynamic network. We demonstrate our approach by the visual analysis of a large wireless mesh network.",
"title": "Supporting the Visual Analysis of Dynamic Networks by Clustering associated Temporal Attributes",
"normalizedTitle": "Supporting the Visual Analysis of Dynamic Networks by Clustering associated Temporal Attributes",
"fno": "ttg2013122267",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Time Series Analysis",
"Market Research",
"Image Color Analysis",
"Power System Dynamics",
"Current Measurement",
"Time Measurement",
"Visualization",
"Time Series Analysis",
"Market Research",
"Image Color Analysis",
"Power System Dynamics",
"Current Measurement",
"Time Measurement",
"Supergraph Clustering",
"Dynamic Networks"
],
"authors": [
{
"givenName": "Steffen",
"surname": "Hadlak",
"fullName": "Steffen Hadlak",
"affiliation": "Univ. of Rostock, Rostock, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Heidrun",
"surname": "Schumann",
"fullName": "Heidrun Schumann",
"affiliation": "Univ. of Rostock, Rostock, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Clemens H.",
"surname": "Cap",
"fullName": "Clemens H. Cap",
"affiliation": "Univ. of Rostock, Rostock, Germany",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Till",
"surname": "Wollenberg",
"fullName": "Till Wollenberg",
"affiliation": "Univ. of Rostock, Rostock, Germany",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2267-2276",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icisa/2013/0602/0/06579427",
"title": "Big Data Security Hardening Methodology Using Attributes Relationship",
"doi": null,
"abstractUrl": "/proceedings-article/icisa/2013/06579427/12OmNARiM1h",
"parentPublication": {
"id": "proceedings/icisa/2013/0602/0",
"title": "2013 International Conference on Information Science and Applications (ICISA)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icmtma/2009/3583/3/3583c677",
"title": "Attributes of Urban Logistics Nodes and Their Mathematical Description",
"doi": null,
"abstractUrl": "/proceedings-article/icmtma/2009/3583c677/12OmNBcShWD",
"parentPublication": {
"id": null,
"title": null,
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icebe/2016/6119/0/6119a064",
"title": "Clustering-Based Algorithms to Semantic Summarizing Graph with Multi-attributes’ Hierarchical Structures",
"doi": null,
"abstractUrl": "/proceedings-article/icebe/2016/6119a064/12OmNvjyxHD",
"parentPublication": {
"id": "proceedings/icebe/2016/6119/0",
"title": "2016 IEEE 13th International Conference on e-Business Engineering (ICEBE)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2017/3835/0/3835a405",
"title": "Scalable Hashing-Based Network Discovery",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2017/3835a405/12OmNvpNIsv",
"parentPublication": {
"id": "proceedings/icdm/2017/3835/0",
"title": "2017 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2013/5108/0/5108b151",
"title": "Community Detection in Networks with Node Attributes",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2013/5108b151/12OmNx3Zjq2",
"parentPublication": {
"id": "proceedings/icdm/2013/5108/0",
"title": "2013 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2013/2240/0/06785918",
"title": "Role discovery based on sociology attributes clustering in Sina Microblog",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2013/06785918/12OmNzcxYVR",
"parentPublication": {
"id": "proceedings/asonam/2013/2240/0",
"title": "2013 International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tb/2020/03/08476231",
"title": "Inferring Disease-Associated microRNAs in Heterogeneous Networks with Node Attributes",
"doi": null,
"abstractUrl": "/journal/tb/2020/03/08476231/13WBGNbHGd3",
"parentPublication": {
"id": "trans/tb",
"title": "IEEE/ACM Transactions on Computational Biology and Bioinformatics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wi/2018/7325/0/732500a401",
"title": "Clustering in Networks with Multi-Modality Attributes",
"doi": null,
"abstractUrl": "/proceedings-article/wi/2018/732500a401/17D45XfSET2",
"parentPublication": {
"id": "proceedings/wi/2018/7325/0",
"title": "2018 IEEE/WIC/ACM International Conference on Web Intelligence (WI)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icdm/2020/8316/0/831600a900",
"title": "Matrix Profile XXII: Exact Discovery of Time Series Motifs Under DTW",
"doi": null,
"abstractUrl": "/proceedings-article/icdm/2020/831600a900/1r54GFXrdra",
"parentPublication": {
"id": "proceedings/icdm/2020/8316/0",
"title": "2020 IEEE International Conference on Data Mining (ICDM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/asonam/2020/1056/0/09381332",
"title": "Proximity, Communities, and Attributes in Social Network Visualisation",
"doi": null,
"abstractUrl": "/proceedings-article/asonam/2020/09381332/1semDRI9JzW",
"parentPublication": {
"id": "proceedings/asonam/2020/1056/0",
"title": "2020 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122257",
"articleId": "13rRUwInuWu",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122277",
"articleId": "13rRUwd9CLM",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTYet3Y",
"name": "ttg2013122267s.avi",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122267s.avi",
"extension": "avi",
"size": "34.8 MB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
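The record above groups the nodes and edges of a dynamic network's supergraph by how their time-varying attributes behave. As a rough stand-in for that grouping step, and not the paper's actual algorithm, the sketch below clusters per-node attribute time series with k-means after z-normalizing each series; the data and the choice of k are placeholders.

```python
# Sketch: group nodes whose attribute time series follow a similar trend.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(1)
# Placeholder: 50 nodes x 96 time points of some measured attribute.
series = rng.normal(size=(50, 96)).cumsum(axis=1)

# z-normalize each node's series so clusters reflect shape, not magnitude
z = (series - series.mean(axis=1, keepdims=True)) / series.std(axis=1, keepdims=True)

labels = KMeans(n_clusters=4, n_init=10, random_state=0).fit_predict(z)
for group in range(4):
    print(f"group {group}: nodes {np.flatnonzero(labels == group).tolist()}")
```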
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwd9CLM",
"doi": "10.1109/TVCG.2013.173",
"abstract": "Rankings are a popular and universal approach to structuring otherwise unorganized collections of items by computing a rank for each item based on the value of one or more of its attributes. This allows us, for example, to prioritize tasks or to evaluate the performance of products relative to each other. While the visualization of a ranking itself is straightforward, its interpretation is not, because the rank of an item represents only a summary of a potentially complicated relationship between its attributes and those of the other items. It is also common that alternative rankings exist which need to be compared and analyzed to gain insight into how multiple heterogeneous attributes affect the rankings. Advanced visual exploration tools are needed to make this process efficient. In this paper we present a comprehensive analysis of requirements for the visualization of multi-attribute rankings. Based on these considerations, we propose LineUp - a novel and scalable visualization technique that uses bar charts. This interactive technique supports the ranking of items based on multiple heterogeneous attributes with different scales and semantics. It enables users to interactively combine attributes and flexibly refine parameters to explore the effect of changes in the attribute combination. This process can be employed to derive actionable insights as to which attributes of an item need to be modified in order for its rank to change. Additionally, through integration of slope graphs, LineUp can also be used to compare multiple alternative rankings on the same set of items, for example, over time or across different attribute combinations. We evaluate the effectiveness of the proposed multi-attribute visualization technique in a qualitative study. The study shows that users are able to successfully solve complex ranking tasks in a short period of time.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Rankings are a popular and universal approach to structuring otherwise unorganized collections of items by computing a rank for each item based on the value of one or more of its attributes. This allows us, for example, to prioritize tasks or to evaluate the performance of products relative to each other. While the visualization of a ranking itself is straightforward, its interpretation is not, because the rank of an item represents only a summary of a potentially complicated relationship between its attributes and those of the other items. It is also common that alternative rankings exist which need to be compared and analyzed to gain insight into how multiple heterogeneous attributes affect the rankings. Advanced visual exploration tools are needed to make this process efficient. In this paper we present a comprehensive analysis of requirements for the visualization of multi-attribute rankings. Based on these considerations, we propose LineUp - a novel and scalable visualization technique that uses bar charts. This interactive technique supports the ranking of items based on multiple heterogeneous attributes with different scales and semantics. It enables users to interactively combine attributes and flexibly refine parameters to explore the effect of changes in the attribute combination. This process can be employed to derive actionable insights as to which attributes of an item need to be modified in order for its rank to change. Additionally, through integration of slope graphs, LineUp can also be used to compare multiple alternative rankings on the same set of items, for example, over time or across different attribute combinations. We evaluate the effectiveness of the proposed multi-attribute visualization technique in a qualitative study. The study shows that users are able to successfully solve complex ranking tasks in a short period of time.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Rankings are a popular and universal approach to structuring otherwise unorganized collections of items by computing a rank for each item based on the value of one or more of its attributes. This allows us, for example, to prioritize tasks or to evaluate the performance of products relative to each other. While the visualization of a ranking itself is straightforward, its interpretation is not, because the rank of an item represents only a summary of a potentially complicated relationship between its attributes and those of the other items. It is also common that alternative rankings exist which need to be compared and analyzed to gain insight into how multiple heterogeneous attributes affect the rankings. Advanced visual exploration tools are needed to make this process efficient. In this paper we present a comprehensive analysis of requirements for the visualization of multi-attribute rankings. Based on these considerations, we propose LineUp - a novel and scalable visualization technique that uses bar charts. This interactive technique supports the ranking of items based on multiple heterogeneous attributes with different scales and semantics. It enables users to interactively combine attributes and flexibly refine parameters to explore the effect of changes in the attribute combination. This process can be employed to derive actionable insights as to which attributes of an item need to be modified in order for its rank to change. Additionally, through integration of slope graphs, LineUp can also be used to compare multiple alternative rankings on the same set of items, for example, over time or across different attribute combinations. We evaluate the effectiveness of the proposed multi-attribute visualization technique in a qualitative study. The study shows that users are able to successfully solve complex ranking tasks in a short period of time.",
"title": "LineUp: Visual Analysis of Multi-Attribute Rankings",
"normalizedTitle": "LineUp: Visual Analysis of Multi-Attribute Rankings",
"fno": "ttg2013122277",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Rankings",
"Data Visualization",
"Encoding",
"Histograms",
"Scalability",
"Multi Faceted",
"Rankings",
"Data Visualization",
"Encoding",
"Histograms",
"Scalability",
"Stacked Bar Charts",
"Ranking Visualization",
"Ranking",
"Scoring",
"Multi Attribute",
"Multifactorial"
],
"authors": [
{
"givenName": "Samuel",
"surname": "Gratzl",
"fullName": "Samuel Gratzl",
"affiliation": "Johannes Kepler Univ. Linz, Linz, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Alexander",
"surname": "Lex",
"fullName": "Alexander Lex",
"affiliation": "Johannes Kepler Univ. Linz, Linz, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Nils",
"surname": "Gehlenborg",
"fullName": "Nils Gehlenborg",
"affiliation": "Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanspeter",
"surname": "Pfister",
"fullName": "Hanspeter Pfister",
"affiliation": "Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marc",
"surname": "Streit",
"fullName": "Marc Streit",
"affiliation": "Med. Sch., Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2277-2286",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/icmla/2009/3926/0/3926a154",
"title": "Discovering Characterization Rules from Rankings",
"doi": null,
"abstractUrl": "/proceedings-article/icmla/2009/3926a154/12OmNwE9Ou0",
"parentPublication": {
"id": "proceedings/icmla/2009/3926/0",
"title": "Machine Learning and Applications, Fourth International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icss/2016/2727/0/2727a165",
"title": "A Recommendation System Based on Multi-attribute",
"doi": null,
"abstractUrl": "/proceedings-article/icss/2016/2727a165/12OmNxHryiQ",
"parentPublication": {
"id": "proceedings/icss/2016/2727/0",
"title": "2016 9th International Conference on Service Science (ICSS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2018/4886/0/488601b671",
"title": "Efficient Multi-attribute Similarity Learning Towards Attribute-Based Fashion Search",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2018/488601b671/12OmNyRg4fV",
"parentPublication": {
"id": "proceedings/wacv/2018/4886/0",
"title": "2018 IEEE Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icgec/2010/4281/0/4281a055",
"title": "Application of Multi-Attribute Rating Matrix in Cold-start Recommendation",
"doi": null,
"abstractUrl": "/proceedings-article/icgec/2010/4281a055/12OmNzYwc02",
"parentPublication": {
"id": "proceedings/icgec/2010/4281/0",
"title": "Genetic and Evolutionary Computing, International Conference on",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/12/07369992",
"title": "ThermalPlot: Visualizing Multi-Attribute Time-Series Data Using a Thermal Metaphor",
"doi": null,
"abstractUrl": "/journal/tg/2016/12/07369992/13rRUygT7yg",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2013/12/ttg2013122496",
"title": "Radial Sets: Interactive Visual Analysis of Large Overlapping Sets",
"doi": null,
"abstractUrl": "/journal/tg/2013/12/ttg2013122496/13rRUytF41A",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a065",
"title": "FairFuse: Interactive Visual Support for Fair Consensus Ranking",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a065/1J6haP1jUt2",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/wacv/2023/9346/0/934600a848",
"title": "Leveraging Off-the-shelf Diffusion Model for Multi-attribute Fashion Image Manipulation",
"doi": null,
"abstractUrl": "/proceedings-article/wacv/2023/934600a848/1L6LCFHWC6Q",
"parentPublication": {
"id": "proceedings/wacv/2023/9346/0",
"title": "2023 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2017/11/07776926",
"title": "Submodular Attribute Selection for Visual Recognition",
"doi": null,
"abstractUrl": "/journal/tp/2017/11/07776926/1oCiZJsBsT6",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/04/09288641",
"title": "Imma Sort by Two or More Attributes With Interpretable Monotonic Multi-Attribute Sorting",
"doi": null,
"abstractUrl": "/journal/tg/2021/04/09288641/1pq6982P34Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122267",
"articleId": "13rRUILtJzA",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122287",
"articleId": "13rRUwInv4o",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgNX",
"name": "ttg2013122277s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122277s.mp4",
"extension": "mp4",
"size": "42.1 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXFgNY",
"name": "ttg2013122277s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122277s.zip",
"extension": "zip",
"size": "887 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
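LineUp's core operation, as described in the abstract above, is combining heterogeneous attributes into a single score by normalizing each attribute, weighting it, and ranking items by the weighted sum. The sketch below reproduces only that scoring step with pandas on made-up data; the attribute names and weights are placeholders, and the interactive stacked-bar encoding is not shown.

```python
# Sketch of LineUp-style scoring: min-max normalize attributes, combine them
# with user-chosen weights, and rank items by the weighted sum.
import pandas as pd

items = pd.DataFrame(
    {"citations": [120, 40, 300, 75], "awards": [2, 5, 3, 1], "year": [2010, 2013, 2008, 2012]},
    index=["A", "B", "C", "D"],
)
weights = {"citations": 0.5, "awards": 0.3, "year": 0.2}  # placeholder weighting

normalized = (items - items.min()) / (items.max() - items.min())
items["score"] = sum(normalized[col] * w for col, w in weights.items())
items["rank"] = items["score"].rank(ascending=False).astype(int)
print(items.sort_values("rank"))
```

Re-weighting and re-ranking is cheap, which is what makes the short interactive feedback loop the paper describes feasible.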
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUwInv4o",
"doi": "10.1109/TVCG.2013.122",
"abstract": "Many application domains deal with multi-variate data that consist of both categorical and numerical information. Small-multiple displays are a powerful concept for comparing such data by juxtaposition. For comparison by overlay or by explicit encoding of computed differences, however, a specification of references is necessary. In this paper, we present a formal model for defining semantically meaningful comparisons between many categories in a small-multiple display. Based on pivotized data that are hierarchically partitioned by the categories assigned to the x and y axis of the display, we propose two alternatives for structure-based comparison within this hierarchy. With an absolute reference specification, categories are compared to a fixed reference category. With a relative reference specification, in contrast, a semantic ordering of the categories is considered when comparing them either to the previous or subsequent category each. Both reference specifications can be defined at multiple levels of the hierarchy (including aggregated summaries), enabling a multitude of useful comparisons. We demonstrate the general applicability of our model in several application examples using different visualizations that compare data by overlay or explicit encoding of differences.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Many application domains deal with multi-variate data that consist of both categorical and numerical information. Small-multiple displays are a powerful concept for comparing such data by juxtaposition. For comparison by overlay or by explicit encoding of computed differences, however, a specification of references is necessary. In this paper, we present a formal model for defining semantically meaningful comparisons between many categories in a small-multiple display. Based on pivotized data that are hierarchically partitioned by the categories assigned to the x and y axis of the display, we propose two alternatives for structure-based comparison within this hierarchy. With an absolute reference specification, categories are compared to a fixed reference category. With a relative reference specification, in contrast, a semantic ordering of the categories is considered when comparing them either to the previous or subsequent category each. Both reference specifications can be defined at multiple levels of the hierarchy (including aggregated summaries), enabling a multitude of useful comparisons. We demonstrate the general applicability of our model in several application examples using different visualizations that compare data by overlay or explicit encoding of differences.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Many application domains deal with multi-variate data that consist of both categorical and numerical information. Small-multiple displays are a powerful concept for comparing such data by juxtaposition. For comparison by overlay or by explicit encoding of computed differences, however, a specification of references is necessary. In this paper, we present a formal model for defining semantically meaningful comparisons between many categories in a small-multiple display. Based on pivotized data that are hierarchically partitioned by the categories assigned to the x and y axis of the display, we propose two alternatives for structure-based comparison within this hierarchy. With an absolute reference specification, categories are compared to a fixed reference category. With a relative reference specification, in contrast, a semantic ordering of the categories is considered when comparing them either to the previous or subsequent category each. Both reference specifications can be defined at multiple levels of the hierarchy (including aggregated summaries), enabling a multitude of useful comparisons. We demonstrate the general applicability of our model in several application examples using different visualizations that compare data by overlay or explicit encoding of differences.",
"title": "A Model for Structure-Based Comparison of Many Categories in Small-Multiple Displays",
"normalizedTitle": "A Model for Structure-Based Comparison of Many Categories in Small-Multiple Displays",
"fno": "ttg2013122287",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Encoding",
"Data Visualization",
"Displays",
"Computational Modeling",
"Trellis Displays",
"Encoding",
"Data Visualization",
"Displays",
"Computational Modeling",
"Categorical Data",
"Comparative Visualization",
"Small Multiple Displays"
],
"authors": [
{
"givenName": "Johannes",
"surname": "Kehrer",
"fullName": "Johannes Kehrer",
"affiliation": "VRVis Res. Center, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Harald",
"surname": "Piringer",
"fullName": "Harald Piringer",
"affiliation": "VRVis Res. Center, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Wolfgang",
"surname": "Berger",
"fullName": "Wolfgang Berger",
"affiliation": "VRVis Res. Center, Vienna, Austria",
"__typename": "ArticleAuthorType"
},
{
"givenName": "M. Eduard",
"surname": "Groller",
"fullName": "M. Eduard Groller",
"affiliation": "Inst. of Comput. Graphics & Algorithms, Vienna Univ. of Technol., Vienna, Austria",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2287-2296",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/3dv/2015/8332/0/8332a309",
"title": "Multi-label Object Categorization Using Histograms of Global Relations",
"doi": null,
"abstractUrl": "/proceedings-article/3dv/2015/8332a309/12OmNqIzgXY",
"parentPublication": {
"id": "proceedings/3dv/2015/8332/0",
"title": "2015 International Conference on 3D Vision (3DV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391a064",
"title": "Pose Induction for Novel Object Categories",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391a064/12OmNxiKs3g",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/icpr/2012/2216/0/06460860",
"title": "Mining sub-categories for object detection",
"doi": null,
"abstractUrl": "/proceedings-article/icpr/2012/06460860/12OmNywxlLP",
"parentPublication": {
"id": "proceedings/icpr/2012/2216/0",
"title": "2012 21st International Conference on Pattern Recognition (ICPR 2012)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/e-science/2015/9325/0/9325a001",
"title": "Adventures of Categories: Modelling the Evolution of Categories During Scientific Investigation",
"doi": null,
"abstractUrl": "/proceedings-article/e-science/2015/9325a001/12OmNz2kqrN",
"parentPublication": {
"id": "proceedings/e-science/2015/9325/0",
"title": "2015 IEEE 11th International Conference on e-Science (e-Science)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07194832",
"title": "Task-Driven Comparison of Topic Models",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07194832/13rRUxBJhFA",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/09/08636969",
"title": "Aggregated Dendrograms for Visual Comparison between Many Phylogenetic Trees",
"doi": null,
"abstractUrl": "/journal/tg/2020/09/08636969/17D45WXIkG8",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvprw/2018/6100/0/610000c054",
"title": "Representation of Categories in Filters of Deep Neural Networks",
"doi": null,
"abstractUrl": "/proceedings-article/cvprw/2018/610000c054/17D45WaTkkK",
"parentPublication": {
"id": "proceedings/cvprw/2018/6100/0",
"title": "2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/lics/2019/3608/0/08785687",
"title": "Describing free <tex>Z_$\\omega$_Z</tex> -categories",
"doi": null,
"abstractUrl": "/proceedings-article/lics/2019/08785687/1cdOpn0hax2",
"parentPublication": {
"id": "proceedings/lics/2019/3608/0",
"title": "2019 34th Annual ACM/IEEE Symposium on Logic in Computer Science (LICS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccvw/2019/5023/0/502300c797",
"title": "A Refined 3D Pose Dataset for Fine-Grained Object Categories",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2019/502300c797/1i5mJJVu6Uo",
"parentPublication": {
"id": "proceedings/iccvw/2019/5023/0",
"title": "2019 IEEE/CVF International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/ta/2023/01/09265409",
"title": "Computation of Sensory-Affective Relationships Depending on Material Categories of Pictorial Stimuli",
"doi": null,
"abstractUrl": "/journal/ta/2023/01/09265409/1oUyrN8PG7u",
"parentPublication": {
"id": "trans/ta",
"title": "IEEE Transactions on Affective Computing",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122277",
"articleId": "13rRUwd9CLM",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122297",
"articleId": "13rRUyYjKaf",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXFgEG",
"name": "ttg2013122287s.zip",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122287s.zip",
"extension": "zip",
"size": "162 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
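The record above (Kehrer et al., "A Model for Structure-Based Comparison of Many Categories in Small-Multiple Displays") describes two reference specifications for comparing categories: an absolute one, where every category is compared to a fixed reference, and a relative one, where each category is compared to its previous or subsequent neighbour in a semantic ordering. The following is a minimal sketch of that distinction, not the authors' implementation; the quarter labels, values, and function names are hypothetical and stand in for one level of a pivot hierarchy.

```python
# Minimal sketch of absolute vs. relative reference specifications over an
# ordered set of categories (illustrative only; not the paper's model code).
from collections import OrderedDict

def absolute_differences(values, reference_category):
    """Compare every category to one fixed reference category."""
    ref = values[reference_category]
    return {cat: v - ref for cat, v in values.items()}

def relative_differences(values, direction="previous"):
    """Compare each category to its previous (or subsequent) neighbour,
    following the semantic ordering of the categories."""
    cats = list(values.keys())
    diffs = {}
    for i, cat in enumerate(cats):
        j = i - 1 if direction == "previous" else i + 1
        if 0 <= j < len(cats):
            diffs[cat] = values[cat] - values[cats[j]]
    return diffs

if __name__ == "__main__":
    # Hypothetical aggregated values per quarter.
    sales = OrderedDict([("Q1", 120.0), ("Q2", 135.0), ("Q3", 128.0), ("Q4", 150.0)])
    print(absolute_differences(sales, "Q1"))  # every quarter vs. fixed reference Q1
    print(relative_differences(sales))        # each quarter vs. the previous quarter
```

The computed differences would then feed an overlay or explicit-difference encoding in the small-multiple display, which is the use case the abstract names.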
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUyYjKaf",
"doi": "10.1109/TVCG.2013.140",
"abstract": "Visualizations are great tools of communications-they summarize findings and quickly convey main messages to our audience. As designers of charts we have to make sure that information is shown with a minimum of distortion. We have to also consider illusions and other perceptual limitations of our audience. In this paper we discuss the effect and strength of the line width illusion, a Muller-Lyer type illusion, on designs related to displaying associations between categorical variables. Parallel sets and hammock plots are both affected by line width illusions. We introduce the common-angle plot as an alternative method for displaying categorical data in a manner that minimizes the effect from perceptual illusions. Results from user studies both highlight the need for addressing line-width illusions in displays and provide evidence that common angle charts successfully resolve this issue.",
"abstracts": [
{
"abstractType": "Regular",
"content": "Visualizations are great tools of communications-they summarize findings and quickly convey main messages to our audience. As designers of charts we have to make sure that information is shown with a minimum of distortion. We have to also consider illusions and other perceptual limitations of our audience. In this paper we discuss the effect and strength of the line width illusion, a Muller-Lyer type illusion, on designs related to displaying associations between categorical variables. Parallel sets and hammock plots are both affected by line width illusions. We introduce the common-angle plot as an alternative method for displaying categorical data in a manner that minimizes the effect from perceptual illusions. Results from user studies both highlight the need for addressing line-width illusions in displays and provide evidence that common angle charts successfully resolve this issue.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "Visualizations are great tools of communications-they summarize findings and quickly convey main messages to our audience. As designers of charts we have to make sure that information is shown with a minimum of distortion. We have to also consider illusions and other perceptual limitations of our audience. In this paper we discuss the effect and strength of the line width illusion, a Muller-Lyer type illusion, on designs related to displaying associations between categorical variables. Parallel sets and hammock plots are both affected by line width illusions. We introduce the common-angle plot as an alternative method for displaying categorical data in a manner that minimizes the effect from perceptual illusions. Results from user studies both highlight the need for addressing line-width illusions in displays and provide evidence that common angle charts successfully resolve this issue.",
"title": "Common Angle Plots as Perception-True Visualizations of Categorical Associations",
"normalizedTitle": "Common Angle Plots as Perception-True Visualizations of Categorical Associations",
"fno": "ttg2013122297",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Biological Cells",
"Biochemistry",
"Data Visualization",
"Parallel Processing",
"Hammock Plots",
"Biological Cells",
"Biochemistry",
"Data Visualization",
"Parallel Processing",
"Muller Lyer Illusion",
"Linewidth Illusion",
"Data Visualization",
"High Dimensional Displays",
"Parallel Sets"
],
"authors": [
{
"givenName": "Heike",
"surname": "Hofmann",
"fullName": "Heike Hofmann",
"affiliation": null,
"__typename": "ArticleAuthorType"
},
{
"givenName": "Marie",
"surname": "Vendettuoli",
"fullName": "Marie Vendettuoli",
"affiliation": null,
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2297-2305",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/pdp/2015/8491/0/8491a725",
"title": "Parallel Exploration of the Nuclear Chromosome Conformation with NuChart-II",
"doi": null,
"abstractUrl": "/proceedings-article/pdp/2015/8491a725/12OmNvAAtvV",
"parentPublication": {
"id": "proceedings/pdp/2015/8491/0",
"title": "2015 23rd Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122287",
"articleId": "13rRUwInv4o",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122306",
"articleId": "13rRUxE04tz",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
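The record above (Hofmann and Vendettuoli, "Common Angle Plots as Perception-True Visualizations of Categorical Associations") concerns the line-width illusion in parallel sets and hammock plots: a ribbon's frequency is encoded by its vertical extent, but viewers tend to judge the extent perpendicular to the ribbon, which shrinks as the ribbon gets steeper. The sketch below only illustrates that geometric effect under this assumption; it is not the paper's method or code, and the numbers are made up.

```python
# Illustration of the line-width illusion assumed above: equal encoded
# (vertical) extents yield unequal perceived (perpendicular) widths.
import math

def perceived_ribbon_width(vertical_extent, angle_deg):
    """Perpendicular extent of a ribbon of given vertical extent drawn at a slope."""
    return vertical_extent * math.cos(math.radians(angle_deg))

if __name__ == "__main__":
    # Three ribbons encode the same frequency (vertical extent 10) at different slopes.
    for angle in (0, 30, 60):
        print(angle, round(perceived_ribbon_width(10.0, angle), 2))
    # 0 -> 10.0, 30 -> 8.66, 60 -> 5.0: same data, different perceived widths.
    # Drawing all sloped segments at one shared angle (the common-angle idea)
    # keeps the perceived widths comparable across categories.
```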
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxE04tz",
"doi": "10.1109/TVCG.2013.234",
"abstract": "An ongoing debate in the Visualization community concerns the role that visualization types play in data understanding. In human cognition, understanding and memorability are intertwined. As a first step towards being able to ask questions about impact and effectiveness, here we ask: 'What makes a visualization memorable?' We ran the largest scale visualization study to date using 2,070 single-panel visualizations, categorized with visualization type (e.g., bar chart, line graph, etc.), collected from news media sites, government reports, scientific journals, and infographic sources. Each visualization was annotated with additional attributes, including ratings for data-ink ratios and visual densities. Using Amazon's Mechanical Turk, we collected memorability scores for hundreds of these visualizations, and discovered that observers are consistent in which visualizations they find memorable and forgettable. We find intuitive results (e.g., attributes like color and the inclusion of a human recognizable object enhance memorability) and less intuitive results (e.g., common graphs are less memorable than unique visualization types). Altogether our findings suggest that quantifying memorability is a general metric of the utility of information, an essential step towards determining how to design effective visualizations.",
"abstracts": [
{
"abstractType": "Regular",
"content": "An ongoing debate in the Visualization community concerns the role that visualization types play in data understanding. In human cognition, understanding and memorability are intertwined. As a first step towards being able to ask questions about impact and effectiveness, here we ask: 'What makes a visualization memorable?' We ran the largest scale visualization study to date using 2,070 single-panel visualizations, categorized with visualization type (e.g., bar chart, line graph, etc.), collected from news media sites, government reports, scientific journals, and infographic sources. Each visualization was annotated with additional attributes, including ratings for data-ink ratios and visual densities. Using Amazon's Mechanical Turk, we collected memorability scores for hundreds of these visualizations, and discovered that observers are consistent in which visualizations they find memorable and forgettable. We find intuitive results (e.g., attributes like color and the inclusion of a human recognizable object enhance memorability) and less intuitive results (e.g., common graphs are less memorable than unique visualization types). Altogether our findings suggest that quantifying memorability is a general metric of the utility of information, an essential step towards determining how to design effective visualizations.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "An ongoing debate in the Visualization community concerns the role that visualization types play in data understanding. In human cognition, understanding and memorability are intertwined. As a first step towards being able to ask questions about impact and effectiveness, here we ask: 'What makes a visualization memorable?' We ran the largest scale visualization study to date using 2,070 single-panel visualizations, categorized with visualization type (e.g., bar chart, line graph, etc.), collected from news media sites, government reports, scientific journals, and infographic sources. Each visualization was annotated with additional attributes, including ratings for data-ink ratios and visual densities. Using Amazon's Mechanical Turk, we collected memorability scores for hundreds of these visualizations, and discovered that observers are consistent in which visualizations they find memorable and forgettable. We find intuitive results (e.g., attributes like color and the inclusion of a human recognizable object enhance memorability) and less intuitive results (e.g., common graphs are less memorable than unique visualization types). Altogether our findings suggest that quantifying memorability is a general metric of the utility of information, an essential step towards determining how to design effective visualizations.",
"title": "What Makes a Visualization Memorable?",
"normalizedTitle": "What Makes a Visualization Memorable?",
"fno": "ttg2013122306",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Data Visualization",
"Taxonomy",
"Information Technology",
"Encoding",
"Information Visualization",
"Data Visualization",
"Taxonomy",
"Information Technology",
"Encoding",
"Memorability",
"Visualization Taxonomy"
],
"authors": [
{
"givenName": "Michelle A.",
"surname": "Borkin",
"fullName": "Michelle A. Borkin",
"affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Azalea A.",
"surname": "Vo",
"fullName": "Azalea A. Vo",
"affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Zoya",
"surname": "Bylinskii",
"fullName": "Zoya Bylinskii",
"affiliation": "Comput. Sci. & Artificial Intell. Lab., Massachusetts Inst. of Technol., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Phillip",
"surname": "Isola",
"fullName": "Phillip Isola",
"affiliation": "Dept. of Brain & Cognitive Sci., Massachusetts Inst. of Technol., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Shashank",
"surname": "Sunkavalli",
"fullName": "Shashank Sunkavalli",
"affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Aude",
"surname": "Oliva",
"fullName": "Aude Oliva",
"affiliation": "Comput. Sci. & Artificial Intell. Lab., Massachusetts Inst. of Technol., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Hanspeter",
"surname": "Pfister",
"fullName": "Hanspeter Pfister",
"affiliation": "Sch. of Eng. & Appl. Sci., Harvard Univ., Cambridge, MA, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2306-2315",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "proceedings/iccvw/2017/1034/0/1034c730",
"title": "Show and Recall: Learning What Makes Videos Memorable",
"doi": null,
"abstractUrl": "/proceedings-article/iccvw/2017/1034c730/12OmNAle6td",
"parentPublication": {
"id": "proceedings/iccvw/2017/1034/0",
"title": "2017 IEEE International Conference on Computer Vision Workshop (ICCVW)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iccv/2015/8391/0/8391b089",
"title": "What Makes an Object Memorable?",
"doi": null,
"abstractUrl": "/proceedings-article/iccv/2015/8391b089/12OmNC1Y5s2",
"parentPublication": {
"id": "proceedings/iccv/2015/8391/0",
"title": "2015 IEEE International Conference on Computer Vision (ICCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/cvpr/2011/0394/0/05995721",
"title": "What makes an image memorable?",
"doi": null,
"abstractUrl": "/proceedings-article/cvpr/2011/05995721/12OmNsd6vhy",
"parentPublication": {
"id": "proceedings/cvpr/2011/0394/0",
"title": "CVPR 2011",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/iscv/2017/4062/0/08054945",
"title": "Visual content learning for visualizations memorability classification",
"doi": null,
"abstractUrl": "/proceedings-article/iscv/2017/08054945/12OmNz6iOlU",
"parentPublication": {
"id": "proceedings/iscv/2017/4062/0",
"title": "2017 Intelligent Systems and Computer Vision (ISCV)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tp/2014/07/06629991",
"title": "What Makes a Photograph Memorable?",
"doi": null,
"abstractUrl": "/journal/tp/2014/07/06629991/13rRUx0xPoc",
"parentPublication": {
"id": "trans/tp",
"title": "IEEE Transactions on Pattern Analysis & Machine Intelligence",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2016/01/07192646",
"title": "Beyond Memorability: Visualization Recognition and Recall",
"doi": null,
"abstractUrl": "/journal/tg/2016/01/07192646/13rRUxASuME",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/scivis/2018/6882/0/08823764",
"title": "Toward A Deep Understanding of What Makes a Scientific Visualization Memorable",
"doi": null,
"abstractUrl": "/proceedings-article/scivis/2018/08823764/1d5kxlHWMEg",
"parentPublication": {
"id": "proceedings/scivis/2018/6882/0",
"title": "2018 IEEE Scientific Visualization Conference (SciVis)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2019/4941/0/08933570",
"title": "Visualization Assessment: A Machine Learning Approach",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2019/08933570/1fTgFdztlbq",
"parentPublication": {
"id": "proceedings/vis/2019/4941/0",
"title": "2019 IEEE Visualization Conference (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09492011",
"title": "A Survey of Perception-Based Visualization Studies by Task",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09492011/1volPuHGMdW",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/12/09528956",
"title": "Multiscale Visualization: A Structured Literature Analysis",
"doi": null,
"abstractUrl": "/journal/tg/2022/12/09528956/1wB2xUo1WKY",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122297",
"articleId": "13rRUyYjKaf",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122316",
"articleId": "13rRUxASuhA",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [
{
"id": "17ShDTXWRJH",
"name": "ttg2013122306s.pdf",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122306s.pdf",
"extension": "pdf",
"size": "1.96 MB",
"__typename": "WebExtraType"
},
{
"id": "17ShDTXWRJG",
"name": "ttg2013122306s.mp4",
"location": "https://www.computer.org/csdl/api/v1/extra/ttg2013122306s.mp4",
"extension": "mp4",
"size": "562 kB",
"__typename": "WebExtraType"
}
],
"articleVideos": []
} |
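The record above (Borkin et al., "What Makes a Visualization Memorable?") reports memorability scores collected through Mechanical Turk. The sketch below shows one common way such scores are computed for a repeat-detection memory task (per-image hit rate and false-alarm rate); it is a generic illustration under that assumption, not the authors' actual pipeline, and the tuple format and demo data are hypothetical.

```python
# Generic per-image scoring for a recognition memory game (illustrative only).
from collections import defaultdict

def memorability_scores(responses):
    """responses: iterable of (image_id, is_repeat, said_repeat) tuples.
    Returns {image_id: (hit_rate, false_alarm_rate)}, with None where undefined."""
    hits, repeats = defaultdict(int), defaultdict(int)
    fas, firsts = defaultdict(int), defaultdict(int)
    for image_id, is_repeat, said_repeat in responses:
        if is_repeat:
            repeats[image_id] += 1
            hits[image_id] += int(said_repeat)   # correctly recognised repeat
        else:
            firsts[image_id] += 1
            fas[image_id] += int(said_repeat)    # false alarm on first showing
    return {img: (hits[img] / repeats[img] if repeats[img] else None,
                  fas[img] / firsts[img] if firsts[img] else None)
            for img in set(repeats) | set(firsts)}

if __name__ == "__main__":
    demo = [("vis_a", False, False), ("vis_a", True, True),
            ("vis_b", False, True), ("vis_b", True, False)]
    print(memorability_scores(demo))  # vis_a: (1.0, 0.0), vis_b: (0.0, 1.0)
```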
{
"issue": {
"id": "12OmNCbCrUN",
"title": "Dec.",
"year": "2013",
"issueNum": "12",
"idPrefix": "tg",
"pubType": "journal",
"volume": "19",
"label": "Dec.",
"downloadables": {
"hasCover": false,
"__typename": "PeriodicalIssueDownloadablesType"
},
"__typename": "PeriodicalIssue"
},
"article": {
"id": "13rRUxASuhA",
"doi": "10.1109/TVCG.2013.183",
"abstract": "The visual system can make highly efficient aggregate judgements about a set of objects, with speed roughly independent of the number of objects considered. While there is a rich literature on these mechanisms and their ramifications for visual summarization tasks, this prior work rarely considers more complex tasks requiring multiple judgements over long periods of time, and has not considered certain critical aggregation types, such as the localization of the mean value of a set of points. In this paper, we explore these questions using a common visualization task as a case study: relative mean value judgements within multi-class scatterplots. We describe how the perception literature provides a set of expected constraints on the task, and evaluate these predictions with a large-scale perceptual study with crowd-sourced participants. Judgements are no harder when each set contains more points, redundant and conflicting encodings, as well as additional sets, do not strongly affect performance, and judgements are harder when using less salient encodings. These results have concrete ramifications for the design of scatterplots.",
"abstracts": [
{
"abstractType": "Regular",
"content": "The visual system can make highly efficient aggregate judgements about a set of objects, with speed roughly independent of the number of objects considered. While there is a rich literature on these mechanisms and their ramifications for visual summarization tasks, this prior work rarely considers more complex tasks requiring multiple judgements over long periods of time, and has not considered certain critical aggregation types, such as the localization of the mean value of a set of points. In this paper, we explore these questions using a common visualization task as a case study: relative mean value judgements within multi-class scatterplots. We describe how the perception literature provides a set of expected constraints on the task, and evaluate these predictions with a large-scale perceptual study with crowd-sourced participants. Judgements are no harder when each set contains more points, redundant and conflicting encodings, as well as additional sets, do not strongly affect performance, and judgements are harder when using less salient encodings. These results have concrete ramifications for the design of scatterplots.",
"__typename": "ArticleAbstractType"
}
],
"normalizedAbstract": "The visual system can make highly efficient aggregate judgements about a set of objects, with speed roughly independent of the number of objects considered. While there is a rich literature on these mechanisms and their ramifications for visual summarization tasks, this prior work rarely considers more complex tasks requiring multiple judgements over long periods of time, and has not considered certain critical aggregation types, such as the localization of the mean value of a set of points. In this paper, we explore these questions using a common visualization task as a case study: relative mean value judgements within multi-class scatterplots. We describe how the perception literature provides a set of expected constraints on the task, and evaluate these predictions with a large-scale perceptual study with crowd-sourced participants. Judgements are no harder when each set contains more points, redundant and conflicting encodings, as well as additional sets, do not strongly affect performance, and judgements are harder when using less salient encodings. These results have concrete ramifications for the design of scatterplots.",
"title": "Perception of Average Value in Multiclass Scatterplots",
"normalizedTitle": "Perception of Average Value in Multiclass Scatterplots",
"fno": "ttg2013122316",
"hasPdf": true,
"idPrefix": "tg",
"keywords": [
"Encoding",
"Shape Analysis",
"Color Imaging",
"Visual Systems",
"Information Visualization",
"Encoding",
"Shape Analysis",
"Color Imaging",
"Visual Systems",
"Perceptual Study",
"Psychophysics"
],
"authors": [
{
"givenName": "Michael",
"surname": "Gleicher",
"fullName": "Michael Gleicher",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin - Madison, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Michael",
"surname": "Correll",
"fullName": "Michael Correll",
"affiliation": "Dept. of Comput. Sci., Univ. of Wisconsin - Madison, Madison, WI, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Christine",
"surname": "Nothelfer",
"fullName": "Christine Nothelfer",
"affiliation": "Dept. of Psychol., Northwestern Univ., Chicago, IL, USA",
"__typename": "ArticleAuthorType"
},
{
"givenName": "Steven",
"surname": "Franconeri",
"fullName": "Steven Franconeri",
"affiliation": "Dept. of Psychol., Northwestern Univ., Chicago, IL, USA",
"__typename": "ArticleAuthorType"
}
],
"replicability": null,
"showBuyMe": true,
"showRecommendedArticles": true,
"isOpenAccess": false,
"issueNum": "12",
"pubDate": "2013-12-01 00:00:00",
"pubType": "trans",
"pages": "2316-2325",
"year": "2013",
"issn": "1077-2626",
"isbn": null,
"notes": null,
"notesType": null,
"__typename": "ArticleType"
},
"recommendedArticles": [
{
"id": "trans/tg/2018/01/08017638",
"title": "Assessing the Graphical Perception of Time and Speed on 2D+Time Trajectories",
"doi": null,
"abstractUrl": "/journal/tg/2018/01/08017638/13rRUx0xPTV",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2019/01/08440853",
"title": "Optimizing Color Assignment for Perception of Class Separability in Multiclass Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2019/01/08440853/17D45VTRoxJ",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09826389",
"title": "Automatic Scatterplot Design Optimization for Clustering Identification",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09826389/1EVdDTX0i2I",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/5555/01/09839572",
"title": "Evaluating Graphical Perception of Visual Motion for Quantitative Data Encoding",
"doi": null,
"abstractUrl": "/journal/tg/5555/01/09839572/1FisKWeqz8Q",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2023/01/09904433",
"title": "Evaluating the Use of Uncertainty Visualisations for Imputations of Data Missing At Random in Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2023/01/09904433/1H1gkkbe0hy",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "proceedings/vis/2022/8812/0/881200a125",
"title": "Color Coding of Large Value Ranges Applied to Meteorological Data",
"doi": null,
"abstractUrl": "/proceedings-article/vis/2022/881200a125/1J6hfQGSJj2",
"parentPublication": {
"id": "proceedings/vis/2022/8812/0",
"title": "2022 IEEE Visualization and Visual Analytics (VIS)",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2020/01/08805427",
"title": "Biased Average Position Estimates in Line and Bar Graphs: Underestimation, Overestimation, and Perceptual Pull",
"doi": null,
"abstractUrl": "/journal/tg/2020/01/08805427/1cG4xtnomys",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2021/02/09222295",
"title": "Modeling the Influence of Visual Density on Cluster Perception in Scatterplots Using Topology",
"doi": null,
"abstractUrl": "/journal/tg/2021/02/09222295/1nTqtC45a12",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "mags/cg/2021/05/09495208",
"title": "Visual Clustering Factors in Scatterplots",
"doi": null,
"abstractUrl": "/magazine/cg/2021/05/09495208/1vyjCkbMBvW",
"parentPublication": {
"id": "mags/cg",
"title": "IEEE Computer Graphics and Applications",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
},
{
"id": "trans/tg/2022/01/09556578",
"title": "The Weighted Average Illusion: Biases in Perceived Mean Position in Scatterplots",
"doi": null,
"abstractUrl": "/journal/tg/2022/01/09556578/1xlvYaEQTNC",
"parentPublication": {
"id": "trans/tg",
"title": "IEEE Transactions on Visualization & Computer Graphics",
"__typename": "ParentPublication"
},
"__typename": "RecommendedArticleType"
}
],
"adjacentArticles": {
"previous": {
"fno": "ttg2013122306",
"articleId": "13rRUxE04tz",
"__typename": "AdjacentArticleType"
},
"next": {
"fno": "ttg2013122326",
"articleId": "13rRUyuNswX",
"__typename": "AdjacentArticleType"
},
"__typename": "AdjacentArticlesType"
},
"webExtras": [],
"articleVideos": []
} |
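The record above (Gleicher et al., "Perception of Average Value in Multiclass Scatterplots") studies relative mean-value judgements: deciding which class of points in a scatterplot has the higher mean position. The sketch below only shows how the ground truth for such a judgement can be computed from labelled points; it is an assumption-laden illustration, not the study's stimulus-generation or analysis code, and the class labels and coordinates are invented.

```python
# Ground-truth computation for a relative mean-value judgement (illustrative only).
from statistics import mean

def class_means(points):
    """points: list of (class_label, x, y). Returns mean (x, y) per class."""
    by_class = {}
    for label, x, y in points:
        by_class.setdefault(label, []).append((x, y))
    return {label: (mean(p[0] for p in pts), mean(p[1] for p in pts))
            for label, pts in by_class.items()}

def higher_mean_y(points, class_a, class_b):
    """Which of the two classes has the higher mean vertical position?"""
    means = class_means(points)
    return class_a if means[class_a][1] > means[class_b][1] else class_b

if __name__ == "__main__":
    pts = [("circle", 1, 2.0), ("circle", 2, 3.0),
           ("square", 1, 2.5), ("square", 2, 2.7)]
    print(class_means(pts))
    print(higher_mean_y(pts, "circle", "square"))  # -> "square" (2.6 > 2.5)
```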