2017

  • B. Y. -L. Chang, S. D. Scott, and M. Hancock, Usage of Interactive Event Timelines in Collaborative Digital Tabletops Involving Automation, Springer International Publishing, 2017.
    @incollection{Chang:2017:CMIS2017_timeline,
      author    = {Y.-L. Betty Chang and Stacey D. Scott and Mark Hancock},
      title     = {Usage of Interactive Event Timelines in Collaborative Digital Tabletops Involving Automation},
      booktitle = {Collaboration Meets Interactive Spaces ({CMIS})},
      year      = {2017},
      isbn      = {978-3-319-45853-3},
      pages     = {203--232},
      numpages  = {30},
      url       = {http://www.springer.com/us/book/9783319458526},
      doi       = {10.1007/978-3-319-45853-3},
      publisher = {Springer International Publishing},
      pdf       = {2017-CMIS-Interactive-Event-Timeline-Author-Draft.pdf},
      subtype   = {inbook}
    }

2016

  • B. Y. -L. Chang, Supporting Situation Awareness and Workspace Awareness in Co-located Collaborative Systems Involving Dynamic Data, PhD Thesis, University of Waterloo, 2016.
    @phdthesis{Chang:2016:thesis,
      author = {Y.-L. Betty Chang},
      title  = {Supporting Situation Awareness and Workspace Awareness in Co-located Collaborative Systems Involving Dynamic Data},
      school = {University of Waterloo},
      year   = {2016},
      url    = {https://uwspace.uwaterloo.ca/handle/10012/10755},
    }

  • A. J. Bradley, T. Kirton, M. Hancock, and S. Carpendale, Language DNA: Visualizing a language decomposition, Digital Humanities Quarterly, vol. 10, iss. 4, 2016.

    In the Digital Humanities, there is a fast-growing body of research that uses data visualization to explore the structures of language. While new techniques are proliferating they still fall short of offering whole language experimentation. We provide a mathematical technique that maps words and symbols to ordered unique numerical values, showing that this mapping is one-to-one and onto. We demonstrate this technique through linear, planar, and volumetric visualizations of data sets as large as the Oxford English Dictionary and as small as a single poem. The visualizations of this space have been designed to engage the viewer in the analogic practice of comparison already in use by literary critics but on a scale inaccessible by other means. We studied our visualization with expert participants from many fields including English studies, Information Visualization, Human-Computer Interaction, and Computer Graphics. We present our findings from this study and discuss both the criticisms and validations of our approach.

    @article{Bradley:2016:LDNA,
      author     = {Adam James Bradley and Travis Kirton and Mark Hancock and Sheelagh Carpendale},
      title      = {Language {DNA}: Visualizing a language decomposition},
      journal    = {Digital Humanities Quarterly},
      issue_date = {2016},
      volume     = {10},
      number     = {4},
      year       = {2016},
      issn       = {1938-4122},
      publisher  = {ADHO},
      abstract   = {In the Digital Humanities, there is a fast-growing body of research that uses data visualization to explore the structures of language. While new techniques are proliferating they still fall short of offering whole language experimentation. We provide a mathematical technique that maps words and symbols to ordered unique numerical values, showing that this mapping is one-to-one and onto. We demonstrate this technique through linear, planar, and volumetric visualizations of data sets as large as the Oxford English Dictionary and as small as a single poem. The visualizations of this space have been designed to engage the viewer in the analogic practice of comparison already in use by literary critics but on a scale inaccessible by other means. We studied our visualization with expert participants from many fields including English studies, Information Visualization, Human-Computer Interaction, and Computer Graphics. We present our findings from this study and discuss both the criticisms and validations of our approach.},
      url        = {http://www.digitalhumanities.org/dhq/vol/10/4/000259/000259.html},
      subtype    = {journal}
    }

  • J. Harris, M. Hancock, and S. D. Scott, Leveraging Asymmetries in Multiplayer Games: Investigating Design Elements of Interdependent Play, in Proceedings of the 2016 Annual Symposium on Computer-Human Interaction in Play, New York, NY, USA, 2016, pp. 350-361.

    Many people develop lasting social bonds by playing games together, and there are a variety of games available so that individuals are likely to find games that appeal to their specific play preferences, abilities, and available time. However, there are many instances where people might want to play together, but would normally choose vastly different games for themselves, due to these various asymmetries in play experiences, such as grandparents and grandchildren, highly skilled players and novices, or even simply two players that enjoy different games. In this work, we aim to improve the design of asymmetric games-games that are designed to embrace and leverage differences between players to improve multiplayer engagement. This paper builds upon prior work to describe the elements of asymmetry that can be used to design such games, and uses these elements in the design of an asymmetric game, Beam Me ‘Round Scotty’! We present the results of a thematic analysis of a player experience study, discuss these findings, and propose an initial conceptual framework for discussion of design elements relevant to asymmetric games.

    @inproceedings{Harris:2016:LAM:2967934.2968113,
      author    = {John Harris and Mark Hancock and Stacey D. Scott},
      title     = {Leveraging Asymmetries in Multiplayer Games: Investigating Design Elements of Interdependent Play},
      abstract  = {Many people develop lasting social bonds by playing games together, and there are a variety of games available so that individuals are likely to find games that appeal to their specific play preferences, abilities, and available time. However, there are many instances where people might want to play together, but would normally choose vastly different games for themselves, due to these various asymmetries in play experiences, such as grandparents and grandchildren, highly skilled players and novices, or even simply two players that enjoy different games. In this work, we aim to improve the design of asymmetric games-games that are designed to embrace and leverage differences between players to improve multiplayer engagement. This paper builds upon prior work to describe the elements of asymmetry that can be used to design such games, and uses these elements in the design of an asymmetric game, Beam Me 'Round Scotty'! We present the results of a thematic analysis of a player experience study, discuss these findings, and propose an initial conceptual framework for discussion of design elements relevant to asymmetric games.},
      booktitle = {Proceedings of the 2016 Annual Symposium on Computer-Human Interaction in Play},
      series    = {CHI PLAY '16},
      year      = {2016},
      isbn      = {978-1-4503-4456-2},
      location  = {Austin, Texas, USA},
      pages     = {350--361},
      numpages  = {12},
      doi       = {10.1145/2967934.2968113},
      acmid     = {2968113},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {asymmetric games, game design, player experience testing},
      pdf       = {p350-harris.pdf},
      subtype   = {conference}
    }

  • J. Sadeghi, C. Perin, T. Flemisch, M. Hancock, and S. Carpendale, Flexible Trees: Sketching Tree Layouts, in Proceedings of the International Working Conference on Advanced Visual Interfaces, New York, NY, USA, 2016, pp. 84-87.

    We introduce Flexible Trees, a sketch-based layout adjustment technique. Although numerous tree layout algorithms exist, these algorithms are usually bound to fit within standard shapes such as rectangles, circles and triangles. In order to provide the possibility of interactively customizing a tree layout, we offer a free-form sketch-based interaction through which one can re-define the boundary constraints for the tree layouts by combining ray-line intersection and line segment intersection. Flexible Trees offer topology preserving adjustments; can be used with a variety of tree layouts; and offer a simple way of authoring tree layouts for infographic purposes.

    @inproceedings{Sadeghi:2016:FTS:2909132.2909274,
      author    = {Javad Sadeghi and Charles Perin and Tamara Flemisch and Mark Hancock and Sheelagh Carpendale},
      title     = {Flexible Trees: Sketching Tree Layouts},
      abstract  = {We introduce Flexible Trees, a sketch-based layout adjustment technique. Although numerous tree layout algorithms exist, these algorithms are usually bound to fit within standard shapes such as rectangles, circles and triangles. In order to provide the possibility of interactively customizing a tree layout, we offer a free-form sketch-based interaction through which one can re-define the boundary constraints for the tree layouts by combining ray-line intersection and line segment intersection. Flexible Trees offer topology preserving adjustments; can be used with a variety of tree layouts; and offer a simple way of authoring tree layouts for infographic purposes.},
      booktitle = {Proceedings of the International Working Conference on Advanced Visual Interfaces},
      series    = {AVI '16},
      year      = {2016},
      isbn      = {978-1-4503-4131-8},
      location  = {Bari, Italy},
      pages     = {84--87},
      numpages  = {4},
      doi       = {10.1145/2909132.2909274},
      acmid     = {2909274},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {Visualization, authoring, infographics, interaction, sketching, trees},
      pdf       = {p130-sadeghi.pdf},
      subtype   = {conference}
    }

  • M. Nacenta, M. Hancock, C. Gutwin, and S. Carpendale, The effects of changing projection geometry on perception of 3D objects on and around tabletops, ACM Transactions on Computer-Human Interaction (ToCHI), vol. 23, iss. 2, p. 11:1–11:54, 2016.

    Displaying 3D objects on horizontal displays can cause problems in the way that the virtual scene is presented on the 2D surface; inappropriate choices in how 3D is represented can lead to distorted images and incorrect object interpretations. We present four experiments that test 3D perception. We varied projection geometry in three ways: type of projection (perspective/parallel), separation between the observer’s point of view and the projection’s center (discrepancy), and the presence of motion parallax (with/without parallax). Projection geometry had strong effects different for each task. Reducing discrepancy is desirable for orientation judgments, but not for object recognition or internal angle judgments. Using a fixed center of projection above the table reduces error and improves accuracy in most tasks. The results have far-reaching implications for the design of 3D views on tables, in particular, for multi-user applications where projections that appear correct for one person will not be perceived correctly by another.

    @article{Nacenta:2016:Projection,
      author     = {Miguel Nacenta and Mark Hancock and Carl Gutwin and Sheelagh Carpendale},
      title      = {The effects of changing projection geometry on perception of {3D} objects on and around tabletops},
      journal    = {ACM Transactions on Computer-Human Interaction (ToCHI)},
      issue_date = {May 2016},
      volume     = {23},
      number     = {2},
      month      = may,
      year       = {2016},
      issn       = {1073-0516},
      pages      = {11:1--11:54},
      articleno  = {11},
      numpages   = {54},
      doi        = {10.1145/2845081},
      acmid      = {2845081},
      publisher  = {ACM},
      address    = {New York, NY, USA},
      keywords   = {3D perception, 3D representation, 3D visualization, Interactive tabletops, center-of-projection, fish-tank virtual reality, point-of-view, projection},
      abstract   = {Displaying 3D objects on horizontal displays can cause problems in the way that the virtual scene is presented on the 2D surface; inappropriate choices in how 3D is represented can lead to distorted images and incorrect object interpretations. We present four experiments that test 3D perception. We varied projection geometry in three ways: type of projection (perspective/parallel), separation between the observer's point of view and the projection's center (discrepancy), and the presence of motion parallax (with/without parallax). Projection geometry had strong effects different for each task. Reducing discrepancy is desirable for orientation judgments, but not for object recognition or internal angle judgments. Using a fixed center of projection above the table reduces error and improves accuracy in most tasks. The results have far-reaching implications for the design of 3D views on tables, in particular, for multi-user applications where projections that appear correct for one person will not be perceived correctly by another.},
      pdf        = {a11-nacenta.pdf},
      subtype    = {journal}
    }

  • M. Azmandian, M. Hancock, H. Benko, E. Ofek, and A. D. Wilson, Haptic Retargeting Video Showcase: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experience, in Proc. CHI EA, New York, NY, USA, 2016, pp. 3-3.

    Manipulating a virtual object with appropriate passive haptic cues provides a satisfying sense of presence in virtual reality. However, scaling such experiences to support multiple virtual objects is a challenge as each one needs to be accompanied with a precisely-located haptic proxy object. We showcase a solution that overcomes this limitation by hacking human perception. Our framework for repurposing passive haptics, called haptic retargeting, leverages the dominance of vision when our senses conflict. With haptic retargeting, a single physical prop can provide passive haptics for multiple virtual objects. We introduce three approaches for dynamically aligning physical and virtual objects: body manipulation, world manipulation and a hybrid technique which combines both world and body warping. This video accompanies our CHI paper.

    @inproceedings{Azmandian:2016:HRV:2851581.2889441,
      author    = {Mahdi Azmandian and Mark Hancock and Hrvoje Benko and Eyal Ofek and Andrew D. Wilson},
      title     = {Haptic Retargeting Video Showcase: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experience},
      abstract  = {Manipulating a virtual object with appropriate passive haptic cues provides a satisfying sense of presence in virtual reality. However, scaling such experiences to support multiple virtual objects is a challenge as each one needs to be accompanied with a precisely-located haptic proxy object. We showcase a solution that overcomes this limitation by hacking human perception. Our framework for repurposing passive haptics, called haptic retargeting, leverages the dominance of vision when our senses conflict. With haptic retargeting, a single physical prop can provide passive haptics for multiple virtual objects. We introduce three approaches for dynamically aligning physical and virtual objects: body manipulation, world manipulation and a hybrid technique which combines both world and body warping. This video accompanies our CHI paper.},
      booktitle = {Proc. CHI EA},
      series    = {CHI EA '16},
      year      = {2016},
      isbn      = {978-1-4503-4082-3},
      location  = {Santa Clara, California, USA},
      pages     = {3--3},
      numpages  = {1},
      doi       = {10.1145/2851581.2889441},
      acmid     = {2889441},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {haptics, perception, virtual reality},
      subtype   = {other}
    }

  • M. Azmandian, M. Hancock, H. Benko, E. Ofek, and A. D. Wilson, A Demonstration of Haptic Retargeting: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experience, in Proc. CHI EA, New York, NY, USA, 2016, pp. 3647-3650.

    Manipulating a virtual object with appropriate passive haptic cues provides a satisfying sense of presence in virtual reality. However, scaling such experiences to support multiple virtual objects is a challenge as each one needs to be accompanied with a precisely-located haptic proxy object. We showcase a solution that overcomes this limitation by hacking human perception. Our framework for repurposing passive haptics, called haptic retargeting, leverages the dominance of vision when our senses conflict. With haptic retargeting, a single physical prop can provide passive haptics for multiple virtual objects. We introduce three approaches for dynamically aligning physical and virtual objects: body manipulation, world manipulation and a hybrid technique which combines both world and body manipulation. This demonstration accompanies our CHI 2016 paper.

    @inproceedings{Azmandian:2016:DHR:2851581.2890265,
      author    = {Mahdi Azmandian and Mark Hancock and Hrvoje Benko and Eyal Ofek and Andrew D. Wilson},
      title     = {A Demonstration of Haptic Retargeting: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experience},
      booktitle = {Proc. CHI EA},
      abstract  = {Manipulating a virtual object with appropriate passive haptic cues provides a satisfying sense of presence in virtual reality. However, scaling such experiences to support multiple virtual objects is a challenge as each one needs to be accompanied with a precisely-located haptic proxy object. We showcase a solution that overcomes this limitation by hacking human perception. Our framework for repurposing passive haptics, called haptic retargeting, leverages the dominance of vision when our senses conflict. With haptic retargeting, a single physical prop can provide passive haptics for multiple virtual objects. We introduce three approaches for dynamically aligning physical and virtual objects: body manipulation, world manipulation and a hybrid technique which combines both world and body manipulation. This demonstration accompanies our CHI 2016 paper.},
      series    = {CHI EA '16},
      year      = {2016},
      isbn      = {978-1-4503-4082-3},
      location  = {Santa Clara, California, USA},
      pages     = {3647--3650},
      numpages  = {4},
      doi       = {10.1145/2851581.2890265},
      acmid     = {2890265},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {haptics, perception, virtual reality},
      subtype   = {other}
    }

  • M. Azmandian, M. Hancock, H. Benko, E. Ofek, and A. D. Wilson, Haptic Retargeting: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experiences, in Proc. CHI, New York, NY, USA, 2016, pp. 1968-1979.

    Manipulating a virtual object with appropriate passive haptic cues provides a satisfying sense of presence in virtual reality. However, scaling such experiences to support multiple virtual objects is a challenge as each one needs to be accompanied with a precisely-located haptic proxy object. We propose a solution that overcomes this limitation by hacking human perception. We have created a framework for repurposing passive haptics, called haptic retargeting, that leverages the dominance of vision when our senses conflict. With haptic retargeting, a single physical prop can provide passive haptics for multiple virtual objects. We introduce three approaches for dynamically aligning physical and virtual objects: world manipulation, body manipulation and a hybrid technique which combines both world and body manipulation. Our study results indicate that all our haptic retargeting techniques improve the sense of presence when compared to typical wand-based 3D control of virtual objects. Furthermore, our hybrid haptic retargeting achieved the highest satisfaction and presence scores while limiting the visible side-effects during interaction.

    @inproceedings{Azmandian:2016:HRD:2858036.2858226,
      author    = {Mahdi Azmandian and Mark Hancock and Hrvoje Benko and Eyal Ofek and Andrew D. Wilson},
      title     = {Haptic Retargeting: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experiences},
      abstract  = {Manipulating a virtual object with appropriate passive haptic cues provides a satisfying sense of presence in virtual reality. However, scaling such experiences to support multiple virtual objects is a challenge as each one needs to be accompanied with a precisely-located haptic proxy object. We propose a solution that overcomes this limitation by hacking human perception. We have created a framework for repurposing passive haptics, called haptic retargeting, that leverages the dominance of vision when our senses conflict. With haptic retargeting, a single physical prop can provide passive haptics for multiple virtual objects. We introduce three approaches for dynamically aligning physical and virtual objects: world manipulation, body manipulation and a hybrid technique which combines both world and body manipulation. Our study results indicate that all our haptic retargeting techniques improve the sense of presence when compared to typical wand-based 3D control of virtual objects. Furthermore, our hybrid haptic retargeting achieved the highest satisfaction and presence scores while limiting the visible side-effects during interaction.},
      booktitle = {Proc. CHI},
      series    = {CHI '16},
      year      = {2016},
      isbn      = {978-1-4503-3362-7},
      location  = {Santa Clara, California, USA},
      pages     = {1968--1979},
      numpages  = {12},
      doi       = {10.1145/2858036.2858226},
      acmid     = {2858226},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {haptics, perception, virtual reality},
      pdf       = {HapticRetargetingCHI2016_final.pdf},
      youtube   = {SiH3IHEdmR0},
      subtype   = {conference}
    }

  • R. R. Wehbe, D. K. Watson, G. F. Tondello, M. Ganaba, M. Stocco, A. Lee, and L. E. Nacke, ABOVE WATER: An Educational Game for Anxiety, in Proceedings of the 2016 Annual Symposium on Computer-Human Interaction in Play Companion Extended Abstracts, New York, NY, USA, 2016, pp. 79-84.
    @inproceedings{Wehbe:2016:AWE:2968120.2971804,
      author    = {Wehbe, Rina R. and Watson, Diane K. and Tondello, Gustavo F. and Ganaba, Marim and Stocco, Melissa and Lee, Alvin and Nacke, Lennart E.},
      title     = {{ABOVE WATER}: An Educational Game for Anxiety},
      booktitle = {Proceedings of the 2016 Annual Symposium on Computer-Human Interaction in Play Companion Extended Abstracts},
      series    = {CHI PLAY Companion '16},
      year      = {2016},
      isbn      = {978-1-4503-4458-6},
      location  = {Austin, Texas, USA},
      pages     = {79--84},
      numpages  = {6},
      doi       = {10.1145/2968120.2971804},
      acmid     = {2971804},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {games for health, mental health, psychology},
    }

2015

  • J. Harris, M. Hancock, and S. D. Scott, "Beam Me ‘Round, Scotty!": Studying Asymmetry and Interdependence in a Prototype Cooperative Game, in Proc. CHI PLAY, New York, NY, USA, 2015, pp. 775-778, (People’s Choice Award & Student Game Design Competition Winner).

    In "Beam Me ‘Round, Scotty!", pairs of players engage with asymmetric gameplay mechanics and interfaces (e.g. leading vs. support, action vs. strategy, gamepad vs. mouse interaction) in a cooperative adventure to escape a hostile alien world. "Beam Me ‘Round, Scotty!" presents a multi-faceted play experience designed to bridge differences in player skills, styles, and interests. By introducing deliberate interdependence through asymmetry, different types of players can come together and have fun overcoming obstacles, defeating enemies, and escaping the alien planet via their unique contributions.

    @inproceedings{Harris:2015:BMR:2793107.2810274,
      author    = {Harris, John and Hancock, Mark and Scott, Stacey D.},
      title     = {{"Beam Me 'Round, Scotty!"}: Studying Asymmetry and Interdependence in a Prototype Cooperative Game},
      booktitle = {Proc. CHI PLAY},
      series    = {CHI PLAY '15},
      year      = {2015},
      isbn      = {978-1-4503-3466-2},
      location  = {London, United Kingdom},
      pages     = {775--778},
      numpages  = {4},
      doi       = {10.1145/2793107.2810274},
      acmid     = {2810274},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {asymmetric games, cooperative games, player typology},
      abstract  = {In "Beam Me 'Round, Scotty!", pairs of players engage with asymmetric gameplay mechanics and interfaces (e.g. leading vs. support, action vs. strategy, gamepad vs. mouse interaction) in a cooperative adventure to escape a hostile alien world. "Beam Me 'Round, Scotty!" presents a multi-faceted play experience designed to bridge differences in player skills, styles, and interests. By introducing deliberate interdependence through asymmetry, different types of players can come together and have fun overcoming obstacles, defeating enemies, and escaping the alien planet via their unique contributions.},
      note      = {(People's Choice Award \& Student Game Design Competition Winner)},
      pdf       = {p775-harris.pdf},
      movie     = {beam-me-round-scotty.mp4},
      subtype   = {other}
    }

  • A. Bradley, C. MacArthur, M. Hancock, and S. Carpendale, Gendered or Neutral? Considering the Language of HCI, in Proc. GI, Toronto, Ontario, Canada, 2015, pp. 163-170.

    In this paper, we present a Mechanical Turk study that explores how the most common words that have been used to refer to people in recent HCI literature are received by non-experts. The top five CHI 2014 people words are: user, participant, person, designer, and researcher. We asked participants to think about one of these words for ten seconds and then to draw an image of it. After the drawing was done we asked simple demographic questions about both the participant and the created image. Our results show that while generally our participants did perceive most of these words as predominately male, there were two notable exceptions. Women appear to perceive the terms "person" and "participant" as gender neutral. That is, they were just as likely to draw a person or a participant as male or female. So while these two words are not exactly gender neutral in that men largely perceived them as male, at least women did not appear to feel excluded by these terms. We offer an increased understanding of the perception of HCIs people words and discuss the challenges this poses to our community in striving toward gender inclusiveness.

    @inproceedings{Bradley:2015:Users,
      author    = {Adam Bradley and Cayley MacArthur and Mark Hancock and Sheelagh Carpendale},
      title     = {Gendered or Neutral? Considering the Language of {HCI}},
      booktitle = {Proc. GI},
      series    = {GI '15},
      year      = {2015},
      location  = {Halifax, Nova Scotia, Canada},
      pages     = {163--170},
      publisher = {Canadian Information Processing Society},
      address   = {Toronto, Ontario, Canada},
      abstract  = {In this paper, we present a Mechanical Turk study that explores how the most common words that have been used to refer to people in recent HCI literature are received by non-experts. The top five CHI 2014 people words are: user, participant, person, designer, and researcher. We asked participants to think about one of these words for ten seconds and then to draw an image of it. After the drawing was done we asked simple demographic questions about both the participant and the created image. Our results show that while generally our participants did perceive most of these words as predominately male, there were two notable exceptions. Women appear to perceive the terms "person" and "participant" as gender neutral. That is, they were just as likely to draw a person or a participant as male or female. So while these two words are not exactly gender neutral in that men largely perceived them as male, at least women did not appear to feel excluded by these terms. We offer an increased understanding of the perception of HCIs people words and discuss the challenges this poses to our community in striving toward gender inclusiveness.},
      pdf       = {gi2015-gendered-language.pdf},
      subtype   = {conference}
    }

  • D. Valtchanov and M. Hancock, EnviroPulse: Providing Feedback About the Expected Affective Valence of the Environment, in Proc. CHI, New York, NY, USA, 2015, pp. 2073-2082.

    Interacting with nature is beneficial to a person’s mental state, but it can sometimes be difficult to find environments that will induce positive affect (e.g., when planning a run). In this paper, we describe EnviroPulse, a system for automatically determining and communicating the expected affective valence (EAV) of environments to individuals. We describe a prototype that allows this to be used in real-time on a smartphone, but EnviroPulse could easily be incorporated into GPS systems, mapping services, or image-based systems. Our work differs from existing work in affective computing in that, rather than detecting a user’s affect directly, we automatically determine the EAV of the environment through visual analysis. We present results that suggest our system can determine the EAV of environments. We also introduce real-time affective visual feedback of the calculated EAV of the images, and present results from an informal study suggesting that real-time visual feedback can be used for induction of affect.

    @inproceedings{Valtchanov:2015:EPF:2702123.2702510,
      author    = {Deltcho Valtchanov and Mark Hancock},
      title     = {{EnviroPulse}: Providing Feedback About the Expected Affective Valence of the Environment},
      abstract  = {Interacting with nature is beneficial to a person's mental state, but it can sometimes be difficult to find environments that will induce positive affect (e.g., when planning a run). In this paper, we describe EnviroPulse, a system for automatically determining and communicating the expected affective valence (EAV) of environments to individuals. We describe a prototype that allows this to be used in real-time on a smartphone, but EnviroPulse could easily be incorporated into GPS systems, mapping services, or image-based systems. Our work differs from existing work in affective computing in that, rather than detecting a user's affect directly, we automatically determine the EAV of the environment through visual analysis. We present results that suggest our system can determine the EAV of environments. We also introduce real-time affective visual feedback of the calculated EAV of the images, and present results from an informal study suggesting that real-time visual feedback can be used for induction of affect.},
      booktitle = {Proc. CHI},
      series    = {CHI '15},
      year      = {2015},
      isbn      = {978-1-4503-3145-6},
      location  = {Seoul, Republic of Korea},
      pages     = {2073--2082},
      numpages  = {10},
      doi       = {10.1145/2702123.2702510},
      acmid     = {2702510},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {affective computing, human factors, mobile interfaces},
      pdf       = {p2073-valtchanov.pdf},
      movie     = {EnviroPulse.mp4},
      youtube   = {Ijzhy7GwXak},
      subtype   = {conference}
    }

  • B. Y. -L. Chang, C. Fong, E. Tse, M. Hancock, and S. D. Scott, "Callout Bubble Saved My Life": Workspace Awareness Support in BYOD Classrooms, in Proc. ITS, New York, NY, USA, 2015, pp. 73-82.
    @inproceedings{Chang:2015:CBS:2817721.2817733,
      author    = {Y.-L. Betty Chang and Cresencia Fong and Edward Tse and Mark Hancock and Stacey D. Scott},
      title     = {{"Callout Bubble Saved My Life"}: Workspace Awareness Support in {BYOD} Classrooms},
      booktitle = {Proc. ITS},
      series    = {ITS '15},
      year      = {2015},
      isbn      = {978-1-4503-3899-8},
      location  = {Madeira, Portugal},
      pages     = {73--82},
      numpages  = {10},
      doi       = {10.1145/2817721.2817733},
      acmid     = {2817733},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {byod classrooms, education, shared canvas, workspace awareness},
    }

2014

  • M. Soroush, M. Hancock, and V. Bohns, Self-control in casual games: The relationship between Candy Crush Saga players’ in-app purchases and self-control, in Proc. GEM, 2014.

    Casual Games and free-to-play games have recently rapidly increased in popularity, perhaps in part because of the success of in-app purchases and micro-transactions as an economic model. While these games are often touted for their success in the gaming market, the effect on players when faced with such frequent purchasing decisions in-game is not well-studied. Theories of self-control suggest that people have limited resource pools of self-control, and facing frequent frustration and purchasing decisions may deplete this resource. In this paper, we present the results of a Mechanical Turk study on a popular casual game, Candy Crush Saga, to investigate various factors impacting player behaviour, with a specific focus on self-control. Our study reveals that the amount players spend on in-app purchases is correlated with lower levels of self-control. On the other hand, purchases and self-control levels were not significantly correlated with the amount of time people play, game addiction, or problem video game playing. We present design recommendations which can be applied to existing or new game designs in terms of both the economics of games and the psychology of games, including mechanics to account for low self-control and to avoid negative effects on self-control.

    @InProceedings{soroush:2014:candycrush,
    author = {Milad Soroush and Mark Hancock and Vanessa Bohns},
    title = {Self-control in casual games: The relationship between {Candy Crush Saga} players' in-app purchases and self-control},
    abstract = {Casual Games and free-to-play games have recently
    rapidly increased in popularity, perhaps in part because of the
    success of in-app purchases and micro-transactions as an economic
    model. While these games are often touted for their success
    in the gaming market, the effect on players when faced with
    such frequent purchasing decisions in-game is not well-studied.
    Theories of self-control suggest that people have limited resource
    pools of self-control, and facing frequent frustration and purchasing
    decisions may deplete this resource. In this paper, we
    present the results of a Mechanical Turk study on a popular casual
    game, Candy Crush Saga, to investigate various factors
    impacting player behaviour, with a specific focus on self-control.
    Our study reveals that the amount players spend on in-app purchases
    is correlated with lower levels of self-control. On the other
    hand, purchases and self-control levels were not significantly
    correlated with the amount of time people play, game addiction,
    or problem video game playing. We present design recommendations
    which can be applied to existing or new game designs in
    terms of both the economics of games and the psychology of
    games, including mechanics to account for low self-control and to
    avoid negative effects on self-control.},
    booktitle = {Proc. GEM},
    year = {2014},
    numpages = {6},
    publisher = {IEEE},
    pdf = {ieee-gem2014_submission_56.pdf},
    subtype = {conference}
    }

  • R. Langer, M. Hancock, and S. D. Scott, Suspenseful design: Engaging emotionally with complex applications through compelling narratives, in Proc. GEM, 2014.

    Although story is a critical component of many games, stories and storytelling techniques are rarely used in other kinds of applications. In this paper, we introduce a framework for constructing suspenseful, narrative-based software applications. While many interface designs aim to reduce confusion and complexity, we introduce an alternative for inherently complex software by engaging people through narrative. We describe a framework for incorporating suspenseful elements into an interface and apply it to a prototype of a suspenseful tutorial. We conducted a controlled experiment that compared this suspenseful tutorial to two more traditional tutorial designs. Participants who used the narrative-based tutorial reported greater feelings of hopeful suspense, which previous studies have found to be correlated with enjoyment and interest.

    @InProceedings{langer:2014:suspense,
    author = {Rebecca Langer and Mark Hancock and Stacey D. Scott},
    title = {Suspenseful design: Engaging emotionally with complex applications through compelling narratives},
    abstract = {Although story is a critical component of many games, stories and storytelling techniques are rarely used in other kinds of applications. In this paper, we introduce a framework for constructing suspenseful, narrative-based software applications. While many interface designs aim to reduce confusion and complexity, we introduce an alternative for inherently complex software by engaging people through narrative. We describe a framework for incorporating suspenseful elements into an interface and apply it to a prototype of a suspenseful tutorial. We conducted a controlled experiment that compared this suspenseful tutorial to two more traditional tutorial designs. Participants who used the narrative-based tutorial reported greater feelings of hopeful suspense, which previous studies have found to be correlated with enjoyment and interest.},
    booktitle = {Proc. GEM},
    year = {2014},
    numpages = {8},
    publisher = {IEEE},
    pdf = {ieee-gem2014_submission_38.pdf},
    subtype = {conference}
    }

  • A. Alzayat, M. Hancock, and M. Nacenta, Quantitative Measurement of Virtual vs. Physical Object Embodiment Through Kinesthetic Figural After Effects, in Proc. CHI, New York, NY, USA, 2014, pp. 2903-2912.

    Over the past decade, multi-touch surfaces have become commonplace, with many researchers and practitioners describing the benefits of their natural, physical-like interactions. We present a pair of studies that empirically investigates the psychophysical effects of direct interaction with both physical and virtual artefacts. We use the phenomenon of Kinesthetic Figural After Effects-a change in understanding of the physical size of an object after a period of exposure to an object of different size. Our studies show that, while this effect is robustly reproducible when using physical artefacts, this same effect does not manifest when manipulating virtual artefacts on a direct, multi-touch tabletop display. We contribute quantitative evidence suggesting a psychophysical difference in our response to physical vs. virtual objects, and discuss future research directions to explore measurable phenomena to evaluate the presence of physical-like changes from virtual on-screen objects.

    @inproceedings{Alzayat:2014:QMV:2611247.2557282,
    author    = {Ayman Alzayat and Mark Hancock and Miguel Nacenta},
    title     = {Quantitative Measurement of Virtual vs. Physical Object Embodiment Through Kinesthetic Figural After Effects},
    booktitle = {Proc. CHI},
    series    = {CHI '14},
    year      = {2014},
    isbn      = {978-1-4503-2473-1},
    location  = {Toronto, Ontario, Canada},
    pages     = {2903--2912},
    numpages  = {10},
    doi       = {10.1145/2556288.2557282},
    acmid     = {2557282},
    publisher = {ACM},
    address   = {New York, NY, USA},
    keywords  = {embodied interaction, multi-touch, physical interaction, tabletop displays, tangible user interfaces},
    abstract  = {Over the past decade, multi-touch surfaces have become commonplace, with many researchers and practitioners describing the benefits of their natural, physical-like interactions. We present a pair of studies that empirically investigates the psychophysical effects of direct interaction with both physical and virtual artefacts. We use the phenomenon of Kinesthetic Figural After Effects-a change in understanding of the physical size of an object after a period of exposure to an object of different size. Our studies show that, while this effect is robustly reproducible when using physical artefacts, this same effect does not manifest when manipulating virtual artefacts on a direct, multi-touch tabletop display. We contribute quantitative evidence suggesting a psychophysical difference in our response to physical vs. virtual objects, and discuss future research directions to explore measurable phenomena to evaluate the presence of physical-like changes from virtual on-screen objects.},
    pdf       = {p2903-alzayat.pdf},
    subtype   = {conference}
    }

  • M. Mostafapour and M. Hancock, Exploring narrative gestures on digital surfaces, in Proc. ITS, 2014, pp. 5-14.

    A significant amount of research on digital tables has traditionally investigated the use of hands and fingers to control 2D and 3D artifacts, and has even investigated people's expectations when interacting with these devices. However, people often use their hands and body to communicate and express ideas to others. In this work, we explore narrative gestures on a digital table for the purpose of telling stories. We present the results of an observational study of people illustrating stories on a digital table with virtual figurines, and in both a physical sandbox and water with physical figurines. Our results show that the narrative gestures people use to tell stories with objects are highly varied and, in some cases, fundamentally different from the gestures designers and researchers have suggested for controlling digital content. In contrast to smooth, pre-determined drags for movement and rotation, people use jiggling, repeated lifting, and bimanual actions to express rich, simultaneous, and independent actions by multiple characters in a story. Based on these results, we suggest that future storytelling designs consider the importance of touch actions for narration, in-place manipulations, the (possibly non-linear) path of a drag, allowing expression through manipulations, and two-handed simultaneous manipulation of multiple objects.

    @InProceedings{Mostafapour:2014:narrative,
    author = {Mehrnaz Mostafapour and Mark Hancock},
    title = {Exploring narrative gestures on digital surfaces},
    abstract = {A significant amount of research on digital tables has traditionally investigated the use of hands and fingers to control 2D and 3D artifacts, and has even investigated people's expectations when interacting with these devices. However, people often use their hands and body to communicate and express ideas to others. In this work, we explore narrative gestures on a digital table for the purpose of telling stories. We present the results of an observational study of people illustrating stories on a digital table with virtual figurines, and in both a physical sandbox and water with physical figurines. Our results show that the narrative gestures people use to tell stories with objects are highly varied and, in some cases, fundamentally different from the gestures designers and researchers have suggested for controlling digital content. In contrast to smooth, pre-determined drags for movement and rotation, people use jiggling, repeated lifting, and bimanual actions to express rich, simultaneous, and independent actions by multiple characters in a story. Based on these results, we suggest that future storytelling designs consider the importance of touch actions for narration, in-place manipulations, the (possibly non-linear) path of a drag, allowing expression through manipulations, and two-handed simultaneous manipulation of multiple objects.},
    booktitle = {Proc. ITS},
    year = {2014},
    doi = {10.1145/2669485.2669510},
    pdf = {p5-mostafapour.pdf},
    pages = {5--14},
    publisher = {ACM},
    subtype = {conference}
    }

  • J. Harris, M. Hancock, and S. Scott, "Beam Me ‘Round, Scotty!": Exploring the Effect of Interdependence in Asymmetric Cooperative Games, in Proceedings of the First ACM SIGCHI Annual Symposium on Computer-human Interaction in Play, New York, NY, USA, 2014, pp. 417-418.
    @inproceedings{Harris:2014:BMR:2658537.2661311,
    author = {Harris, John and Hancock, Mark and Scott, Stacey},
    title = {{"Beam Me 'Round, Scotty!"}: Exploring the Effect of Interdependence in Asymmetric Cooperative Games},
    booktitle = {Proceedings of the First ACM SIGCHI Annual Symposium on Computer-human Interaction in Play},
    series = {CHI PLAY '14},
    year = {2014},
    isbn = {978-1-4503-3014-5},
    location = {Toronto, Ontario, Canada},
    pages = {417--418},
    numpages = {2},
    pdf = {p417-harris.pdf},
    doi = {10.1145/2658537.2661311},
    acmid = {2661311},
    publisher = {ACM},
    address = {New York, NY, USA},
    keywords = {asymmetric games, cooperatives games, game design, interdependence, symbiotic play},
    subtype = {poster}
    }

  • B. Y. -L. Chang, S. D. Scott, and M. Hancock, Supporting Situation Awareness in Collaborative Tabletop Systems with Automation, in Proc. ITS, New York, NY, USA, 2014, pp. 185-194.
    @inproceedings{Chang:2014:SSA:2669485.2669496,
    author    = {Y.-L. Betty Chang and Stacey D. Scott and Mark Hancock},
    title     = {Supporting Situation Awareness in Collaborative Tabletop Systems with Automation},
    booktitle = {Proc. ITS},
    series    = {ITS '14},
    year      = {2014},
    isbn      = {978-1-4503-2587-5},
    location  = {Dresden, Germany},
    pages     = {185--194},
    numpages  = {10},
    url       = {http://doi.acm.org/10.1145/2669485.2669496},
    doi       = {10.1145/2669485.2669496},
    acmid     = {2669496},
    publisher = {ACM},
    address   = {New York, NY, USA},
    keywords  = {automation, collaboration, digital tabletop, gaming, interaction design, situation awareness}
    }

  • V. Cheung, D. Watson, J. Vermeulen, M. Hancock, and S. Scott, Overcoming Interaction Barriers in Large Public Displays Using Personal Devices, in Extended Abstracts of the Ninth ACM International Conference on Interactive Tabletops and Surfaces, New York, NY, USA, 2014, pp. 375-380.
    @inproceedings{Cheung:2014:OIB:2669485.2669549,
    author = {Cheung, Victor and Watson, Diane and Vermeulen, Jo and Hancock, Mark and Scott, Stacey},
    title = {Overcoming Interaction Barriers in Large Public Displays Using Personal Devices},
    booktitle = {Extended Abstracts of the Ninth ACM International Conference on Interactive Tabletops and Surfaces},
    series = {ITS '14},
    year = {2014},
    isbn = {978-1-4503-2587-5},
    location = {Dresden, Germany},
    pages = {375--380},
    numpages = {6},
    url = {http://doi.acm.org/10.1145/2669485.2669549},
    doi = {10.1145/2669485.2669549},
    acmid = {2669549},
    publisher = {ACM},
    address = {New York, NY, USA},
    keywords = {interaction design, large interactive displays, personal devices},
    }

  • D. Watson, D. Valtchanov, M. Hancock, and R. Mandryk, Designing a Gameful System to Support the Collection, Curation, Exploration, and Sharing of Sports Memorabilia, in Extended Abstracts of the First ACM SIGCHI Annual Symposium on Computer-human Interaction in Play, New York, NY, USA, 2014, pp. 451-452.
    @inproceedings{Watson:2014:DGS:2658537.2661322,
    author = {Watson, Diane and Valtchanov, Deltcho and Hancock, Mark and Mandryk, Regan},
    title = {Designing a Gameful System to Support the Collection, Curation, Exploration, and Sharing of Sports Memorabilia},
    booktitle = {Extended Abstracts of the First ACM SIGCHI Annual Symposium on Computer-human Interaction in Play},
    series = {CHI PLAY '14},
    year = {2014},
    isbn = {978-1-4503-3014-5},
    location = {Toronto, Ontario, Canada},
    pages = {451--452},
    numpages = {2},
    url = {http://doi.acm.org/10.1145/2658537.2661322},
    doi = {10.1145/2658537.2661322},
    acmid = {2661322},
    publisher = {ACM},
    address = {New York, NY, USA},
    keywords = {collectors, gameful design, gamification, hockey},
    }

2013

  • D. Lindlbauer, M. Haller, M. Hancock, S. Scott, and W. Stuerzlinger, Perceptual Grouping: Selection Assistance for Digital Sketching, in Proc. ITS, 2013, pp. 51-60.

    Modifying a digital sketch may require multiple selections before a particular editing tool can be applied. Especially on large interactive surfaces, such interactions can be fatiguing. To address this issue, we propose a method, called Suggero, to facilitate the selection process of digital ink. Suggero identifies groups of perceptually related drawing objects. These "perceptual groups" are then used to suggest possible selection extensions in response to a person's initial selection. Two studies were conducted. First, a background study investigated participants' expectations of such a selection assistance tool. Then, an empirical study compared the effectiveness of Suggero with an existing manual selection technique. Study results revealed that selections required fewer pen interactions and less pen movement with Suggero, suggesting that Suggero helps minimize fatigue during digital sketching.

    @InProceedings{lindlbauer:2013:suggero,
    author = {David Lindlbauer and Michael Haller and Mark Hancock and Stacey Scott and Wolfgang Stuerzlinger},
    title = {Perceptual Grouping: Selection Assistance for Digital Sketching},
    booktitle = {Proc. ITS},
    year = {2013},
    abstract = {Modifying a digital sketch may require multiple selections before a particular editing tool can be applied. Especially on large interactive surfaces, such interactions can be fatiguing. To address this issue, we propose a method, called Suggero, to facilitate the selection process of digital ink. Suggero identifies groups of perceptually related drawing objects. These "perceptual groups" are then used to suggest possible selection extensions in response to a person's initial selection. Two studies were conducted. First, a background study investigated participants' expectations of such a selection assistance tool. Then, an empirical study compared the effectiveness of Suggero with an existing manual selection technique. Study results revealed that selections required fewer pen interactions and less pen movement with Suggero, suggesting that Suggero helps minimize fatigue during digital sketching.},
    pages = {51--60},
    numpages = {10},
    doi = {10.1145/2512349.2512801},
    pdf = {p51-lindlbauer.pdf},
    movie = {its142.mp4},
    youtube = {PObiFxR70JQ},
    subtype = {conference}
    }

  • A. Irannejad, Designing Privacy-Enhanced Interfaces on Digital Tabletops for Public Settings, MASc Thesis, University of Waterloo, 2013.

    Protection of personal information has become a critical issue in the digital world. Many companies and service provider websites have adopted privacy policies and practices to protect users’ personal information to some extent. In addition, various governments are adopting privacy protection legislation. System developers, service providers, and interface designers play an important role in determining how to make systems fulfill legal requirements and satisfy users. The human factor requirements for effective privacy interface design can be categorized into four groups: (1) comprehension, (2) consciousness, (3) control, and (4) consent (Patrick & Kenny, 2003). Moreover, the type of technology that people are engaged with has a crucial role in determining what type of practices should be adopted. As Weiser (1996) envisioned, we are now in an “ubiquitous computing” (Ubicomp) era in which technologies such as digital tabletops (what Weiser called LiveBoards) are emerging for use in public settings. The collaborative and open nature of this type of smart device introduces new privacy threats that have not yet been thoroughly investigated and as a result have not been addressed in companies’ and governmental privacy statements and legislation. In this thesis, I provide an analytical description of the privacy threats unique to tabletop display environments. I then present several design suggestions for a tabletop display interface that addresses and mitigates these threats, followed by a qualitative evaluation of these designs based on Patrick and Kenny’s (2003) model. Results show that most participants have often experienced being shoulder-surfed or had privacy issues when sharing information with someone in a collaborative environment. Therefore, they found most of the techniques designed in this thesis helpful in providing information privacy for them when they are engaged with online social activities on digital tabletops in public settings. 
Among all of the proposed tested designs, the first three have proven to be effective in providing the required privacy. However, designs 4 and 5 had some shortfalls that made them less helpful for participants. The main problem with these two designs was that participants had difficulty understanding what they had to do in order to complete the given tasks.

    @MASTERSTHESIS{irannejad:2013:privacy-tabletops,
    author = {Arezoo Irannejad},
    title = {Designing Privacy-Enhanced Interfaces on Digital Tabletops for Public Settings},
    school = {University of Waterloo},
    year = {2013},
    note = {MASc Thesis},
    abstract = {Protection of personal information has become a critical issue in the digital world. Many companies and service provider websites have adopted privacy policies and practices to protect users' personal information to some extent. In addition, various governments are adopting privacy protection legislation. System developers, service providers, and interface designers play an important role in determining how to make systems fulfill legal requirements and satisfy users. The human factor requirements for effective privacy interface design can be categorized into four groups: (1) comprehension, (2) consciousness, (3) control, and (4) consent (Patrick \& Kenny, 2003). Moreover, the type of technology that people are engaged with has a crucial role in determining what type of practices should be adopted. As Weiser (1996) envisioned, we are now in an ``ubiquitous computing'' (Ubicomp) era in which technologies such as digital tabletops (what Weiser called LiveBoards) are emerging for use in public settings. The collaborative and open nature of this type of smart device introduces new privacy threats that have not yet been thoroughly investigated and as a result have not been addressed in companies' and governmental privacy statements and legislation. In this thesis, I provide an analytical description of the privacy threats unique to tabletop display environments. I then present several design suggestions for a tabletop display interface that addresses and mitigates these threats, followed by a qualitative evaluation of these designs based on Patrick and Kenny's (2003) model. Results show that most participants have often experienced being shoulder-surfed or had privacy issues when sharing information with someone in a collaborative environment. Therefore, they found most of the techniques designed in this thesis helpful in providing information privacy for them when they are engaged with online social activities on digital tabletops in public settings.
Among all of the proposed tested designs, the first three have proven to be effective in providing the required privacy. However, designs 4 and 5 had some shortfalls that made them less helpful for participants. The main problem with these two designs was that participants had difficulty understanding what they had to do in order to complete the given tasks.},
    pdf = {Designing.pdf},
    url = {http://hdl.handle.net/10012/7366}
    }

  • B. Y. -L. Chang, S. D. Scott, and M. Hancock, Improving Awareness of Automated Actions using an Interactive Event Timeline, in Proc. ITS, 2013, pp. 353-356.

    Digital tabletops provide an opportunity for automating complex tasks in collaborative domains involving planning and decision-making, such as strategic simulation in command and control. However, when automation leads to modification of the system’s state, users may fail to understand how or why the state has changed, resulting in lower situation awareness and incorrect or suboptimal decisions. We present the design of an interactive event timeline that aims to improve situation awareness in tabletop systems that use automation. Our timeline enables exploration and analysis of automated system actions in a collaborative environment. We discuss two factors in the design of the timeline: the ownership of the timeline in multi-user situations and the location of the detailed visual feedback resulting from interaction with the timeline. We use a collaborative digital tabletop board game to illustrate this design concept.

    @InProceedings{chang:2013:timelineITS,
    author = {Y.-L. Betty Chang and Stacey D. Scott and Mark Hancock},
    title = {Improving Awareness of Automated Actions using an Interactive Event Timeline},
    booktitle = {Proc. ITS},
    year = {2013},
    abstract = {Digital tabletops provide an opportunity for automating complex tasks in collaborative domains involving planning and decision-making, such as strategic simulation in command and control. However, when automation leads to modification of the system's state, users may fail to understand how or why the state has changed, resulting in lower situation awareness and incorrect or suboptimal decisions. We present the design of an interactive event timeline that aims to improve situation awareness in tabletop systems that use automation. Our timeline enables exploration and analysis of automated system actions in a collaborative environment. We discuss two factors in the design of the timeline: the ownership of the timeline in multi-user situations and the location of the detailed visual feedback resulting from interaction with the timeline. We use a collaborative digital tabletop board game to illustrate this design concept.},
    pages = {353--356},
    subtype = {conference}
    }

  • D. Watson, R. Mandryk, and K. Stanley, The Design and Evaluation of a Classroom Exergame, in Gamification 2013, 2013, pp. 34-41.

    Balancing academic, physical and emotional needs of students while maintaining student interest is increasingly challenging in the resource constrained environments of the modern classroom. To answer this need we created and evaluated an exergame system called Vortex Mountain, which leverages the physical benefits of exercise and the motivational benefits of educational games to provide a healthy and engaging classroom activity for middle school students. Through a controlled study, we demonstrate that our classroom exergame provides similar affective, engagement, and learning benefits to an exercise or game intervention, while leveraging the valuable ancillary benefits of each. Thus, we believe that exergames have a future in the modern classroom and possess significant potential for future technical and pedagogical research.

    @InProceedings{watson:2013:vortexMountain,
    author = {Diane Watson and Regan Mandryk and Kevin Stanley},
    title = {The Design and Evaluation of a Classroom Exergame},
    booktitle = {Gamification 2013},
    year = {2013},
    abstract = {Balancing academic, physical and emotional needs of students while maintaining student interest is increasingly challenging in the resource constrained environments of the modern classroom. To answer this need we created and evaluated an exergame system called Vortex Mountain, which leverages the physical benefits of exercise and the motivational benefits of educational games to provide a healthy and engaging classroom activity for middle school students. Through a controlled study, we demonstrate that our classroom exergame provides similar affective, engagement, and learning benefits to an exercise or game intervention, while leveraging the valuable ancillary benefits of each. Thus, we believe that exergames have a future in the modern classroom and possess significant potential for future technical and pedagogical research.},
    pages = {34--41},
    pdf = {VortexMountain.pdf},
    subtype = {conference}
    }

  • D. Watson, R. Mandryk, and K. Stanley, The Design and Evaluation of a Classroom Exergame, in Gamification 2013, 2013, pp. 34-41.

    Balancing academic, physical and emotional needs of students while maintaining student interest is increasingly challenging in the resource constrained environments of the modern classroom. To answer this need we created and evaluated an exergame system called Vortex Mountain, which leverages the physical benefits of exercise and the motivational benefits of educational games to provide a healthy and engaging classroom activity for middle school students. Through a controlled study, we demonstrate that our classroom exergame provides similar affective, engagement, and learning benefits to an exercise or game intervention, while leveraging the valuable ancillary benefits of each. Thus, we believe that exergames have a future in the modern classroom and possess significant potential for future technical and pedagogical research.

    Duplicate of the watson:2013:vortexMountain entry above (same key, same fields); the leading at-sign has been removed so BibTeX treats this copy as ignorable inter-entry text instead of reporting a repeated entry:
    InProceedings{watson:2013:vortexMountain,
    author = {Diane Watson and Regan Mandryk and Kevin Stanley},
    title = {The Design and Evaluation of a Classroom Exergame},
    booktitle = {Gamification 2013},
    year = {2013},
    abstract = {Balancing academic, physical and emotional needs of students while maintaining student interest is increasingly challenging in the resource constrained environments of the modern classroom. To answer this need we created and evaluated an exergame system called Vortex Mountain, which leverages the physical benefits of exercise and the motivational benefits of educational games to provide a healthy and engaging classroom activity for middle school students. Through a controlled study, we demonstrate that our classroom exergame provides similar affective, engagement, and learning benefits to an exercise or game intervention, while leveraging the valuable ancillary benefits of each. Thus, we believe that exergames have a future in the modern classroom and possess significant potential for future technical and pedagogical research. },
    pages = {34-41},
    pdf = {VortexMountain.pdf},
    subtype = {conference}
    }

  • D. Watson, M. Hancock, and R. Mandryk, Gamifying Behaviour that Leads to Learning, in Gamification 2013, 2013, pp. 87-90.

    Many courses require self-study to succeed. This is espe-cially true of online courses. However, self-study activities, such as reading the textbook and completing the associated workbook, are not motivating and do not contribute directly to grades. As a result many students do not complete these activities and this may lead to a lower understanding of the material and a lower overall grade in the class. In this paper we present the prototype of a casual game, Reading Garden, which encourages self-study through casual gameplay

    @InProceedings{watson:2013:readinggarden,
    author = {Diane Watson and Mark Hancock and Regan Mandryk},
    title = {Gamifying Behaviour that Leads to Learning},
    booktitle = {Gamification 2013},
    year = {2013},
    abstract = {Many courses require self-study to succeed. This is especially true of online courses. However, self-study activities, such as reading the textbook and completing the associated workbook, are not motivating and do not contribute directly to grades. As a result many students do not complete these activities and this may lead to a lower understanding of the material and a lower overall grade in the class. In this paper we present the prototype of a casual game, Reading Garden, which encourages self-study through casual gameplay.},
    pages = {87--90},
    pdf = {ReadingGarden.pdf},
    subtype = {conference}
    }

  • D. Watson, M. Hancock, R. Mandryk, and M. Birk, Deconstructing the Touch Experience, in Proc. ITS, St. Andrews, Scotland, 2013, pp. 199-208.

    In this paper, we evaluate the performance and experience differences between direct touch and mouse input on horizontal and vertical surfaces using a simple application and several validated scales. We find that, not only are both speed and accuracy improved when using the multi-touch display over a mouse, but that participants were happier and more engaged. They also felt more competent, in control, related to other people, and immersed. Surprisingly, these results cannot be explained by the intuitiveness of the controller, and the benefits of touch did not come at the expense of perceived workload. Our work shows the added value of considering experience in addition to traditional measures of performance, and demonstrates an effective and efficient method for gathering experience during inter-action with surface applications. We conclude by discussing how an understanding of this experience can help in designing touch applications.

    @InProceedings{watson:2013:touchexperience,
    author = {Diane Watson and Mark Hancock and Regan Mandryk and Max Birk},
    title = {Deconstructing the Touch Experience},
    booktitle = {Proc. ITS},
    year = {2013},
    abstract = {In this paper, we evaluate the performance and experience differences between direct touch and mouse input on horizontal and vertical surfaces using a simple application and several validated scales. We find that, not only are both speed and accuracy improved when using the multi-touch display over a mouse, but that participants were happier and more engaged. They also felt more competent, in control, related to other people, and immersed. Surprisingly, these results cannot be explained by the intuitiveness of the controller, and the benefits of touch did not come at the expense of perceived workload. Our work shows the added value of considering experience in addition to traditional measures of performance, and demonstrates an effective and efficient method for gathering experience during interaction with surface applications. We conclude by discussing how an understanding of this experience can help in designing touch applications.},
    address = {St. Andrews, Scotland},
    pages = {199--208},
    pdf = {touchexperience.pdf},
    doi = {10.1145/2512349.2512819},
    subtype = {conference}
    }

  • R. Langer, A. West, M. Hancock, and N. Randall, Applications as stories, in Designing gamification: Creating gameful and playful experiences, CHI 2013 Extended Abstracts, 2013, To appear.

    Narrative engages people both emotionally and intellectually, shaping the way we perceive, interpret, and interact with the world. Our group is putting that power to new uses by experimenting with applications that are also stories: applications that use the principles of narrative to grab and keep people’s attention, that guide novice users through the process of becoming experts, and that provide experiences that are as emotional and reflective as they are efficient and powerful.

    @inproceedings{langer:2013:stories,
      author    = {Rebecca Langer and Amber West and Mark Hancock and Neil Randall},
      title     = {Applications as stories},
      booktitle = {Designing gamification: Creating gameful and playful experiences, CHI 2013 Extended Abstracts},
      year      = {2013},
      abstract  = {Narrative engages people both emotionally and intellectually, shaping the way we perceive, interpret, and interact with the world. Our group is putting that power to new uses by experimenting with applications that are also stories: applications that use the principles of narrative to grab and keep people's attention, that guide novice users through the process of becoming experts, and that provide experiences that are as emotional and reflective as they are efficient and powerful.},
      note      = {To appear},
      subtype   = {other}
    }

  • A. Ion, B. Y. -L. Chang, M. Haller, M. Hancock, and S. D. Scott, Canyon: Providing location awareness of multiple moving objects in a detail view on large displays, in Proc. CHI, New York, NY, USA, 2013, (Best Paper Honorable Mention Award).

    Overview+Detail interfaces can be used to examine the details of complex data while retaining the data’s overall context. Dynamic data introduce challenges for these interfaces, however, as moving objects may exit the detail view, as well as a person’s field of view if they are working at a large interactive surface. To address this "off-view" problem, we propose a new information visualization technique, called Canyon. This technique attaches a small view of an off-view object, including some surrounding context, to the external boundary of the detail view. The area between the detail view and the region containing the off-view object is virtually "folded" to conserve space. A comparison study was conducted contrasting the benefits and limitations of Canyon to an established technique, called Wedge. Canyon was more accurate across a number of tasks, especially more complex tasks, and was comparably efficient.

    @inproceedings{ion:2013:canyon,
      author    = {Alexandra Ion and Y.-L. Betty Chang and Michael Haller and Mark Hancock and Stacey D. Scott},
      title     = {Canyon: Providing location awareness of multiple moving objects in a detail view on large displays},
      booktitle = {Proc. CHI},
      year      = {2013},
      address   = {New York, NY, USA},
      publisher = {ACM Press},
      note      = {(Best Paper Honorable Mention Award)},
      abstract  = {Overview+Detail interfaces can be used to examine the details of complex data while retaining the data's overall context. Dynamic data introduce challenges for these interfaces, however, as moving objects may exit the detail view, as well as a person's field of view if they are working at a large interactive surface. To address this "off-view" problem, we propose a new information visualization technique, called Canyon. This technique attaches a small view of an off-view object, including some surrounding context, to the external boundary of the detail view. The area between the detail view and the region containing the off-view object is virtually "folded" to conserve space. A comparison study was conducted contrasting the benefits and limitations of Canyon to an established technique, called Wedge. Canyon was more accurate across a number of tasks, especially more complex tasks, and was comparably efficient.},
      subtype   = {conference}
    }

2012

  • D. Pyryeskin, M. Hancock, and J. Hoey, Comparing Elicited Gestures to Designer-Created Gestures for Selection above a Multitouch Surface, in Proc. ITS, 2012, pp. 1-10.

    Many new technologies are emerging that make it possible to extend interaction into the three-dimensional space directly above or in front of a multitouch surface. Such techniques allow people to control these devices by performing hand gestures in the air. In this paper, we present a method of extending interactions into the space above a multitouch surface using only a standard diffused surface illumination (DSI) device, without any additional sensors. Then we focus on interaction techniques for activating graphical widgets located in this above-surface space. We have conducted a study to elicit gestures for above-table widget activation. A follow-up study was conducted to evaluate and compare these gestures based on their performance. Our results showed that there was no clear agreement on what gestures should be used to select objects in mid-air, and that performance was better when using gestures that were chosen less frequently, but predicted to be better by the designers, as opposed to those most frequently suggested by participants.

    @inproceedings{Pyryeskin:2012:hoverspace,
      author    = {Dmitry Pyryeskin and Mark Hancock and Jesse Hoey},
      title     = {Comparing Elicited Gestures to Designer-Created Gestures for Selection above a Multitouch Surface},
      booktitle = {Proc. ITS},
      year      = {2012},
      pages     = {1--10},
      numpages  = {10},
      doi       = {10.1145/2396636.2396638},
      abstract  = {Many new technologies are emerging that make it possible to extend interaction into the three-dimensional space directly above or in front of a multitouch surface. Such techniques allow people to control these devices by performing hand gestures in the air. In this paper, we present a method of extending interactions into the space above a multitouch surface using only a standard diffused surface illumination (DSI) device, without any additional sensors. Then we focus on interaction techniques for activating graphical widgets located in this above-surface space. We have conducted a study to elicit gestures for above-table widget activation. A follow-up study was conducted to evaluate and compare these gestures based on their performance. Our results showed that there was no clear agreement on what gestures should be used to select objects in mid-air, and that performance was better when using gestures that were chosen less frequently, but predicted to be better by the designers, as opposed to those most frequently suggested by participants.},
      pdf       = {pyryeskin-hoverspace.pdf},
      movie     = {pyryeskin-hoverspace.mp4},
      subtype   = {conference}
    }

  • M. A. Seto, S. D. Scott, and M. Hancock, Investigating Menu Discoverability on a Digital Tabletop in a Public Setting, in Proc. ITS, 2012, pp. 71-80.

    A common challenge to the design of digital tabletops for public settings is how to effectively invite and guide passersby—who often have no prior experience with such technology—to interact using unfamiliar interaction methods and interfaces. We characterize such enticement from the system interface as the system’s discoverability. A particular challenge to modern surface interfaces is the discoverability of system functionality: does the system require gestures? are there system menus? if so, how are they invoked? This research focuses on the discoverability of system menus on digital tabletops designed for public settings. An observational study of menu invocation methods in a museum setting is reported. Study findings suggest that discernible and recognizable interface elements, such as buttons, supported by the use of animation, can effectively attract and guide the discovery of menus. Design recommendations for improving menu discoverability are also presented.

    @inproceedings{Seto:2012:menus,
      author    = {A. Mindy Seto and Stacey D. Scott and Mark Hancock},
      title     = {Investigating Menu Discoverability on a Digital Tabletop in a Public Setting},
      booktitle = {Proc. ITS},
      year      = {2012},
      pages     = {71--80},
      numpages  = {10},
      doi       = {10.1145/2396636.2396647},
      abstract  = {A common challenge to the design of digital tabletops for public settings is how to effectively invite and guide passersby---who often have no prior experience with such technology---to interact using unfamiliar interaction methods and interfaces. We characterize such enticement from the system interface as the system's discoverability. A particular challenge to modern surface interfaces is the discoverability of system functionality: does the system require gestures? are there system menus? if so, how are they invoked? This research focuses on the discoverability of system menus on digital tabletops designed for public settings. An observational study of menu invocation methods in a museum setting is reported. Study findings suggest that discernible and recognizable interface elements, such as buttons, supported by the use of animation, can effectively attract and guide the discovery of menus. Design recommendations for improving menu discoverability are also presented.},
      pdf       = {seto-menus.pdf},
      movie     = {seto-menus.wmv},
      subtype   = {conference}
    }

  • V. Cheung, B. Y. -L. Chang, and S. D. Scott, Communication channels and awareness cues in collocated collaborative time-critical gaming, in Proc. CSCW, New York, NY, USA, 2012, pp. 569-578.
    @inproceedings{Cheung:2012:shooter,
      author    = {Victor Cheung and Y.-L. Betty Chang and Stacey D. Scott},
      title     = {Communication channels and awareness cues in collocated collaborative time-critical gaming},
      booktitle = {Proc. CSCW},
      series    = {CSCW 2012},
      year      = {2012},
      isbn      = {978-1-4503-1086-4},
      location  = {Seattle, Washington, USA},
      pages     = {569--578},
      numpages  = {10},
      url       = {http://doi.acm.org/10.1145/2145204.2145291},
      doi       = {10.1145/2145204.2145291},
      acmid     = {2145291},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {awareness, collaborative gameplay, collocated gaming, communication channels, computer-supported cooperative play (cscp), console games, cues},
      subtype   = {conference}
    }

  • B. Y. -L. Chang, M. Hancock, S. D. Scott, J. Pape, and N. T. C. Graham, Improving the social gaming experience by comparing physical and digital tabletop board games, in Extended Proceedings of the 4th International Conference on Fun and Games, Toulouse, France, 2012, pp. 108-111.
    @inproceedings{Chang:2012:boardgames,
      author    = {Y.-L. Betty Chang and Mark Hancock and Stacey D. Scott and Joseph Pape and T.C. Nicholas Graham},
      title     = {Improving the social gaming experience by comparing physical and digital tabletop board games},
      booktitle = {Extended Proceedings of the 4th International Conference on Fun and Games},
      series    = {FnG 2012},
      year      = {2012},
      isbn      = {978-2-917490-21-1},
      location  = {Toulouse, France},
      pages     = {108--111},
      numpages  = {4},
      publisher = {IRIT Press},
      address   = {Toulouse, France},
      keywords  = {social gaming experience, digital tabletops, collaborative board games, automation, mixed-methods approach},
      subtype   = {conference}
    }

  • Y. Chang, P. Alencar, E. S. Barrenechea, R. Blanco, and D. Cowan, Context-aware systems: models and functionality, PA, USA: IGI Global, 2012.
    @incollection{Chang:2012:context_survey,
      author    = {Yu-Ling Chang and Paulo Alencar and Eduardo S. Barrenechea and Rolando Blanco and Donald Cowan},
      title     = {Context-aware systems: models and functionality},
      booktitle = {Handbook of Research on Mobile Software Engineering: Design, Implementation and Applications},
      year      = {2012},
      isbn      = {1615206558},
      pages     = {427--441},
      numpages  = {15},
      url       = {http://www.igi-global.com/chapter/context-aware-systems/66490},
      doi       = {10.4018/978-1-61520-655-1},
      publisher = {IGI Global},
      address   = {PA, USA},
      subtype   = {inbook}
    }

  • X. Yuan, J. Shum, K. Langer, M. Hancock, and J. Histon, Investigating Collaborative Behaviors on Interactive Tabletop Displays in Complex Task Environments, in Proc. HFES 2012 (Poster), 2012, To Appear.
    @inproceedings{yuan:2012:table-game,
      author    = {Xiaochen Yuan and Joseph Shum and Kimberly Langer and Mark Hancock and Jonathan Histon},
      title     = {Investigating Collaborative Behaviors on Interactive Tabletop Displays in Complex Task Environments},
      booktitle = {Proc. HFES 2012 (Poster)},
      year      = {2012},
      note      = {To Appear},
      subtype   = {other}
    }

  • R. Holmes, D. Notkin, and M. Hancock, Industrially validating longitudinal static and dynamic analyses, in Proc. USER 2012 – workshop in conjunction with ICSE 2012, 2012, pp. 43-44.

    Software systems gradually evolve over time, becoming increasingly difficult to understand as new features are added and old defects are repaired. Some modifications are harder to understand than others; e.g., an explicit method call is usually easy to trace in the source code, while a reflective method call may perplex both developers and analysis tools. Our tool, the Inconsistency Inspector, collects static and dynamic call graphs of systems and composes them to help developers more systematically address the static and dynamic implications of a change to a system.

    @inproceedings{holmes:2012:venn,
      author    = {Reid Holmes and David Notkin and Mark Hancock},
      title     = {Industrially validating longitudinal static and dynamic analyses},
      booktitle = {Proc. USER 2012 - workshop in conjunction with ICSE 2012},
      year      = {2012},
      pages     = {43--44},
      abstract  = {Software systems gradually evolve over time, becoming increasingly difficult to understand as new features are added and old defects are repaired. Some modifications are harder to understand than others; e.g., an explicit method call is usually easy to trace in the source code, while a reflective method call may perplex both developers and analysis tools. Our tool, the Inconsistency Inspector, collects static and dynamic call graphs of systems and composes them to help developers more systematically address the static and dynamic implications of a change to a system.},
      doi       = {10.1109/USER.2012.6226582},
      pdf       = {holmes-venn.pdf},
      subtype   = {other}
    }

  • T. Isenberg and M. Hancock, Gestures vs. postures: ‘Gestural’ touch interaction in 3D environments, in Proc. CHI 2012 Extended Abstracts, 2012.
    @inproceedings{isenberg:2012:gestures-postures,
      author    = {Tobias Isenberg and Mark Hancock},
      title     = {Gestures vs. postures: 'Gestural' touch interaction in 3D environments},
      booktitle = {Proc. CHI 2012 Extended Abstracts},
      year      = {2012},
      subtype   = {other}
    }

  • J. R. Wallace, J. Pape, B. Y. -L. Chang, P. J. McClelland, N. T. C. Graham, S. D. Scott, and M. Hancock, Exploring automation in digital tabletop board game, in Proc. CSCW Companion (Poster), New York, NY, USA, 2012, pp. 231-234.

    Digital tabletops present the opportunity to combine the social advantages of traditional tabletop games with the automation and streamlined gameplay of video games. However, it is unclear whether the addition of automation enhances or detracts from the game experience. A study was performed where groups played three versions of the cooperative board game Pandemic, with varying degrees of automation. The study revealed that while game automation can provide advantages to players, it can also negatively impact enjoyment, game state awareness, and flexibility in game play.

    @inproceedings{Wallace:2012:EAD:2141512.2141585,
      author    = {James R. Wallace and Joseph Pape and Y.-L. Betty Chang and Phillip J. McClelland and T.C. Nicholas Graham and Stacey D. Scott and Mark Hancock},
      title     = {Exploring automation in digital tabletop board game},
      booktitle = {Proc. CSCW Companion (Poster)},
      series    = {CSCW 2012},
      year      = {2012},
      isbn      = {978-1-4503-1051-2},
      location  = {Seattle, Washington, USA},
      pages     = {231--234},
      numpages  = {4},
      doi       = {10.1145/2141512.2141585},
      acmid     = {2141585},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {automation, gaming, interaction design, tabletop display},
      abstract  = {Digital tabletops present the opportunity to combine the social advantages of traditional tabletop games with the automation and streamlined gameplay of video games. However, it is unclear whether the addition of automation enhances or detracts from the game experience. A study was performed where groups played three versions of the cooperative board game Pandemic, with varying degrees of automation. The study revealed that while game automation can provide advantages to players, it can also negatively impact enjoyment, game state awareness, and flexibility in game play.},
      pdf       = {p231-wallace.pdf},
      subtype   = {other}
    }

  • A. Azad, D. Vogel, J. Ruiz, M. Hancock, and E. Lank, Territoriality and behaviour on and around large vertical publicly-shared displays, in Proc. DIS, New York, NY, USA, 2012, pp. 468-477.

    We investigate behaviours on, and around, large vertical displays during concurrent usage. Using an observational field study, we identify fundamental patterns of how people use existing public displays: their orientation, positioning, group identification, and behaviour within and between social groups just-before, during, and just-after usage. These results are then used to motivate a controlled experiment where two individuals, or two pairs of individuals, complete tasks concurrently on a simulated large vertical display. Results from our controlled study demonstrate that vertical surface territories are similar to those found in horizontal tabletops in function, but their definitions and social conventions are different. In addition, the nature of use-while-standing systems results in more complex and dynamic physical territories around the display. We show that the anthropological notion of personal space must be slightly refined for application to vertical displays.

    @inproceedings{azad:2012:kiosks,
      author    = {Alec Azad and Daniel Vogel and Jaime Ruiz and Mark Hancock and Edward Lank},
      title     = {Territoriality and behaviour on and around large vertical publicly-shared displays},
      booktitle = {Proc. DIS},
      series    = {DIS '12},
      year      = {2012},
      isbn      = {978-1-4503-1210-3},
      location  = {Newcastle Upon Tyne, United Kingdom},
      pages     = {468--477},
      numpages  = {10},
      doi       = {10.1145/2317956.2318025},
      acmid     = {2318025},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {large display, public, shared display, territoriality},
      abstract  = {We investigate behaviours on, and around, large vertical displays during concurrent usage. Using an observational field study, we identify fundamental patterns of how people use existing public displays: their orientation, positioning, group identification, and behaviour within and between social groups just-before, during, and just-after usage. These results are then used to motivate a controlled experiment where two individuals, or two pairs of individuals, complete tasks concurrently on a simulated large vertical display. Results from our controlled study demonstrate that vertical surface territories are similar to those found in horizontal tabletops in function, but their definitions and social conventions are different. In addition, the nature of use-while-standing systems results in more complex and dynamic physical territories around the display. We show that the anthropological notion of personal space must be slightly refined for application to vertical displays.},
      pdf       = {p468-azad.pdf},
      subtype   = {conference}
    }

2011

  • D. Pyryeskin, M. Hancock, and J. Hoey, Extending interactions into hoverspace using reflected light, in Proc. ITS, New York, NY, USA, 2011, pp. 262-263.

    Multi-touch tables are becoming increasingly popular and much research is dedicated to developing suitable interaction paradigms. There also exist multiple techniques aimed at extending interactions into the hoverspace—the space directly above a multi-touch table. We propose a novel hover-space method that does not require any additional hardware or modification of existing vision-based multi-touch tables. Our prototype system was developed on a Diffused Surface Illumination (DSI) vision-based multi-touch set up, and uses light reflected from a person’s palm to estimate its position in 3D space above the table.

    @inproceedings{Pyryeskin:2011:EIH:2076354.2076406,
      author    = {Dmitry Pyryeskin and Mark Hancock and Jesse Hoey},
      title     = {Extending interactions into hoverspace using reflected light},
      booktitle = {Proc. ITS},
      series    = {ITS '11},
      year      = {2011},
      isbn      = {978-1-4503-0871-7},
      location  = {Kobe, Japan},
      pages     = {262--263},
      numpages  = {2},
      doi       = {10.1145/2076354.2076406},
      acmid     = {2076406},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {hoverspace, multi-touch, multimodal interaction, natural human computer interaction, surface computing},
      abstract  = {Multi-touch tables are becoming increasingly popular and much research is dedicated to developing suitable interaction paradigms. There also exist multiple techniques aimed at extending interactions into the hoverspace---the space directly above a multi-touch table. We propose a novel hover-space method that does not require any additional hardware or modification of existing vision-based multi-touch tables. Our prototype system was developed on a Diffused Surface Illumination (DSI) vision-based multi-touch set up, and uses light reflected from a person's palm to estimate its position in 3D space above the table.},
      pdf       = {p262-pyryeskin.pdf},
      youtube   = {DZCOXVEGp8U},
      subtype   = {other}
    }

  • D. Pyryeskin, M. Hancock, and J. Hoey, Extending interactions into hoverspace using reflected light, ITS 2011 Poster , 2011.
    @misc{Pyryeskin:2011,
      author       = {Dmitry Pyryeskin and Mark Hancock and Jesse Hoey},
      title        = {Extending interactions into hoverspace using reflected light},
      howpublished = {ITS 2011 Poster},
      year         = {2011},
      pdf          = {pyryeskin-2011-poster.pdf},
      subtype      = {poster}
    }

  • K. Mikulecky, M. Hancock, J. Brosz, and S. Carpendale, Exploring physical information cloth on a multitouch table, in Proc. ITS, New York, NY, USA, 2011, pp. 140-149.

    We expand multitouch tabletop information exploration by placing 2D information on a physically-based cloth in a shallow 3D viewing environment. Instead of offering 2D information on a rigid window or screen, we place our information on a soft flexible cloth that can be draped, pulled, stretched, and folded with multiple fingers and hands, supporting any number of information views. Combining our multitouch flexible information cloth with simple manipulable objects provides a physically-based information viewing environment that offers similar advantages to complex detail-in-context viewing. Previous detail-in-context views can be re-created by draping cloth over virtual objects in this physics simulation, thereby approximating many of the existing techniques by providing zoomed-in information in the context of zoomed-out information. These detail-in-context views are approximated because, rather than use distortion, the draped cloth naturally drapes and folds showing magnified regions within a physically understandable context. In addition, the information cloth remains flexibly responsive, allowing one to tweak, unfold, and smooth out regions as desired.

    @inproceedings{mikulecky:2011:cloth,
      author    = {Kimberly Mikulecky and Mark Hancock and John Brosz and Sheelagh Carpendale},
      title     = {Exploring physical information cloth on a multitouch table},
      booktitle = {Proc. ITS},
      year      = {2011},
      isbn      = {978-1-4503-0871-7},
      location  = {Kobe, Japan},
      pages     = {140--149},
      numpages  = {10},
      acmid     = {2076381},
      publisher = {ACM},
      address   = {New York, NY, USA},
      keywords  = {cloth, detail in context, information visualization, multi-touch, physics, simulation, tabletop display},
      abstract  = {We expand multitouch tabletop information exploration by placing 2D information on a physically-based cloth in a shallow 3D viewing environment. Instead of offering 2D information on a rigid window or screen, we place our information on a soft flexible cloth that can be draped, pulled, stretched, and folded with multiple fingers and hands, supporting any number of information views. Combining our multitouch flexible information cloth with simple manipulable objects provides a physically-based information viewing environment that offers similar advantages to complex detail-in-context viewing. Previous detail-in-context views can be re-created by draping cloth over virtual objects in this physics simulation, thereby approximating many of the existing techniques by providing zoomed-in information in the context of zoomed-out information. These detail-in-context views are approximated because, rather than use distortion, the draped cloth naturally drapes and folds showing magnified regions within a physically understandable context. In addition, the information cloth remains flexibly responsive, allowing one to tweak, unfold, and smooth out regions as desired.},
      doi       = {10.1145/2076354.2076381},
      pdf       = {cloth-smaller.pdf},
      youtube   = {ps_CLSHP1uY},
      subtype   = {conference}
    }

  • A. Azad, J. Ruiz, M. Hancock, and E. Lank, Group Behaviours around Public Displays, University of Waterloo, Technical Report CS-2011-24, , 2011.

    Information kiosks often decorate large public areas to provide basic information to inquisitive patrons. This paper presents an observational study examining groups interacting with public kiosks. We identify fundamental issues regarding patterns in user orientation and layout, group identification, and behaviour both within and between social groups during the entire period of interaction. Based on observations from our study, we present a foundation of guidelines and principles that informs the design of public (vertical) large-screen surfaces.

    @techreport{Azad:GroupBehaviours:2011,
      author      = {Alec Azad and Jaime Ruiz and Mark Hancock and Edward Lank},
      title       = {Group Behaviours around Public Displays},
      institution = {University of Waterloo},
      year        = {2011},
      type        = {Technical Report},
      number      = {CS-2011-24},
      abstract    = {Information kiosks often decorate large public areas to provide basic information to inquisitive patrons. This paper presents an observational study examining groups interacting with public kiosks. We identify fundamental issues regarding patterns in user orientation and layout, group identification, and behaviour both within and between social groups during the entire period of interaction. Based on observations from our study, we present a foundation of guidelines and principles that informs the design of public (vertical) large-screen surfaces.},
      pdf         = {AzadGroupBehaviours.pdf}
    }

2010

  • Y. Chang, N. Zhang, and E. Lank, Perception and decision making on electronic tax software for the younger population, in Proceedings of the 10th European Conference on e-Government, Reading, UK, 2010, pp. 99-106.
    @inproceedings{Chang:2010:etax,
      author    = {Yu-Ling Chang and Nathan Zhang and Edward Lank},
      title     = {Perception and decision making on electronic tax software for the younger population},
      booktitle = {Proceedings of the 10th European Conference on e-Government},
      series    = {ECEG 2010},
      year      = {2010},
      isbn      = {978-1-906638-63-4},
      location  = {Limerick, Ireland},
      pages     = {99--106},
      numpages  = {8},
      publisher = {Academic Publishing Limited},
      address   = {Reading, UK},
      subtype   = {conference}
    }

  • Y. Chang, E. Barrenechea, and P. Alencar, Dynamic user-centric mobile context model, in Digital Information Management (ICDIM), 2010 Fifth International Conference on, 2010, pp. 442-447.
    @inproceedings{Chang:2010:context_model,
      author    = {Yu-Ling Chang and Eduardo Barrenechea and Paulo Alencar},
      title     = {Dynamic user-centric mobile context model},
      booktitle = {Digital Information Management (ICDIM), 2010 Fifth International Conference on},
      year      = {2010},
      month     = jul,
      pages     = {442--447},
      keywords  = {context aware systems, dynamic user centric mobile context model, mobile services, model driven approach, mobile computing, virtual reality},
      doi       = {10.1109/ICDIM.2010.5662575},
      url       = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?reload=true&arnumber=5662575&contentType=Conference+Publications},
      subtype   = {conference}
    }

  • M. Hancock, 3D Tabletop Display Interaction, PhD Thesis, University of Calgary, 2010.

    Recent advances in digital technology, both in research prototypes and commercial products, have introduced a plethora of multitouch horizontal display surfaces. Perhaps because most of these devices are flat, initially most of the multitouch interfaces were 2D in nature. However, on traditional tables, people frequently make use of the third dimension; they pick up, turn over, stack, build, and otherwise manipulate objects on physical tables. Furthermore, they frequently use the visual cues made available by the third dimension, such as viewing the different sides of an object or scene, or hiding something underneath another object. Successful interaction with 3D objects on tabletops involves both manipulation and visual feedback. Thus, I simultaneously explore the research questions of both viewing and interacting with 3D virtual artifacts on digital tables. [[<<]] [[<<]] The use of 3D virtual objects on a tabletop display introduces many research questions. For example, most applications that support 3D graphics do so by assuming a single viewpoint directly in front of the display. This assumption is no longer valid when using a large horizontal surface that affords many people working at different sides viewing the 3D virtual scene. It is an open question to what extent this discrepancy in viewing angle is problematic, and, when necessary, how it can be mitigated. Furthermore, the horizontal table imposes a physical barrier to the 3D virtual world, meaning that "touch" input will be within the 2D plane. Another open question is how this 2D information can be used to control 3D virtual objects "below" the table’s surface and whether interacting through this surface can enable the kinds of 3D abilities common to physical artifacts. [[<<]] [[<<]] To address the research questions about the discrepancy in viewing angle between the different people around a table and the viewpoint used to render a scene, I empirically study this perceptual phenomenon. 
Results show that, in a tabletop display setting, viewing projected 3D virtual objects from multiple viewpoints is indeed problematic and becomes more problematic as the discrepancy in viewing angle increases. In this dissertation, I describe how to apply these results to 3D applications, either through an understanding of the compromises implied by each design or by using mitigating techniques to reduce the problem. [[<<]] [[<<]] In this dissertation, I also build on previous work that explores manipulation of a virtual 3D object by introducing several techniques which use the 2D touch input provided by multiple fingers or contact points. Results of a comparative user study showed that both performance and preference increased as participants were provided with more touches to control the virtual objects. While this study only explored 2D movement and 3D rotation (the techniques did not allow lifting of virtual objects), the insight gained was used to create sticky fingers and opposable thumbs, which extend the three-touch technique to allow lifting (the sixth degree of freedom). By combining the power of this full control over any 3D virtual object with physically-based reactions of other virtual objects and interface components, sticky tools provides a framework for 3D tabletop interfaces that eliminates the need for specialized gestures or an abstract menu system. [[<<]] [[<<]] This framework, together with insights gained from the exploration of viewpoint discrepancy, were applied to the practical application of enabling sandtray therapy, a form of art therapy for children, on a digital table. This application was cooperatively designed with therapists who use sandtray therapy in their regular practice. This application serves as a demonstration of how to apply the concepts in this dissertation to the design of 3D interaction on a tabletop display.

    @phdthesis{hancock:2010:thesis,
      author   = {Mark Hancock},
      title    = {3D Tabletop Display Interaction},
      school   = {University of Calgary},
      year     = {2010},
      pdf      = {thesis-one-side.pdf},
      abstract = {Recent advances in digital technology, both in research prototypes and commercial products, have introduced a plethora of multitouch horizontal display surfaces. Perhaps because most of these devices are flat, initially most of the multitouch interfaces were 2D in nature. However, on traditional tables, people frequently make use of the third dimension; they pick up, turn over, stack, build, and otherwise manipulate objects on physical tables. Furthermore, they frequently use the visual cues made available by the third dimension, such as viewing the different sides of an object or scene, or hiding something underneath another object. Successful interaction with 3D objects on tabletops involves both manipulation and visual feedback. Thus, I simultaneously explore the research questions of both viewing and interacting with 3D virtual artifacts on digital tables. [[<<]] [[<<]] The use of 3D virtual objects on a tabletop display introduces many research questions. For example, most applications that support 3D graphics do so by assuming a single viewpoint directly in front of the display. This assumption is no longer valid when using a large horizontal surface that affords many people working at different sides viewing the 3D virtual scene. It is an open question to what extent this discrepancy in viewing angle is problematic, and, when necessary, how it can be mitigated. Furthermore, the horizontal table imposes a physical barrier to the 3D virtual world, meaning that "touch" input will be within the 2D plane. Another open question is how this 2D information can be used to control 3D virtual objects "below" the table's surface and whether interacting through this surface can enable the kinds of 3D abilities common to physical artifacts. [[<<]] [[<<]] To address the research questions about the discrepancy in viewing angle between the different people around a table and the viewpoint used to render a scene, I empirically study this perceptual phenomenon. Results show that, in a tabletop display setting, viewing projected 3D virtual objects from multiple viewpoints is indeed problematic and becomes more problematic as the discrepancy in viewing angle increases. In this dissertation, I describe how to apply these results to 3D applications, either through an understanding of the compromises implied by each design or by using mitigating techniques to reduce the problem. [[<<]] [[<<]] In this dissertation, I also build on previous work that explores manipulation of a virtual 3D object by introducing several techniques which use the 2D touch input provided by multiple fingers or contact points. Results of a comparative user study showed that both performance and preference increased as participants were provided with more touches to control the virtual objects. While this study only explored 2D movement and 3D rotation (the techniques did not allow lifting of virtual objects), the insight gained was used to create sticky fingers and opposable thumbs, which extend the three-touch technique to allow lifting (the sixth degree of freedom). By combining the power of this full control over any 3D virtual object with physically-based reactions of other virtual objects and interface components, sticky tools provides a framework for 3D tabletop interfaces that eliminates the need for specialized gestures or an abstract menu system. [[<<]] [[<<]] This framework, together with insights gained from the exploration of viewpoint discrepancy, were applied to the practical application of enabling sandtray therapy, a form of art therapy for children, on a digital table. This application was cooperatively designed with therapists who use sandtray therapy in their regular practice. This application serves as a demonstration of how to apply the concepts in this dissertation to the design of 3D interaction on a tabletop display.},
    }

  • L. Vlaming, C. Collins, M. Hancock, T. Isenberg, and S. Carpendale, Integrating 2D Mouse Emulation with 3D Manipulation for Visualizations on a Multi-Touch Table, in Proc. ITS, 2010, pp. 221-230.

    We present the Rizzo, a multitouch virtual mouse that has been designed to provide the fine grained interaction for information visualization on a multi-touch table. Our solution enables touch interaction for existing mouse-based visualizations. Previously, this transition to a multi-touch environment was difficult because the mouse emulation of touch surfaces is often insufficient to provide full information visualization functionality. We present a unified design, combining many Rizzos that have been designed not only to provide mouse capabilities but also to act as zoomable lenses that make precise information access feasible. The Rizzos and the information visualizations all exist within a touch-enabled 3D window management system. Our approach permits touch interaction with both the 3D windowing environment as well as with the contents of the individual windows contained therein. We describe an implementation of our technique that augments the VisLink 3D visualization environment to demonstrate how to enable multi-touch capabilities on all visualizations written with the popular prefuse visualization toolkit.

    @InProceedings{vlaming:2010:integrating,
    author = {Luc Vlaming and Christopher Collins and Mark Hancock and Tobias Isenberg and Sheelagh Carpendale},
    title = {Integrating 2D Mouse Emulation with 3D Manipulation for Visualizations on a Multi-Touch Table},
    booktitle = {Proc. ITS},
    year = {2010},
    pages = {221--230},
    abstract = {We present the Rizzo, a multitouch virtual mouse that has been designed to provide the fine grained interaction for information visualization on a multi-touch table. Our solution enables touch interaction for existing mouse-based visualizations. Previously, this transition to a multi-touch environment was difficult because the mouse emulation of touch surfaces is often insufficient to provide full information visualization functionality. We present a unified design, combining many Rizzos that have been designed not only to provide mouse capabilities but also to act as zoomable lenses that make precise information access feasible. The Rizzos and the information visualizations all exist within a touch-enabled 3D window management system. Our approach permits touch interaction with both the 3D windowing environment as well as with the contents of the individual windows contained therein. We describe an implementation of our technique that augments the VisLink 3D visualization environment to demonstrate how to enable multi-touch capabilities on all visualizations written with the popular prefuse visualization toolkit.},
    doi = {http://doi.acm.org/10.1145/1936652.1936693},
    pdf = {touchmouse-its2010.pdf},
    youtube = {Eb8Qu-Z0ERw},
    subtype = {conference}
    }

  • M. Hancock, T. ten Cate, S. Carpendale, and T. Isenberg, Supporting Sandtray Therapy on an Interactive Tabletop, in Proc. CHI, 2010, pp. 2133-2142.

    We present the iterative design of a virtual sandtray application for a tabletop display. The purpose of our prototype is to support sandtray therapy, a form of art therapy typically used for younger clients. A significant aspect of this therapy is the insight gained by the therapist as they observe the client interact with the figurines they use to create a scene in the sandtray. In this manner, the therapist can gain increased understanding of the client's psyche. We worked with three sandtray therapists throughout the evolution of our prototype. We describe the details of the three phases of this design process: initial face-to-face meetings, iterative design and development via distance collaboration, and a final face-to-face feedback session. This process revealed that our prototype was sufficient for therapists to gain insight about a person's psyche through their interactions with the virtual sandtray.

    @INPROCEEDINGS{hancock:2010:supporting,
    author = {Mark Hancock and Thomas ten Cate and Sheelagh Carpendale and Tobias
    Isenberg},
    title = {Supporting Sandtray Therapy on an Interactive Tabletop},
    booktitle = {Proc. CHI},
    year = {2010},
    pages = {2133--2142},
    abstract = {We present the iterative design of a virtual sandtray application
    for a tabletop display. The purpose of our prototype is to support
    sandtray therapy, a form of art therapy typically used for younger
    clients. A significant aspect of this therapy is the insight gained
    by the therapist as they observe the client interact with the figurines
    they use to create a scene in the sandtray. In this manner, the therapist
    can gain increased understanding of the client's psyche. We worked
    with three sandtray therapists throughout the evolution of our prototype.
    We describe the details of the three phases of this design process:
    initial face-to-face meetings, iterative design and development via
    distance collaboration, and a final face-to-face feedback session.
    This process revealed that our prototype was sufficient for therapists
    to gain insight about a person's psyche through their interactions
    with the virtual sandtray.},
    doi = {http://doi.acm.org/10.1145/1753326.1753651},
    movie = {pap1350-hancock.mov},
    youtube = {C1V2CyXfwKw},
    pdf = {pap1350-hancock.pdf},
    subtype = {conference}
    }

2009

  • R. Davies, M. Hancock, and A. Condon, Perspectives: Canadian Women in Computer Science, Encyclopedia of Computer Science and Engineering, 2009.

    This article provides a brief overview of statistics on the participation of women in computing in Canada, the factors that contribute to the low participation of women, and current programs that aim to increase the participation of women in computing fields.

    @ARTICLE{davies:2009:perspectives:,
    author = {Rhian Davies and Mark Hancock and Anne Condon},
    title = {Perspectives: Canadian Women in Computer Science},
    journal = {Encyclopedia of Computer Science and Engineering},
    year = {2009},
    month = jan,
    abstract = {This article provides a brief overview of statistics on the participation
    of women in computing in Canada, the factors that contribute to the
    low participation of women, and current programs that aim to increase
    the participation of women in computing fields.},
    pdf = {canadawomenincs.pdf},
    subtype = {inbook}
    }

  • M. Hancock, T. ten Cate, and S. Carpendale, Sticky Tools: Full 6DOF Force-Based Interaction for Multi-Touch Tables, in Proc. ITS, 2009, pp. 145-152.

    Tabletop computing techniques are using physically familiar force-based interactions to enable compelling interfaces that provide a feeling of being embodied with a virtual object. We introduce an interaction paradigm that has the benefits of force-based interaction complete with full 6DOF manipulation. Only multi-touch input, such as that provided by the Microsoft Surface and the SMART Table, is necessary to achieve this interaction freedom. This paradigm is realized through sticky tools: a combination of sticky fingers, a physically familiar technique for moving, spinning, and lifting virtual objects; opposable thumbs, a method for flipping objects over; and virtual tools, a method for propagating behaviour to other virtual objects in the scene. We show how sticky tools can introduce richer meaning to tabletop computing by drawing a parallel between sticky tools and the discussion in Urp [20] around the meaning of tangible devices in terms of nouns, verbs, reconfigurable tools, attributes, and pure objects. We then relate this discussion to other force-based interaction techniques by describing how a designer can introduce complexity in how people can control both physical and virtual objects, how physical objects can control both physical and virtual objects, and how virtual objects can control virtual objects.

    @inproceedings{hancock:2009:sticky,
      author    = {Mark Hancock and Thomas ten Cate and Sheelagh Carpendale},
      title     = {Sticky Tools: Full {6DOF} Force-Based Interaction for Multi-Touch Tables},
      booktitle = {Proc. ITS},
      year      = {2009},
      pages     = {145--152},
      doi       = {http://doi.acm.org/10.1145/1731903.1731930},
      pdf       = {stickytools.pdf},
      movie     = {stickytools.mov},
      youtube   = {DW8vmbhxKO4},
      subtype   = {conference},
      abstract  = {Tabletop computing techniques are using physically familiar force-based interactions to enable compelling interfaces that provide a feeling of being embodied with a virtual object. We introduce an interaction paradigm that has the benefits of force-based interaction complete with full 6DOF manipulation. Only multi-touch input, such as that provided by the Microsoft Surface and the SMART Table, is necessary to achieve this interaction freedom. This paradigm is realized through sticky tools: a combination of sticky fingers, a physically familiar technique for moving, spinning, and lifting virtual objects; opposable thumbs, a method for flipping objects over; and virtual tools, a method for propagating behaviour to other virtual objects in the scene. We show how sticky tools can introduce richer meaning to tabletop computing by drawing a parallel between sticky tools and the discussion in Urp [20] around the meaning of tangible devices in terms of nouns, verbs, reconfigurable tools, attributes, and pure objects. We then relate this discussion to other force-based interaction techniques by describing how a designer can introduce complexity in how people can control both physical and virtual objects, how physical objects can control both physical and virtual objects, and how virtual objects can control virtual objects.},
    }

  • M. Hancock, O. Hilliges, C. Collins, Dominikus Baur, and S. Carpendale, Exploring Tangible and Direct Touch Interfaces for Manipulating 2D and 3D Information on a Digital Table, in Proc. ITS, 2009, pp. 85-92.

    On traditional tables, people often manipulate a variety of physical objects, both 2D in nature (e.g., paper) and 3D in nature (e.g., books, pens, models, etc.). Current advances in hardware technology for tabletop displays introduce the possibility of mimicking these physical interactions through direct-touch or tangible user interfaces. While both promise intuitive physical interaction, they are rarely discussed in combination in the literature. In this paper, we present a study that explores the advantages and disadvantages of tangible and touch interfaces, specifically in relation to one another. We discuss our results in terms of how effective each technique was for accomplishing both a 3D object manipulation task and a 2D information visualization exploration task. Results suggest that people can more quickly move and rotate objects in 2D with our touch interaction, but more effectively navigate the visualization using tangible interaction. We discuss how our results can be used to inform future designs of tangible and touch interaction.

    @inproceedings{hancock:2009:exploring,
      author    = {Mark Hancock and Otmar Hilliges and Christopher Collins and Dominikus Baur and Sheelagh Carpendale},
      title     = {Exploring Tangible and Direct Touch Interfaces for Manipulating 2D and 3D Information on a Digital Table},
      booktitle = {Proc. ITS},
      year      = {2009},
      pages     = {85--92},
      doi       = {http://doi.acm.org/10.1145/1731903.1731921},
      pdf       = {docudial.pdf},
      movie     = {docudial.mp4},
      youtube   = {o9SQFrjGgyo},
      subtype   = {conference},
      abstract  = {On traditional tables, people often manipulate a variety of physical objects, both 2D in nature (e.g., paper) and 3D in nature (e.g., books, pens, models, etc.). Current advances in hardware technology for tabletop displays introduce the possibility of mimicking these physical interactions through direct-touch or tangible user interfaces. While both promise intuitive physical interaction, they are rarely discussed in combination in the literature. In this paper, we present a study that explores the advantages and disadvantages of tangible and touch interfaces, specifically in relation to one another. We discuss our results in terms of how effective each technique was for accomplishing both a 3D object manipulation task and a 2D information visualization exploration task. Results suggest that people can more quickly move and rotate objects in 2D with our touch interaction, but more effectively navigate the visualization using tangible interaction. We discuss how our results can be used to inform future designs of tangible and touch interaction.},
    }

  • M. Hancock, M. Nacenta, C. Gutwin, and S. Carpendale, The Effects of Changing Projection Geometry on the Interpretation of 3D Orientation on Tabletops, in Proc. ITS, 2009, pp. 175-182.

    Applications with 3D models are now becoming more common on tabletop displays. Displaying 3D objects on tables, however, presents problems in the way that the 3D virtual scene is presented on the 2D surface; different choices in the way the projection is designed can lead to distorted images and difficulty interpreting angles and orientations. To investigate these problems, we studied people's ability to judge object orientations under different projection conditions. We found that errors increased significantly as the center of projection diverged from the observer's viewpoint, showing that designers must take this divergence into consideration, particularly for multi-user tables. In addition, we found that a neutral center of projection combined with parallel projection geometry provided a reasonable compromise for multi-user situations.

    @INPROCEEDINGS{hancock:2009:effects,
    author = {Mark Hancock and Miguel Nacenta and Carl Gutwin and Sheelagh Carpendale},
    title = {The Effects of Changing Projection Geometry on the Interpretation
    of 3D Orientation on Tabletops},
    booktitle = {Proc. ITS},
    year = {2009},
    pages = {175--182},
    abstract = {Applications with 3D models are now becoming more common on tabletop
    displays. Displaying 3D objects on tables, however, presents problems
    in the way that the 3D virtual scene is presented on the 2D surface;
    different choices in the way the projection is designed can lead
    to distorted images and difficulty interpreting angles and orientations.
    To investigate these problems, we studied people's ability to judge
    object orientations under different projection conditions. We found
    that errors increased significantly as the center of projection diverged
    from the observer's viewpoint, showing that designers must take this
    divergence into consideration, particularly for multi-user tables.
    In addition, we found that a neutral center of projection combined
    with parallel projection geometry provided a reasonable compromise
    for multi-user situations.},
    doi = {http://doi.acm.org/10.1145/1731903.1731934},
    pdf = {3Dprojection.pdf},
    subtype = {conference}
    }

2007

  • J. Grubert, M. Hancock, S. Carpendale, E. Tse, and T. Isenberg, Interacting with stroke-based rendering on a wall display, University of Calgary, Technical Report TR-2007-882-34, 2007.

    We introduce two new interaction techniques for creating and interacting with non-photorealistic images using stroke-based rendering. We provide bimanual control of a large interactive canvas through both remote pointing and direct touch. Remote pointing allows people to sit and interact at a distance with an overview of the entire display, while direct-touch interaction provides more precise control. We performed a user study to compare these two techniques in both a controlled setting with constrained tasks and an exploratory setting where participants created their own painting. We found that, although the direct-touch interaction outperformed remote pointing, participants had mixed preferences and did not consistently choose one or the other to create their own painting. Some participants also chose to switch between techniques to achieve different levels of precision and control for different tasks.

    @TECHREPORT{grubert:2007:interacting,
    author = {Jens Grubert and Mark Hancock and Sheelagh Carpendale and Edward
    Tse and Tobias Isenberg},
    title = {Interacting with stroke-based rendering on a wall display},
    institution = {University of Calgary},
    year = {2007},
    type = {Technical Report},
    number = {TR-2007-882-34},
    abstract = {We introduce two new interaction techniques for creating and interacting
    with non-photorealistic images using stroke-based rendering. We provide
    bimanual control of a large interactive canvas through both remote
    pointing and direct touch. Remote pointing allows people to sit and
    interact at a distance with an overview of the entire display, while
    direct-touch interaction provides more precise control. We performed
    a user study to compare these two techniques in both a controlled
    setting with constrained tasks and an exploratory setting where participants
    created their own painting. We found that, although the direct-touch
    interaction outperformed remote pointing, participants had mixed
    preferences and did not consistently choose one or the other to create
    their own painting. Some participants also chose to switch between
    techniques to achieve different levels of precision and control for
    different tasks.},
    pdf = {painting.pdf},
    url = {http://hdl.handle.net/1880/45786}
    }

  • M. Hancock and S. Carpendale, Supporting Multiple Off-Axis Viewpoints at a Tabletop Display, in Proc. Tabletop, 2007, pp. 171-178.

    A growing body of research is investigating the use of tabletop displays, in particular to support collaborative work. People often interact directly with these displays, typically with a stylus or touch. The current common focus of limiting interaction to 2D prevents people from performing actions familiar to them in the 3D world, including piling, flipping and stacking. However, a problem arises when viewing 3D on large displays that are intended for proximal use; the view angle can be extremely oblique and lead to distortion in the perception of the 3D projection. We present a simplified model that compensates for off-axis viewing for a single user and extend this technique for multiple viewers interacting with the same large display. We describe several implications of our approach to collaborative activities. We also describe other display configurations for which our technique may prove useful, including proximal use of a wall or multiple-display configurations.

    @inproceedings{hancock:2007:supporting,
      author    = {Mark Hancock and Sheelagh Carpendale},
      title     = {Supporting Multiple Off-Axis Viewpoints at a Tabletop Display},
      booktitle = {Proc. Tabletop},
      year      = {2007},
      pages     = {171--178},
      doi       = {http://doi.ieeecomputersociety.org/10.1109/TABLETOP.2007.9},
      pdf       = {hancock-OffAxisViewpoints.pdf},
      subtype   = {other},
      abstract  = {A growing body of research is investigating the use of tabletop displays, in particular to support collaborative work. People often interact directly with these displays, typically with a stylus or touch. The current common focus of limiting interaction to 2D prevents people from performing actions familiar to them in the 3D world, including piling, flipping and stacking. However, a problem arises when viewing 3D on large displays that are intended for proximal use; the view angle can be extremely oblique and lead to distortion in the perception of the 3D projection. We present a simplified model that compensates for off-axis viewing for a single user and extend this technique for multiple viewers interacting with the same large display. We describe several implications of our approach to collaborative activities. We also describe other display configurations for which our technique may prove useful, including proximal use of a wall or multiple-display configurations.},
    }

  • M. Hancock, S. Carpendale, and A. Cockburn, Shallow-Depth 3D Interaction: Design and Evaluation of One-, Two- and Three-Touch Techniques, in Proc. CHI 2007, New York, NY, USA, 2007, pp. 1147-1156, (Nominated for Best Paper Award).

    On traditional tables, people frequently use the third dimension to pile, sort and store objects. However, while effective and informative for organization, this use of the third dimension does not usually extend far above the table. To enrich interaction with digital tables, we present the concept of shallow-depth 3D — 3D interaction with limited depth. Within this shallow-depth 3D environment several common interaction methods need to be reconsidered. Starting from any of one, two and three touch points, we present interaction techniques that provide control of all types of 3D rotation coupled with translation (6DOF) on a direct-touch tabletop display. The different techniques exemplify a wide range of interaction possibilities: from the one-touch technique, which is designed to be simple and natural, but inherits a degree of imprecision from its simplicity; through to three-touch interaction, which allows precise bimanual simultaneous control of multiple degrees of freedom, but at the cost of simplicity. To understand how these techniques support interaction in shallow-depth 3D, we present a user study that examines the efficiency of, and preferences for, the techniques developed. Results show that users are fastest and most accurate when using the three-touch technique and that their preferences were also strongly in favour of the expressive power available from three-touch.

    @inproceedings{hancock:2007:shallow-depth,
      author    = {Mark Hancock and Sheelagh Carpendale and Andy Cockburn},
      title     = {Shallow-Depth 3D Interaction: Design and Evaluation of One-, Two- and Three-Touch Techniques},
      booktitle = {Proc. CHI 2007},
      year      = {2007},
      pages     = {1147--1156},
      address   = {New York, NY, USA},
      publisher = {ACM Press},
      location  = {San Jose, California, USA},
      isbn      = {978-1-59593-593-9},
      note      = {(Nominated for Best Paper Award)},
      doi       = {http://doi.acm.org/10.1145/1240624.1240798},
      pdf       = {paper364-hancock.pdf},
      movie     = {threed.mov},
      youtube   = {bjzZY0x9DTM},
      subtype   = {conference},
      abstract  = {On traditional tables, people frequently use the third dimension to pile, sort and store objects. However, while effective and informative for organization, this use of the third dimension does not usually extend far above the table. To enrich interaction with digital tables, we present the concept of shallow-depth 3D -- 3D interaction with limited depth. Within this shallow-depth 3D environment several common interaction methods need to be reconsidered. Starting from any of one, two and three touch points, we present interaction techniques that provide control of all types of 3D rotation coupled with translation (6DOF) on a direct-touch tabletop display. The different techniques exemplify a wide range of interaction possibilities: from the one-touch technique, which is designed to be simple and natural, but inherits a degree of imprecision from its simplicity; through to three-touch interaction, which allows precise bimanual simultaneous control of multiple degrees of freedom, but at the cost of simplicity. To understand how these techniques support interaction in shallow-depth 3D, we present a user study that examines the efficiency of, and preferences for, the techniques developed. Results show that users are fastest and most accurate when using the three-touch technique and that their preferences were also strongly in favour of the expressive power available from three-touch.},
    }

2004

  • M. Hancock, Handedness issues for pen input, Advanced Systems Institute Poster, 2004.
    @misc{hancock:2004:handedness,
      author       = {Mark Hancock},
      title        = {Handedness issues for pen input},
      howpublished = {Advanced Systems Institute Poster},
      year         = {2004},
      pdf          = {hancock-asi-2004.pdf},
      subtype      = {poster},
    }

  • M. Hancock, Improving Menu Placement Strategies for Pen Input, The University of British Columbia, MSc Thesis, 2004.

    Pen-based interaction is becoming a commonplace two degree-of-freedom alternative to the mouse. The use of pen input allows users to acquire targets directly on a computer display. This style of interaction introduces a unique form factor and a new set of considerations in the design of applications for such devices. This thesis presents a series of experiments designed to evaluate the use of pen-input devices on a variety of display setups. In particular, user performance is investigated in terms of menu selections in circular and rectangular pop-up menus using stylus-driven direct input on horizontal and vertical display surfaces. These studies help to clarify effects of hand posture and hand preference. The results of these studies show that both left-handed and right-handed users demonstrate a consistent, but mirrored pattern of selection times that is corroborated by qualitative measures of user preference. This pattern is different for both vertical and horizontal displays due to a change in hand posture. Implementation details are provided for an automatic menu placement strategy for a tabletop display. Details are presented on how to detect which hand is being used to hold the device and on how to apply the results of the study to display rectangular pop-up menus in a co-located collaborative environment.

    @mastersthesis{hancock:2004:improving,
      author   = {Mark Hancock},
      title    = {Improving Menu Placement Strategies for Pen Input},
      school   = {The University of British Columbia},
      year     = {2004},
      note     = {MSc Thesis},
      pdf      = {thesis.pdf},
      abstract = {Pen-based interaction is becoming a commonplace two degree-of-freedom alternative to the mouse. The use of pen input allows users to acquire targets directly on a computer display. This style of interaction introduces a unique form factor and a new set of considerations in the design of applications for such devices. This thesis presents a series of experiments designed to evaluate the use of pen-input devices on a variety of display setups. In particular, user performance is investigated in terms of menu selections in circular and rectangular pop-up menus using stylus-driven direct input on horizontal and vertical display surfaces. These studies help to clarify effects of hand posture and hand preference. The results of these studies show that both left-handed and right-handed users demonstrate a consistent, but mirrored pattern of selection times that is corroborated by qualitative measures of user preference. This pattern is different for both vertical and horizontal displays due to a change in hand posture. Implementation details are provided for an automatic menu placement strategy for a tabletop display. Details are presented on how to detect which hand is being used to hold the device and on how to apply the results of the study to display rectangular pop-up menus in a co-located collaborative environment.},
    }

2003

  • M. Hancock and C. Swindells, User Profiling at an Interactive Tabletop Display, Advanced Systems Institute Poster, 2003.
    @misc{hancock:2003:user,
      author       = {Mark Hancock and Colin Swindells},
      title        = {User Profiling at an Interactive Tabletop Display},
      howpublished = {Advanced Systems Institute Poster},
      year         = {2003},
      pdf          = {hancock-asi-poster.pdf},
      subtype      = {poster},
    }

  • M. S. Hancock, A Bayesian Network Model of a Collaborative Interactive Tabletop Display, University of British Columbia, Technical Report TR-2003-18, 2003.

    In this paper, I explore the use of Bayesian Networks to model the use of an interactive tabletop display in a collaborative environment. Specifically, this model is intended to extract user-profile information for each user including their location at the table as well as their handedness. The network uses input from a six-degrees-of-freedom stylus device as its source of observable information. This paper introduces a first attempt at a model to support these requirements as well as a preliminary evaluation of the model. Results show that the model is sufficiently accurate to obtain a user profile in real time in a Tabletop Display environment.

    @techreport{hancock:2003:bayesian,
      author      = {Mark S. Hancock},
      title       = {A {Bayesian} Network Model of a Collaborative Interactive Tabletop Display},
      institution = {University of British Columbia},
      year        = {2003},
      type        = {Technical Report},
      number      = {TR-2003-18},
      abstract    = {In this paper, I explore the use of Bayesian Networks to model the
        use of an interactive tabletop display in a collaborative environment.
        Specifically, this model is intended to extract user-profile information
        for each user including their location at the table as well as their
        handedness. The network uses input from a six-degrees-of-freedom
        stylus device as its source of observable information. This paper
        introduces a first attempt at a model to support these requirements
        as well as a preliminary evaluation of the model. Results show that
        the model is sufficiently accurate to obtain a user profile in real
        time in a Tabletop Display environment.},
      pdf         = {hancock-ubc-techreport2003.pdf},
    }