@COMMENT This file was generated by bib2html.pl version 0.94
@COMMENT written by Patrick Riley
@COMMENT This file came from Gal A. Kaminka's publication pages at
@COMMENT http://www.cs.biu.ac.il/~galk/publications/

@InProceedings{haidm15,
  author         = {Mor Vered and Gal A. Kaminka},
  title          = {If you can draw it, you can recognize it: Mirroring for Sketch Recognition},
  booktitle      = {Proceedings of the AAMAS Workshop on Human-Agent Interaction Design and Models},
  OPTcrossref    = {},
  OPTkey         = {},
  OPTpages       = {},
  year           = {2015},
  OPTeditor      = {},
  OPTvolume      = {},
  OPTnumber      = {},
  OPTseries      = {},
  OPTaddress     = {},
  OPTmonth       = {},
  OPTorganization = {},
  OPTpublisher   = {},
  OPTnote        = {},
  OPTannote      = {},
  OPTurl         = {},
  OPTurldate     = {},
  OPTlastchecked = {},
  OPTdoi         = {},
  OPTisbn        = {},
  OPTissn        = {},
  OPTlocalfile   = {},
  abstract       = {Humans use sketches drawn on paper, on a computer, or via hand gestures in the air as part of their communication. To recognize shapes in sketches, most existing work focuses on offline (post-drawing) recognition methods, trained on large sets of examples that serve as a plan library for the recognition method. These methods do not allow online recognition, and require a very large library (or expensive pre-processing) in order to recognize shapes that have been translated, rotated, or scaled. Inspired by mirroring processes in the human brain, we present an online shape recognizer that identifies multi-stroke geometric shapes without a plan library. Instead, the recognizer uses a shape-drawing planner for drawn-shape recognition, i.e., a form of plan recognition by planning. This method (1) allows recognition of shapes that is immune to geometric translation, rotation, and scaling; (2) eliminates the need for storing a library of shapes to be matched against drawings (instead, it needs only a set of possible goals and a planner that can instantiate them in any manner); and (3) allows fast online recognition. The method is particularly suited to complete agents, which must not only recognize sketches but also produce them, and therefore necessarily already have a drawing planner. We compare the performance of different variants of the recognizer to that of humans, and show that its recognition level is close to that of humans, while making fewer recognition errors early in the recognition process.},
  wwwnote        = {},
  OPTkeywords    = {},
}