@COMMENT This file was generated by bib2html.pl version 0.94
@COMMENT written by Patrick Riley
@COMMENT This file came from Gal A. Kaminka's publication pages at
@COMMENT http://www.cs.biu.ac.il/~galk/publications/

@techreport{visionswarmtech23,
  author      = {David L. Krongauz and Amir Ayali and Gal A. Kaminka},
  title       = {Vision-Based Collective Motion: A Locust-Inspired Reductionist Model},
  year        = {2023},
  number      = {2023.01.17.524210},
  institution = {bioRxiv},
  doi         = {10.1101/2023.01.17.524210},
  abstract    = {Collective motion is an omnipresent, fascinating phenomenon. Swarming individuals aggregate, coordinate, and move using only local social cues projected by conspecifics in their vicinity. Major theoretical studies have assumed perfect information availability, where agents rely on complete and exact knowledge of inter-agent distances and velocities; the sensory modalities responsible for acquiring this environmental information were often ignored. Vision plays a central role in animal perception, and in many cases of collective motion it is the sole source of social information. Here we investigate a vision-based collective motion model inspired by locust marching bands. We address two major challenges: estimating distance and velocity, and handling visual occlusions. We compare three strategies an agent can use to interpret partially occluded visual information; these differ in the visual cognition capabilities they assume of the agent and in their respective computational requirements. In silico experiments conducted under various geometrical conditions show that the three approaches display different rates of convergence to an ordered state: the least computationally demanding approach, in which no peer recognition takes place, shows slower convergence in geometrically constrained environments. Our findings provide insights into the visual-processing requirements of biological as well as artificial swarming agents in complex settings.},
}