@COMMENT This file was generated by bib2html.pl version 0.94
@COMMENT written by Patrick Riley
@COMMENT This file came from Gal A. Kaminka's publication pages at
@COMMENT http://www.cs.biu.ac.il/~galk/publications/

@inproceedings{ala26ws-erel,
  author    = {Shtossel, Erel and Vidler, Alicia and Shaham, Uri and Kaminka, Gal A.},
  title     = {A Harmonic-Mean Formulation of Average-Reward Reinforcement Learning in {SMDP}s},
  booktitle = {Proceedings of the {AAMAS} Workshop on Adaptive and Learning Agents ({ALA})},
  year      = {2026},
  abstract  = {Recent research has revived and amplified interest in algorithms for undiscounted average-reward reinforcement learning in infinite-horizon, non-episodic (continuing) tasks. Semi-Markov decision processes (SMDPs) are of particular interest. In SMDPs, discrete actions stochastically generate both rewards and durations, and the objective is to optimize the average reward rate. Existing algorithms approach this by optimizing the ratio of rewards to durations. However, when rewards and durations are non-stationary (in the infinite horizon), this can be incorrect. This paper presents a novel modified harmonic mean operator that correctly computes reward rates even under such conditions. This yields model-free learning algorithms that can work with SMDPs, while maintaining robustness to non-stationary reward and duration distributions over time. We prove theoretical properties of the modified harmonic mean operator, and empirically demonstrate its efficacy in comparison to existing algorithms.},
}