@article{Zhao_Villamil_Xia_2018,
  author   = {Zhao, Zhibing and Villamil, Tristan and Xia, Lirong},
  title    = {Learning Mixtures of Random Utility Models},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {32},
  number   = {1},
  year     = {2018},
  month    = apr,
  doi      = {10.1609/aaai.v32i1.11727},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/11727},
  abstract = {We tackle the problem of identifiability and efficient learning of mixtures of Random Utility Models (RUMs). We show that when the PDFs of utility distributions are symmetric, the mixture of $k$ RUMs (denoted by $k$-RUM) is not identifiable when the number of alternatives $m$ is no more than $2k-1$. On the other hand, when $m \geq \max\{4k-2, 6\}$, any $k$-RUM is generically identifiable. We then propose three algorithms for learning mixtures of RUMs: an EM-based algorithm, which we call E-GMM, a direct generalized-method-of-moments (GMM) algorithm, and a sandwich (GMM-E-GMM) algorithm that combines the other two. Experiments on synthetic data show that the sandwich algorithm achieves the highest statistical efficiency and GMM is the most computationally efficient. Experiments on real-world data at Preflib show that Gaussian $k$-RUMs provide better fitness than a single Gaussian RUM, the Plackett-Luce model, and mixtures of Plackett-Luce models w.r.t.\ commonly-used model fitness criteria. To the best of our knowledge, this is the first work on learning mixtures of general RUMs.},
}