@inproceedings{ee9f9960e3774bb598944a0585e3c4a6,
  title         = {Continual Learning with Differential Privacy},
  abstract      = {In this paper, we focus on preserving differential privacy (DP) in continual learning (CL), in which we train ML models to learn a sequence of new tasks while memorizing previous tasks. We first introduce a notion of continual adjacent databases to bound the sensitivity of any data record participating in the training process of CL. Based upon that, we develop a new DP-preserving algorithm for CL with a data sampling strategy to quantify the privacy risk of training data in the well-known Averaged Gradient Episodic Memory (A-GEM) approach by applying a moments accountant. Our algorithm provides formal guarantees of privacy for data records across tasks in CL. Preliminary theoretical analysis and evaluations show that our mechanism tightens the privacy loss while maintaining a promising model utility.},
  keywords      = {Continual learning, Deep learning, Differential privacy},
  author        = {Desai, Pradnya and Lai, Phung and Phan, {Nhat Hai} and Thai, {My T.}},
  note          = {Publisher Copyright: {\textcopyright} 2021, Springer Nature Switzerland AG.; 28th International Conference on Neural Information Processing, ICONIP 2021 ; Conference date: 08-12-2021 Through 12-12-2021},
  year          = {2021},
  doi           = {10.1007/978-3-030-92310-5_39},
  language      = {American English},
  isbn          = {9783030923099},
  series        = {Communications in Computer and Information Science},
  publisher     = {Springer Science and Business Media Deutschland GmbH},
  pages         = {334--343},
  editor        = {Mantoro, Teddy and Lee, Minho and Ayu, {Media Anugerah} and Wong, {Kok Wai} and Hidayanto, {Achmad Nizar}},
  booktitle     = {Neural Information Processing - 28th International Conference, {ICONIP} 2021, Proceedings},
  address       = {Germany},
  internal-note = {NOTE(review): address should be the publisher's city, not a country -- confirm (likely Berlin or Cham) and consider adding the CCIS series volume number},
}