@inproceedings{a87e1735f1d6401ebbcea27249444573,
title = "Protecting the Visual Fidelity of Machine Learning Datasets Using QR Codes",
abstract = "Machine learning is becoming increasingly popular in a variety of modern technology. However, research has demonstrated that machine learning models are vulnerable to adversarial examples in their inputs. Potential attacks include poisoning datasets by perturbing input samples to mislead a machine learning model into producing undesirable results. Such perturbations are often subtle and imperceptible from a human{\textquoteright}s perspective. This paper investigates two methods of verifying the visual fidelity of image based datasets by detecting perturbations made to the data using QR codes. In the first method, a verification string is stored for each image in a dataset. These verification strings can be used to determine whether an image in the dataset has been perturbed. In the second method, only a single verification string stored and is used to verify whether an entire dataset is intact.",
keywords = "Adversarial machine learning, Cyber security, QR code, Visual fidelity, Watermarking",
author = "Chow, {Yang Wai} and Willy Susilo and Jianfeng Wang and Richard Buckland and Joonsang Baek and Jongkil Kim and Nan Li",
note = "Publisher Copyright: {\textcopyright} 2019, Springer Nature Switzerland AG.; null ; Conference date: 19-09-2019 Through 21-09-2019",
year = "2019",
doi = "10.1007/978-3-030-30619-9_23",
language = "English",
isbn = "9783030306182",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Verlag",
pages = "320--335",
editor = "Xiaofeng Chen and Xinyi Huang and Jun Zhang",
booktitle = "Machine Learning for Cyber Security - 2nd International Conference, ML4CS 2019, Proceedings",
}
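
The abstract describes two verification granularities: a verification string per image, and a single verification string for the whole dataset. As a rough illustration only (the paper's actual scheme embeds verification data via QR codes and watermarking, which is not reproduced here), the following Python sketch mimics the two granularities with plain SHA-256 digests; all function names and paths are hypothetical.

import hashlib
from pathlib import Path

def image_verification_string(image_path: Path) -> str:
    # Analogue of method 1: one verification string stored per image.
    return hashlib.sha256(image_path.read_bytes()).hexdigest()

def image_is_intact(image_path: Path, stored_string: str) -> bool:
    # Recompute the per-image string and compare with the stored one
    # to detect whether this image has been perturbed.
    return image_verification_string(image_path) == stored_string

def dataset_verification_string(image_paths: list[Path]) -> str:
    # Analogue of method 2: a single verification string for the entire dataset,
    # built by hashing the per-image strings in a fixed (sorted) order so the
    # result is reproducible.
    h = hashlib.sha256()
    for p in sorted(image_paths):
        h.update(image_verification_string(p).encode())
    return h.hexdigest()

In this hash-based analogue, a mismatch of the dataset-level string indicates that some image was perturbed but not which one, whereas per-image strings localise the tampered sample at the cost of storing one string per image, mirroring the trade-off between the two methods described in the abstract.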