{"version.version":"0.0.7","version.cm_url":"https://github.com/ceurws/ceur-spt","spt.html_url":"/Vol-4154/paper1.html","spt.description":null,"spt.id":"Vol-4154/paper1","spt.wikidataid":null,"spt.title":"Alignment and Adversarial Robustness: Are More Human-Like Models More Secure?","spt.pdfUrl":"https://ceur-ws.org/Vol-4154/paper1.pdf","spt.volume":{"number":4154,"acronym":"SPAIML 2025","wikidataid":null,"title":"Proceedings of the 1st International Workshop on Security and Privacy-Preserving AI/ML","description":null,"url":null,"date":"2026-01-28","dblp":null,"k10plus":null,"urn":null},"spt.session":null,"cvb.id":"Vol-4154/paper1","cvb.title":"Alignment and Adversarial Robustness: Are More Human-Like Models More Secure?","cvb.type":null,"cvb.position":null,"cvb.pagesFrom":null,"cvb.pagesTo":null,"cvb.authors":"Blaine Hoak,Kunyang Li,Patrick McDaniel","cvb.vol_number":"4154","cvb.pdf_name":"paper1.pdf","cvb.pages":"1-10","cvb.fail":null}