@article{Anibal:2026aa,
 abstract = {Accessibility and cost remain barriers to the adoption of healthcare technology and will determine the impact of breakthroughs like generative AI. However, despite recent advancements in these areas, AI models may still contain biases and be prone to misuse by governments or other power structures with an interest in influencing public opinion. This report examines the potential effects of these ``pro-state'' biases on the delivery of healthcare. DeepSeek is used as a case study to illustrate the healthcare risks that may arise from unknown or biased post-training methods and other forms of AI knowledge editing.},
 author = {Anibal, James and Bedrick, Steven and Nguyen, Hang and Gunkel, Jasmine and Huth, Hannah and Le, Tram and Salvi Cruz, Samantha and Hazen, Lindsey and Wood, Bradford J.},
 bdsk-url-1 = {https://doi.org/10.1007/s43681-025-00842-1},
 date = {2026-01-08},
 date-added = {2026-01-13 09:06:28 -0800},
 date-modified = {2026-01-13 09:06:28 -0800},
 doi = {10.1007/s43681-025-00842-1},
 id = {Anibal2026},
 issn = {2730-5961},
 journal = {AI and Ethics},
 number = {1},
 pages = {94},
 title = {{DeepSeek} for Healthcare: Do No Harm?},
 url = {https://doi.org/10.1007/s43681-025-00842-1},
 volume = {6},
 year = {2026}
}
