@article{Wen:2025aa,
 abstract = {Current discussion surrounding the clinical capabilities of generative language models (GLMs) predominantly centers around multiple-choice question-answer (MCQA) benchmarks derived from clinical licensing examinations. While accepted for human examinees, characteristics unique to GLMs bring into question the validity of such benchmarks. Here, we validate five benchmarks using eight GLMs, ablating for parameter size and reasoning capabilities, validating via prompt permutation three key assumptions that underpin the generalizability of MCQA-based assessments: that knowledge is applied, not memorized, that semantic consistency will lead to consistent answers, and that situations with no answers can be recognized. While large models are more resilient to our perturbations compared to small models, we globally invalidate these assumptions, with implications for reasoning models. Additionally, despite retaining the knowledge, small models are prone to memorization. All models exhibit significant failure in null-answer scenarios. We then suggest several adaptations for more robust benchmark designs, more reflective of real-world conditions.},
 author = {Wen, Andrew and Lu, Qiuhao and Chuang, Yu-Neng and Wang, Guanchu and Yuan, Jiayi and Zhang, Jiamu and Wang, Liwei and Fu, Sunyang and Miller, Kurt D. and Jia, Heling and Bedrick, Steven D. and Hersh, William R. and Roberts, Kirk E. and Hu, Xia and Liu, Hongfang},
 bdsk-url-1 = {https://doi.org/10.1038/s41746-025-02253-2},
 date = {2025-12-27},
 date-added = {2026-01-13 09:05:53 -0800},
 date-modified = {2026-01-13 09:05:53 -0800},
 doi = {10.1038/s41746-025-02253-2},
 id = {Wen2025},
 issn = {2398-6352},
 journal = {npj Digital Medicine},
 title = {Context matching is not reasoning when performing generalized clinical evaluation of generative language models},
 year = {2025}
}
