<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">J Particip Med</journal-id><journal-id journal-id-type="publisher-id">jopm</journal-id><journal-id journal-id-type="index">28</journal-id><journal-title>Journal of Participatory Medicine</journal-title><abbrev-journal-title>J Particip Med</abbrev-journal-title><issn pub-type="epub">2152-7202</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v17i1e68146</article-id><article-id pub-id-type="doi">10.2196/68146</article-id><article-categories><subj-group subj-group-type="heading"><subject>Viewpoint</subject></subj-group></article-categories><title-group><article-title>Generative AI as Third Agent: Large Language Models and the Transformation of the Clinician-Patient Relationship</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Campos</surname><given-names>Hugo de O</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Wolfe</surname><given-names>Daniel</given-names></name><degrees>MPH, MPhil</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Luan</surname><given-names>Hongzhou</given-names></name><degrees>MEng</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Sim</surname><given-names>Ida</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Strategic Advisory Board, Computational Precision Health, University of California, Berkeley and University of California, San Francisco</institution><addr-line>2195 Hearst Ave, Suite 120</addr-line><addr-line>Oakland</addr-line><addr-line>CA</addr-line><country>United States</country></aff><aff id="aff2"><institution>Computational Precision Health, University of California, Berkeley and University of California, San Francisco</institution><addr-line>Berkeley</addr-line><addr-line>CA</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Medicine, University of California, San Francisco</institution><addr-line>San Francisco</addr-line><addr-line>CA</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Woods</surname><given-names>Susan</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Hudson</surname><given-names>Matthew</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Greene</surname><given-names>Sarah</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Adams</surname><given-names>Laura</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Cordovano</surname><given-names>Grace</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Hamdan</surname><given-names>Achmad</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>McCollister</surname><given-names>Anna</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Murray</surname><given-names>Ian</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Arasteh</surname><given-names>Soroosh Tayebi</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to  Hugo de O Campos, Strategic Advisory Board, Computational Precision Health, University of California, Berkeley and University of California, San Francisco, 2195 Hearst Ave, Suite 120, Oakland, CA, 94720, United States; <email>hcampos@researchpartner.org</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>11</day><month>8</month><year>2025</year></pub-date><volume>17</volume><elocation-id>e68146</elocation-id><history><date date-type="received"><day>29</day><month>10</month><year>2024</year></date><date date-type="rev-recd"><day>24</day><month>06</month><year>2025</year></date><date date-type="accepted"><day>30</day><month>06</month><year>2025</year></date></history><copyright-statement>&#x00A9; Hugo de O Campos, Daniel Wolfe, Hongzhou Luan, Ida Sim. Originally published in Journal of Participatory Medicine (<ext-link ext-link-type="uri" xlink:href="https://jopm.jmir.org">https://jopm.jmir.org</ext-link>), 11.8.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in Journal of Participatory Medicine, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://jopm.jmir.org">https://jopm.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://jopm.jmir.org/2025/1/e68146"/><abstract><p>The use of artificial intelligence (AI) in health care has significant implications for patient-clinician interactions. Practical and ethical challenges have emerged with the adoption of large language models (LLMs) that respond to prompts from clinicians, patients, and caregivers. With an emphasis on patient experience, this paper examines the potential of LLMs to act as facilitators, interrupters, or both in patient-clinician relationships. Drawing on our experiences as patient advocates, computer scientists, and physician informaticists working to improve data exchange and patient experience, we examine how LLMs might enhance patient engagement, support triage, and inform clinical decision-making. While affirming LLMs as a tool enabling the rise of the &#x201C;AI patient,&#x201D; we also explore concerns surrounding data privacy, algorithmic bias, moral injury, and the erosion of human connection. To help navigate these tensions, we outline a conceptual framework that anticipates the role and impact of LLMs in patient-clinician dynamics and propose key areas for future inquiry. Realizing the potential of LLMs requires careful consideration of which aspects of the patient-clinician relationship must remain distinctly human and why, even when LLMs offer plausible substitutes. This inquiry should draw on ethics and philosophy, aligned with AI imperatives such as patient-centered design and transparency, and shaped through collaboration between technologists, health care providers, and patient communities.</p></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>large language model</kwd><kwd>generative AI</kwd><kwd>healthcare</kwd><kwd>empowerment</kwd><kwd>patient-clinician relationship</kwd><kwd>patient engagement</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The integration of artificial intelligence (AI) into health care has rapidly transformed various aspects of medical practice, from diagnostics to treatment planning. The emergence of generative AI, particularly large language models (LLMs) that can interact and communicate with humans in a personalized and empathetic way, heralds what some commentators have termed &#x201C;relational AI,&#x201D; [<xref ref-type="bibr" rid="ref1">1</xref>] with LLMs now functioning increasingly like &#x201C;agents in the clinic&#x201D; interposing themselves into patient-clinician interactions [<xref ref-type="bibr" rid="ref2">2</xref>]. It is not yet clear if LLMs will act as facilitators&#x2014;enhancing communication, supporting decision-making, and strengthening the clinician-patient relationship&#x2014;or as interrupters, disrupting natural interactions, creating friction, or undermining trust. This paper explores patient and clinician perspectives on that question and proposes areas for inquiry when researching participatory medicine in an AI-enabled clinical relationship.</p><p>Patients have long sought medical information and support outside of traditional clinical settings, and since the advent of the internet, have often turned to online resources, listservs, and virtual communities [<xref ref-type="bibr" rid="ref3">3</xref>]. The emergence of powerful and easily navigated search engines, followed by Web 2.0&#x2019;s digital and social networking platforms, amplified this trend, offering patients unprecedented access to medical knowledge and peer-to-peer platforms for sharing personal medical information and clinical experiences [<xref ref-type="bibr" rid="ref4">4</xref>]. The rise of internet use for health information and self-diagnosis fueled a movement of e-patients [<xref ref-type="bibr" rid="ref5">5</xref>] even as medical professionals raised concerns regarding the impact of &#x201C;Dr. Google&#x201D; on patient-clinician relations. The ship is sailed: as many as half of Americans seek health care information online for themselves or others, without evidence of negative effects on health outcomes or the patient-clinician relationship [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Legislative and technological developments have given new data and scope to the e-patient movement. More than 1 in 3 Americans now own a wearable or portable device that collects information about steps, sleep, blood pressure, heart rate, or other indicators of health or fitness [<xref ref-type="bibr" rid="ref8">8</xref>]. The 21st Century Cures Act has required health systems to adopt information standards allowing data export and interoperability with other systems and mandated that patients have electronic access to their health information at no cost [<xref ref-type="bibr" rid="ref9">9</xref>]. Advocates from the OpenNotes movement, which promotes trust-building through the sharing of medical records between health care providers and patients, have heralded these developments as key enablers of a shift to democratized, person-centric, and participatory health care [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. If, as the e-patient movement suggests, data is power [<xref ref-type="bibr" rid="ref12">12</xref>], then new types of data&#x2014;and new ways of collecting and accessing it&#x2014;can further empower patients. LLMs that allow patients and providers to organize health information, draw insights from it, and increasingly engage in iterative &#x201C;prompting&#x201D; with personal health data add a new layer to this already intricate landscape.</p><p>Building on the framing of LLMs as a &#x201C;relational&#x201D; technology, this paper focuses on their impact on clinician-patient relations. LLMs have been simultaneously hailed as a &#x201C;turning point in patient power&#x201D; [<xref ref-type="bibr" rid="ref13">13</xref>] and as an unreliable, inconsistent, and unaccountable tool that is dangerous for medical use [<xref ref-type="bibr" rid="ref14">14</xref>]. The degree to which LLM use will be acknowledged during a clinical encounter, and whether or not LLMs are a facilitator of or impediment to therapeutic alliance, are open questions. Writing from our perspectives as patient advocates, computer scientists, and physician-informaticists, we explore various potential roles for LLMs in the clinical exchange. We propose areas of research aimed at better understanding current attitudes toward and uses of LLMs, while moving us toward a collaborative use that could enrich therapeutic alliance and health outcomes.</p></sec><sec id="s2"><title>Patient and Personal Care Team: The Rise of the AI Patient</title><p>Even before LLMs, patients regarded AI use in health care with ambivalence. Six in ten said they would be uncomfortable with their provider relying on AI in providing their care. A nearly equal number thought that using AI to diagnose disease or recommend treatment would make relations with clinicians worse. At the same time, a majority believed racial bias would be decreased if AI were used more to do things such as diagnose disease and recommend treatments for patients [<xref ref-type="bibr" rid="ref15">15</xref>].</p><p>LLMs, and the associated ability of patients to move from passive objects of AI to more active users, may change the calculus. We join those patient advocates who see LLMs as a new and important tool in health care and self-care, particularly in addressing the gaps in access and communication that often plague the current health care system. Chat interfaces like OpenAI&#x2019;s ChatGPT have opened options previously unavailable to the e-patient, lowering the technology and language literacy barrier, allowing patients to ask questions in multiple languages, generating responses tailored to different audiences and responsive to requests for clarification or further elucidation. By synthesizing complex, highly technical health-related literature or multiple examination results into understandable summaries for a range of educational levels, LLMs have become valuable tools for participatory medicine. For patients and their families, these technologies facilitate a fundamental and empowering shift in the flow of information, moving from patients to doctors rather than the other way around [<xref ref-type="bibr" rid="ref13">13</xref>]. If electronic medical records and the internet enabled the rise of the e-patient, LLMs are now driving the rise of the AI patient [<xref ref-type="bibr" rid="ref16">16</xref>].</p><p>As a caretaker for his elderly father and a patient with a genetic heart condition, one of us (HdOC) relies on LLMs for various tasks, including preparing for appointments, organizing health information, weighing the pros and cons of different medical interventions, and summarizing medical notes for family members not fluent in English. HdOC&#x2019;s recent leveraging of LLMs to navigate his father&#x2019;s complex medical needs demonstrates the technology&#x2019;s potential to empower informed patients and bridge information gaps (<xref ref-type="other" rid="box1">Textbox 1</xref>). More generally, LLMs are already being used by patients and by the families and caregivers integral to their care to simplify and improve understanding of informed consent forms, to parse complicated communications from insurance companies or a medical note, to better understand laboratory notes, and to translate any and all of the above into different languages [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>].</p><p>While all patients have the potential to increase their sense of agency and engagement in their health care, not all have the technological literacy to use LLMs to advance participatory medicine. AI may thus exacerbate existing health disparities and create a digital divide between those with internet access and those without, and between those who have or who lack the skills and resources to use AI tools effectively. Former Google CEO Eric Schmidt, speaking to Stanford students at a 2024 forum on AI&#x2019;s likely impact on global development, offered a prognosis relevant here: &#x201C;the rich get richer, and the poor do the best they can&#x201D; [<xref ref-type="bibr" rid="ref19">19</xref>]. For example, the size of the &#x201C;context window&#x201D; that determines the amount of information that an LLM can take in is expected shortly to grow to more than a million tokens, or the equivalent of 750,000 words. This massive expansion, a 45-fold increase from earlier models such as GPT 3.5, will mean that those who can pay for premium LLM services will receive more personalized and contextualized answers. Those relegated to smaller-capacity, free LLM services will not. Even use of freely available tools will require understanding of multiple dimensions: English proficiency, medical literacy, numeracy, and technical and critical thinking skills to help them make informed decisions in an increasingly AI-mediated health care landscape [<xref ref-type="bibr" rid="ref20">20</xref>].</p><boxed-text id="box1"><title>  Case example.</title><p>One of us (HdOC) faced a challenging situation when his older adult father developed a severe pruritic rash, and the earliest available dermatologist appointment was months away. Drawing from his experience using large language models (LLMs) for his own health care, HdOC turned to various publicly available chatbots, including Anthropic&#x2019;s Claude 3 Opus, Perplexity AI&#x2019;s Perplexity, and OpenAI&#x2019;s GPT-4o, for counsel on his father&#x2019;s condition. He meticulously collected his father&#x2019;s medical records by downloading clinical notes from past clinical encounters and accessing blood test results (with permission) through his father&#x2019;s electronic patient record portal. Armed with these records, including laboratory results, recent clinical notes, and photos of the rash, HdOC then used multiple LLMs to analyze these inputs. The models provided differential diagnoses, recommended actions, and identified a potential link between the rash and his father&#x2019;s underlying chronic kidney disease.</p><p>HdOC then developed a multipronged treatment plan based on LLM recommendations. The plan included strict dietary adjustments to manage kidney function, reducing shower frequency and temperature to prevent skin dryness, aggressive moisturizing with fragrance-free products, and the application of topical corticosteroids to control itching and inflammation. HdOC also used the LLMs to translate this information into Portuguese, ensuring that HdOC&#x2019;s father could understand the proposed treatment plan and participate in the decision-making process.</p><p>By comparing outputs from different LLMs and validating AI-suggested interventions through online searches and email correspondence with medical professionals, HdOC implemented a care plan that significantly improved the rash within 10 days. By the time the appointment with the dermatologist arrived, the rash had mostly cleared up. Strategic use of the tools enabled an approach that transformed the traditional patient-clinician dynamic into a more equal partnership, correcting power and information asymmetry, and ultimately leading to better outcomes and enhanced patient satisfaction.</p><p>Prompts and outputs from LLMs are included in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></boxed-text><p>While the use of an LLM as an ally or &#x201C;doctor in your pocket&#x201D; holds great potential, integrating LLM insights into the patient-clinician relationship remains a challenge. Just as the e-patient movement emphasized patient control over &#x201C;our data,&#x201D; AI patients are likely to support LLMs for personal health use but may be wary of their adoption by institutions, health systems, or commercial entities. Comfort about when or if LLMs are used in the diagnostic and care pathway may vary by patient and condition: surveys of patients asked to consider AI use to augment or replace physician input in the years before LLM availability, for example, found significant differences in concerns about privacy and AI-assisted diagnosis among those with chronic or acute conditions, as well as variation in understanding of AI function by age and demographics [<xref ref-type="bibr" rid="ref21">21</xref>]. Patients with rare diseases and their family members today, for example, are significantly more likely to use LLMs for health assistance than other patients [<xref ref-type="bibr" rid="ref22">22</xref>]&#x2014;we do yet know how this impacts their interactions with clinicians, or if it will positively impact their care. Diagnostic errors with general purpose LLMs such as GPT (OpenAI), Llama (Meta AI), or Gemini (Google LLC) are a particular area of concern&#x2014;multiple studies show that while these models can answer examination questions or analyze clinical vignettes correctly, they often produce diagnostic conclusions or responses at variance with clinician recommendations when confronted with real-life, &#x201C;noisy&#x201D; medical data, and reproduce racial or gender biases and stereotypes adversely impacting diagnosis [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref27">27</xref>].</p><p>Usage transparency&#x2014;knowing when and how LLMs are being used by any part of the health care system&#x2014;is also likely to be key to patient trust. Patients discern whether medical professionals or health systems deem them worthy of enough respect to disclose when AI has been deployed in their care and to inform them of potential limitations [<xref ref-type="bibr" rid="ref28">28</xref>]. A patient at a recent advocacy forum shared her experience with a clinic representative named Jennifer, with whom she had been messaging about medication refills and scheduling an appointment. Jennifer was helpful, kind, and friendly, even engaging in casual conversation about personal topics. But when the patient arrived for her appointment and asked to say hello to Jennifer, she was surprised to learn that Jennifer was a chatbot&#x2014;not a real person (personal communication, 2024). Many patients express discomfort when an LLM is used to replace a genuine human connection. But is the discomfort arising from being misled, or does it stem from deeper existential concerns about forming relationships with a nonhuman entity? Can these qualms be overcome? And should they be?</p></sec><sec id="s3"><title>Keyboard Liberation or Loss of Human Connection?</title><sec id="s3-1"><title>Promise and Pitfalls</title><p>For many patients and clinicians, the most immediate use case for LLMs is what Eric Topol famously termed &#x201C;keyboard liberation,&#x201D; [<xref ref-type="bibr" rid="ref29">29</xref>] reducing time spent feeding information into electronic health records (EHRs) and increasing opportunities for interaction. LLM-driven scribing systems, which listen to patient-clinician exchanges and automatically generate large parts of clinical notes and after-visit summaries, are increasingly deployed in well-resourced, AI-capable health care systems [<xref ref-type="bibr" rid="ref30">30</xref>]. Early reports are that both patients and clinicians feel more connected when the clinician can shift attention from the keyboard [<xref ref-type="bibr" rid="ref31">31</xref>]. By taking over rote administrative tasks and allowing clinicians to shift their focus from screens back to patients, the best-case scenario is that LLMs will free up clinicians to practice &#x201C;at the top of their license,&#x201D; reducing clinician burnout and improving patient experience [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>].</p><p>The same advances that promise liberation, however, may also bring unanticipated and undesired shifts in roles. For nearly a decade, analysts have debated whether various physician roles&#x2014;from radiologists to primary care providers&#x2014;will be needed at all in an AI-enabled future, or whether replacement with AI-enabled avatars could reduce the burden and increase health care delivery [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. Current LLMs remain vulnerable to hallucinations, errors of fact or reasoning that make the elimination of a human in the loop inadvisable. Performance can be improved through human correction (eg, reinforcement learning through human feedback), and through the fine-tuning of smaller, more health care&#x2013;focused models by supplementing their built-in knowledge with a connection to external medical databases and peer-reviewed literature. Whether this method&#x2014;known as retrieval-augmented generation&#x2014;will improve LLMs to allow for unsupervised diagnosis remains uncertain.</p><p>The risk that LLMs will eclipse humans in the clinical encounter is a concern even when clinicians are present. Analysts have warned that as health systems increase their use of LLMs, human clinical skills may degrade over time, particularly as LLMs ingest AI-generated data for training, creating a self-referential and increasingly machine-driven learning loop [<xref ref-type="bibr" rid="ref14">14</xref>]. Automation bias&#x2014;the belief that the machine-generated insights are more authoritative than they actually are&#x2014;is another concern raised by those analyzing the potential impacts of physician use of LLM-generated notes in the EHR [<xref ref-type="bibr" rid="ref36">36</xref>]. The same bias may apply to patients using LLMs to organize and analyze medical information.</p><p>Finally, LLM recommendations may add moral injury to the clinical encounter. Managed care has for some time required physicians to play a dual and conflicted role in the health system, tasked both with protecting patient well-being and achieving cost containment or other health system priorities [<xref ref-type="bibr" rid="ref37">37</xref>]. It is easy to imagine LLMs trained by payors or health systems mandating that clinicians adhere to algorithmically determined actions even when these conflict with their clinical judgment on what is best for patient health.</p></sec><sec id="s3-2"><title>Strengthening the Human in the Clinical Exchange</title><p>LLM use has sharpened longstanding questions about which qualities in care are considered essentially human, and how these impact the patient-clinician relationship. On the one hand, LLMs have highlighted  the patient view that human clinicians might benefit from their own &#x201C;fine-tuning&#x201D;: in a study comparing physician and AI chatbot responses to questions on a public forum, patients rated the AI responses as more empathetic [<xref ref-type="bibr" rid="ref38">38</xref>]. However, critics argue that such expressions amount to &#x201C;artificial empathy&#x201D;&#x2014;a superficial simulation rather than a genuine recognition of patient worry or suffering [<xref ref-type="bibr" rid="ref39">39</xref>]. Just as selecting the correct answer on a multiple-choice medical examination cannot replace a seasoned clinician&#x2019;s intuition or ability to recognize subtle patterns [<xref ref-type="bibr" rid="ref40">40</xref>], an empathetic-sounding reply does not equal the deeper understanding of a patient&#x2019;s distress&#x2014;or sensitivity to the moral and cultural values that shape appropriate response&#x2014;that defines authentic human empathy in care.</p><p>What do patients value? Busch et al [<xref ref-type="bibr" rid="ref41">41</xref>] conducted a meta-analysis of studies examining what patients and caregivers regarded as central to humanistic exchange in clinical encounters. They found that a majority highlighted 6 elements. Each raises questions about whether LLMs, regardless of their command of medical facts, will be able to reproduce these elements or fall short (<xref ref-type="other" rid="box2">Textbox 2</xref>).</p><boxed-text id="box2"><title>  Key elements of humanistic care: the patient&#x2019;s view.</title><p>While competence in diagnosis and treatment is a key concern, many other factors also determine what patients and caregivers value in care. A meta-analysis by Busch et al [<xref ref-type="bibr" rid="ref41">41</xref>] found that a majority identified the 6 elements below as key to humanistic care. Each suggests questions about how LLM use might facilitate or impede them.</p><p><bold>1. Empathy.</bold> This extends beyond the clinical encounter to include genuine, emotionally engaged awareness of patient or caregiver experience outside the clinic, and clinician openness in learning more about the complexity of the patient&#x2019;s point of view [<xref ref-type="bibr" rid="ref42">42</xref>].</p><p><bold>2. Respect for patients&#x2019; (and caregivers&#x2019;) dignity, uniqueness, individuality, and humanity.</bold> In addition to respectful care delivery in the clinical exchange, this includes attention to prevention and treatment in the context of the patient&#x2019;s life course, and a focus on individuals&#x2019; (and caregivers&#x2019;) preferences and values [<xref ref-type="bibr" rid="ref41">41</xref>].</p><p><bold>3. Relationship bonding.</bold> Additionally referred to as therapeutic alliance, this is a shared sense between clinician and patient that affirms the collaborative nature of the relationship, shared emotional bond, and agreement on treatment goals and tasks [<xref ref-type="bibr" rid="ref43">43</xref>].</p><p><bold>4. Respect for patient autonomy and involvement.</bold> This includes creation of an environment where patients (and their caregivers) feel safe expressing their concerns, or disagreeing with or exploring alternatives to clinical recommendations [<xref ref-type="bibr" rid="ref44">44</xref>]</p><p><bold>5. Communication.</bold> In addition to clear verbal communication, this includes nonverbal communication&#x2014;tone of voice, eye contact, and facial expressions, as well as things such as examination room characteristics, touch, interpersonal distance, and clinician clothing, gestures, and posture [<xref ref-type="bibr" rid="ref45">45</xref>].</p><p><bold>6. Patience and commitment.</bold> While these are difficult to define, both include care that allows time and interest in patient engagement during the clinic visit and beyond, without patients feeling rushed or dismissed and with a sense of clinician interest in patient progress over time [<xref ref-type="bibr" rid="ref41">41</xref>].</p></boxed-text></sec></sec><sec id="s4"><title>LLM as Third Agent in the Clinical Encounter</title><sec id="s4-1"><title> Evolution of the Doctor-Patient Relationship</title><p>The doctor-patient relationship has historically been seen as the bedrock of medicine: even now, while the patient may have family caregivers and the clinician may practice within a clinical care team or health system, the direct, one-on-one human connection between patient and clinician remains an ideal. In this model, the doctor is a trusted confidante whose role as a medical and even moral adviser to the patient has moved some analysts to describe the doctor-patient relationship as similar to that of a parent and child [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>].</p><p>Medical historians note that this idealized view of the family doctor was already out of date for much of the 20th century, even as it remained the dominant cultural narrative [<xref ref-type="bibr" rid="ref48">48</xref>]. By the 1990s, the hallmarks of that relationship&#x2014;physicians as carers for the whole family, and with freedom to act as they saw fit to safeguard patient health&#x2014;were largely no longer in place. The growth of managed care, capitation, and other health system changes contributed to this shift. Changes in patient self-concept and advances in digital technology&#x2014;including online medical information platforms, increased patient access to their own electronic medical records, and the overall movement for patient self-advocacy&#x2014;further accelerated the transformation [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref51">51</xref>].</p></sec><sec id="s4-2"><title>New Trilateral Framework</title><p>LLMs now introduce a new, third agent, shaping communication, understanding, and connection between patients and caregivers and their clinicians. We present a framework for describing and analyzing this new interaction, in which patients (and their surrogates) as well as clinicians at all skill levels avail themselves of the power of LLMs to review background information, secure diagnostic or therapeutic assistance, or navigate choices.</p><p><xref ref-type="fig" rid="figure1">Figure 1</xref> illustrates the current state of this new trilateral (3-sided) interaction. Each &#x201C;corner&#x201D; of the triangle represents an actor participating in the exchange of information. Two corners are inhabited by human actors: patients and their caregivers, and clinicians and clinical care teams. The third corner is now inhabited by LLMs, which are used by both clinicians and patients to generate content, analysis, and recommendations. Between these corners are &#x201C;edges&#x201D; representing interactions between humans and machines as well as between human actors.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Trilateral interaction framework&#x2014;LLMs as a third agent in the patient-clinician relationship, and factors mediating exchange. LLM: large language model.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jopm_v17i1e68146_fig01.png"/></fig><p>Accuracy of LLMs and the knowledge required to use them are factors that impact all dynamics, and so are at the center of the diagram. Other elements mediating the nature of the interactions are noted along each edge of the clinical exchange. For clinicians, LLM use may be constrained by the permissions or restrictions imposed by the system in which they work. For patients and caregivers, health literacy&#x2014;including the ability to detect likely hallucinations or assess the reliability of cited sources&#x2014;will impact both the use and usefulness of LLMs. Factors that have shaped clinician-patient relations since well before the LLMs remain relevant and are noted underneath the arrow indicating interaction between physician and patient, including power and information asymmetries, trust, and the quality of communication.</p><p>As LLMs currently play a limited role in generating direct communication between physician and patient, nothing links the LLM directly to the arrow representing doctor-patient exchange. While some health systems are deploying AI-generated &#x201C;smart replies&#x201D; to patients, these are generally only for routine matters such as scheduling of appointments, prescription refills, or the like, and only after clinician review and approval. A recent study of such AI-generated replies has found that clinicians deemed only 20% of drafts usable [<xref ref-type="bibr" rid="ref52">52</xref>]. For their part, some &#x201C;AI patients&#x201D; have also begun to use LLMs to compose or clarify communications to clinicians, or to help raise the possibility of new diagnoses or course of treatment [<xref ref-type="bibr" rid="ref53">53</xref>]. A paramount concern is how these trilateral interactions will impact patients&#x2019; and clinicians&#x2019; sense of their own agency and trust in other humans, and in the overall health care system.</p></sec><sec id="s4-3"><title>LLM as Interrupter?</title><p>LLMs can expand capacity across a range of health care actors, delivering new knowledge, predictive insights, or recommendations to patients and their families, as well as to physicians, nurses, physician assistants, and pharmacists. As noted, although the multilingual translation capabilities of LLM-based chatbots have not been fully evaluated for medical accuracy or tested with non-English prompts, they are likely to improve comprehension in families where patients or caregivers are not native speakers.</p><p>This generative power, however, may diminish or interrupt humanistic exchange. As LLMs improve their capacity to generate communications without human supervision or refinement, it is not difficult to imagine health systems using them as a substitute for, rather than a facilitator of, human-to-human interactions (<xref ref-type="fig" rid="figure2">Figure 2</xref>). In some cases, the AI models&#x2014;trained by health systems&#x2014;may have goals that differ from those of doctors, leading to recommendations that prioritize cost saving over care, or that are insensitive to patients&#x2019; moral or cultural values. Patient use of smartphone photos and LLMs for self-diagnosis of dermatological conditions, or use of ChatGPT to diagnose cause of stomachache or cough without consultation with medical professionals [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref54">54</xref>], represents present-day scenarios where the LLM, functioning as a &#x201C;doctor in your pocket,&#x201D; is less a facilitator of exchange between patient and clinician than its interrupter [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref55">55</xref>].</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>LLM as an interrupter in clinician-patient interaction. LLM: large language model.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jopm_v17i1e68146_fig02.png"/></fig><p>Reductions in human-to-human exchange risks loss of the therapeutic alliance, sense of shared purpose, respect, and connectedness that defines humanistic care. As noted in the anecdote above, where a patient had been interacting with an LLM without knowing it, this framework also suggests the need for new ethics governing trilateral, AI-involved medical exchange.</p><p>The health impact of this blurring of boundaries between human and machine and the diminution of human interaction is not yet clear. LLMs are evolving, and their power to simulate human relations raises the question of whether machine-generated therapeutic alliances might be as &#x201C;good&#x201D; in some practical sense as those created through human interaction. Might generative AI someday reproduce all the qualities in <xref ref-type="other" rid="box2">Textbox 2</xref>? Older adult Japanese patients have experienced decreased loneliness with therapeutic robot pets, such as small mechanical seals [<xref ref-type="bibr" rid="ref56">56</xref>], and children with developmental difficulties have found benefit from robot playmates [<xref ref-type="bibr" rid="ref57">57</xref>]. Generative AI may possess similar or greater powers of comfort. At the same time, as psychiatrist and medical anthropologist Kleinman [<xref ref-type="bibr" rid="ref58">58</xref>] reminds us, caregiving is relational and reciprocal, including both a range of physical acts&#x2014;touch, embrace, lifting, steadying, toileting, and more&#x2014;as well as the way we look at another human being, receive their gaze, experience a quality of voice or physical presence as an expression of solidarity and moral support. For Kleinman and countless patients and families, these essential elements of human care had become mechanized and inauthentic in much of modern health care even before the advent of the LLM. The ineffably human dimensions of care&#x2014;moments of connection, physical presence, deep empathy, and moral solidarity&#x2014;are unlikely to be replicated by even the most sophisticated language models, no matter how well-prompted or finely tuned.</p></sec><sec id="s4-4"><title>Longer Term: Agentic AI as Ally or Facilitator?</title><p>Generative AI is already evolving beyond prompted responses from chatbots to enable what is termed &#x201C;agentic AI&#x201D;&#x2014;systems capable of initiating autonomous action in the virtual and physical world, potentially serving as loyal assistants while preserving human agency. In this scenario, AI can become allies for clinicians, patients, and their respective care teams, facilitating rather than replacing their essential partnership (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Agentic AI is assisting in mediating communication but is not eclipsing human exchange. Clinicians and computer scientists working with them have already begun discussion of agentic &#x201C;AI teammates&#x201D;&#x2014;that is, tools to enhance decision-making capacity, parsing clinical records, initiating routine tasks such as prior authorizations, assessing medication interactions, and recommending treatment regimens or preventive strategies tailored to individual patient needs [<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]. These scenarios, however, have tended to omit attention to patients and their caregivers, for whom agentic AI could similarly serve as a navigator and advocate, organizing clinical records and synthesizing data and medical knowledge, illuminating health determinants and advisable courses of action.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Agentic AI as a facilitator between clinician and patient. While clinicians and patients both use agentic AI, humanistic exchange remains robust. AI: artificial intelligence; LLM: large language model.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="jopm_v17i1e68146_fig03.png"/></fig><p>Agentic AI will represent an evolution beyond current LLM capabilities, joining a range of AI tools to initiate actions autonomously according to tailored parameters. Unlike today&#x2019;s LLMs, which primarily generate text in response to user queries, agentic AI systems will learn from patterns of use, anticipate needs, and act proactively. For example, they might autonomously organize clinical data, flag potential drug interactions, or offer unprompted suggestions or questions for patients and providers to consider in recommending or adjusting treatment regimens, preventive strategies, or diagnoses. AI agents may also autonomously carry out actions such as typing into a computer or clicking on buttons (called &#x201C;computer use&#x201D;) [<xref ref-type="bibr" rid="ref61">61</xref>]&#x2014;for example, submitting a request for a prescription renewal, contesting the denial of an insurance claim, or changing the alert settings for a continuous glucose monitor.</p><p>Agentic AI will also help patients advocate for themselves, recommending strategies to increase patient independence or preferred approaches to address health challenges, while identifying attempts by health care or insurance systems to limit patient choice or impose unwanted treatment pressures. For physicians, AI agents could similarly be tailored or trained to their clinical preferences and style, drawing on lessons from experiences with particular patients and improving their ability to tailor both their communication and approaches to care. In this vision, both patients and clinicians may come to view their AI tools, implicitly or explicitly, as &#x201C;theirs,&#x201D; that is, trained by them to serve their specific interests. The embedding of agentic AI in the clinical relationship could nonetheless align patient and clinician in working toward shared outcomes and improved health.</p></sec></sec><sec id="s5"><title>Generative AI for Participatory Medicine: Areas for Inquiry</title><sec id="s5-1"><title>LLM Use, Function, and Safety in the Clinician-Patient Exchange</title><p>The newness of generative AI as a third agent in the clinician-patient relationship raises multiple questions, as yet largely unanswered, on whether these new models are fit for purpose. How will patients or clinicians use &#x201C;their&#x201D; AI to organize information before, during, or after a visit? How well or safely will the tools perform the tasks required? How will use vary? While answers are likely to change rapidly with field advances, key research directions can include:</p></sec><sec id="s5-2"><title>Human Use of General-Purpose LLM Chatbots In Health and Health Care</title><p>Rapid uptake of these tools, and rapidly growing capacity to ingest images, PDFs, and increasing numbers of words, make understanding current and potential uses by patients and clinicians critical to understanding likely trajectories for off-the-shelf LLMs and those fine-tuned for health care applications. Key questions include patient assessment of intelligibility of LLM responses, including for those with different English language or educational levels, technical literacy, and assessment of accuracy of responses to patient queries, etc. What is the impact of biases or style embedded in particular LLMs, and how might those interact with patient need or preference? As with model training, research on LLMs as third agents will require diverse datasets and participants, including patients of different races, ethnicities, or gender identities, levels of English proficiency and education, health conditions, comfort with technology, and internet access, as well as varying preferences for communication and recommendations in health care.</p></sec><sec id="s5-3"><title>Customization and Optimization of LLMs For Specific Clinical Purposes (eg, Diagnosis and Care Navigation) for Specific Users, Whether Clinicians, Patients, Or Caregivers</title><p> Research should examine both proposed solutions and methods development&#x2014;including but not limited to pretraining of focused foundation models, incorporation of multiple forms of data (images, sensor data, or EHRs), quantification of uncertainty for estimates produced, and methods for fine-tuning and debiasing. Optimization work, with particular attention to retrieval-augmented generation, is already being carried out at a rapid pace in industry and academia, with industry likely to progress more rapidly given its disproportionate access to large-scale compute capacity. This raises collateral but related research questions on tensions or concordance between patient needs and health care market incentives, regulatory requirements in marketing and labelling of LLM applications to health, potential impact of use agreements between particular LLM or EHR vendors, and differentials in use or constraints to use varying by health care system (public vs private), budget, geography, etc.</p></sec><sec id="s5-4"><title>Mediators in Generative AI Use: Health Literacy, Trust and Transparency, Clinician-Patient Power Dynamics and Beyond</title></sec><sec id="s5-5"><title>Research Priorities</title><p>Participatory medicine emphasizes reducing information asymmetries and increasing trust as key to enabling patients and caregivers to become more effective partners in the clinical setting. The factors mediating relations between the agents in the trilateral framework of patient, clinician, and LLM in <xref ref-type="fig" rid="figure1">Figure 1</xref> raise a range of research questions beyond assessment of the function of off-the-shelf or fine-tuned LLMs. Priorities for this research include approaches to health and technical literacy, informed consent and ethical use, transparency and trust, removal of communication barriers and gains in efficiency, clinician constraint and autonomy, and value of the human in care. </p><sec id="s5-5-1"><title>Approaches to Health and Technical Literacy</title><p>How does patient empowerment increase numeracy, critical thinking, or effective use of one or more LLM chatbots? Current versions of off-the-shelf LLMs do not cite sources or rank them by accuracy, leaving patients to distinguish between hallucination and reality, or between more and less authoritative sources of information. What strategies can be used to increase patient comfort with LLM use, comparison between models, and the ability to distinguish between recommendations based on low or high strength of evidence or rigor of sources. Literacy in data import or access is also variable&#x2014;while all patients now have the potential to access medical records from across multiple health systems, knowing how to do that and how to feed results to LLMs will determine whether or not these tools significantly alter information asymmetry.</p></sec><sec id="s5-5-2"><title>Informed Consent and Ethical Use</title><p> For patients, what is understood regarding privacy, informed consent, or the ability to opt out when their health data is used by health systems to train LLMs? When they upload their personal medical data via a chatbot, what do they understand about the uses that can be made of that information? ChatGPT is now used, with apparent success, to simplify informed consent forms for both clinical research and before surgical procedures [<xref ref-type="bibr" rid="ref62">62</xref>]. But how informed are clinicians themselves about the potential use of their practice patterns to train generative AI, or about the training data or testing of LLMs in a clinical context? Guidelines urging AI that is &#x201C;FAVES&#x201D;&#x2014;&#x201D;fair, accurate, verifiable, effective, and safe&#x201D;&#x2014;or calls for centralized laboratories to evaluate health AI safety and effectiveness may be insufficient either to address such questions as the impact of LLMs drift in function over time, or to assess impacts of LLM use on patient or workforce morale at point of care.</p></sec><sec id="s5-5-3"><title>Transparency and Trust</title><p> We cite the instance above of patient disappointment upon discovering that her interlocutor was in fact a chatbot rather than a human provider. What is the impact of disclosure by patients, physicians, and health systems of LLM use, or of not disclosing the use at all? One study of &#x201C;smart replies&#x201D; outside the health domain found that when participants think their communication partner is using AI-generated responses, they perceive them as &#x201C;less cooperative&#x201D; or &#x201C;affiliative.&#x201D; When AI&#x2019;s role in authoring the responses was unknown, those receiving them judged their interlocutors to be more cooperative collaborators [<xref ref-type="bibr" rid="ref54">54</xref>]. Whether or how patients and physicians reveal use of AI assistance, under what circumstances this is judged a positive or negative, and whether perception on the benefits of LLM use varies by patient or physician type, health condition, or health system are all questions of interest. Measurement of trust and partnership needs to begin with the design stage, with inclusion of patients in the cocreation of research methods and aims central to research success [<xref ref-type="bibr" rid="ref51">51</xref>].</p></sec><sec id="s5-5-4"><title>Removal of Communication Barriers and Gains in Efficiency</title><p> With physician shortages projected to reach 86,000 in the United States within the next decade [<xref ref-type="bibr" rid="ref63">63</xref>], how will LLM use allow existing clinicians to do more, or reduce the need for exchange with patients? How might patient use of these tools, or even of AI-generated summaries of key data points (including from different health systems, from wearable data inaccessible via the EHR, etc.) speed or improve communication with clinicians?</p></sec><sec id="s5-5-5"><title>Clinician Constraint and Autonomy</title><p> Clinician priorities and commitment to care are not necessarily aligned with health system priorities. Whether generative AI&#x2019;s potential in the health system is realized depends in part on whether the cost of deploying and maintaining the innovations is offset by increased incoming revenue or decreases in the expense of replacing burned-out clinicians. Regulatory or liability concerns may also constrain health systems or physicians, leaving patients freer than clinicians in some instances to explore LLM-generated insights about their conditions. How or if limits on clinician autonomy impact the use of generative AI, physician sense of self-efficacy or cognitive load, and patient experience are all research questions of interest.</p></sec><sec id="s5-5-6"><title>Value of the Human in Care</title><p> The importance to patients of human caring in health care in the age of generative AI, or the degree to which clinicians value their role or humanistic exchange as integral to a process of caregiving, is not yet known. This may vary by patient, condition, specialty or primary care, or health system and depend on patient access to or help from other human actors, including family and other service providers. As electronic, LLM-generated communication between patients and clinicians grows, or as patients or physicians turn to avatars or AI agents to represent them, the question of how much human exchange is needed and what is essentially human about such exchange will become increasingly central.</p></sec></sec></sec><sec id="s6" sec-type="conclusions"><title>Conclusion</title><p>The LLM is a new change agent in the health care dynamic, and one with transformative potential for patients and clinicians. Clear-eyed research into both the function and use of LLMs can help bend the arc of that change toward mutual benefit. The key lies not in advancing LLMs as a replacement for clinician-patient interaction, but as a tool to augment it. By conceptualizing something closer to &#x201C;assistive intelligence,&#x201D; we can leverage LLMs to enhance and facilitate human connections and collaboration, supporting sound clinical decision-making and improved communication. For patients, in particular, LLMs represent a powerful corrective to power and knowledge imbalances and may lead to a more effective clinician-patient partnership.</p><p>Understanding the impact of LLMs and agentic AI on clinician-patient relations will require social science and computer science, qualitative research, as well as quantitative analytics and software engineering. While a focus on clinician-patient interactions is insufficient to address the multiple incentives and forces that underlie the American health care system, understanding the dynamics of those interactions&#x2014;and acting to design, train, and use LLMs in ways that reinforce humanistic collaboration&#x2014;is possible and necessary. Adhering to the principles of engagement, cocreation, and ethics that have emerged from patient movements can create a future where AI serves as a facilitator of the communication and connection at the heart of human-centered and effective care.</p></sec></body><back><fn-group><fn fn-type="con"><p>HdOC, DW, IS, and HL contributed to the research, framing, and writing of the text. HdOC, DW, and HL created the graphics.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations:</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">EHR</term><def><p>electronic health record</p></def></def-item><def-item><term id="abb3">LLM</term><def><p>large language model</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sim</surname><given-names>I</given-names> </name><name name-style="western"><surname>Cassel</surname><given-names>C</given-names> </name></person-group><article-title>The ethics of relational AI - expanding and implementing the Belmont Principles</article-title><source>N Engl J Med</source><year>2024</year><month>07</month><day>18</day><volume>391</volume><issue>3</issue><fpage>193</fpage><lpage>196</lpage><pub-id pub-id-type="doi">10.1056/NEJMp2314771</pub-id><pub-id pub-id-type="medline">39007542</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mehandru</surname><given-names>N</given-names> </name><name name-style="western"><surname>Miao</surname><given-names>BY</given-names> </name><name name-style="western"><surname>Almaraz</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Sushil</surname><given-names>M</given-names> </name><name name-style="western"><surname>Butte</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Alaa</surname><given-names>A</given-names> </name></person-group><article-title>Evaluating large language models as agents in the clinic</article-title><source>NPJ Digit Med</source><year>2024</year><month>04</month><day>3</day><volume>7</volume><issue>1</issue><fpage>84</fpage><pub-id pub-id-type="doi">10.1038/s41746-024-01083-y</pub-id><pub-id pub-id-type="medline">38570554</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>K</given-names> </name><name name-style="western"><surname>Hoti</surname><given-names>K</given-names> </name><name name-style="western"><surname>Hughes</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Emmerton</surname><given-names>L</given-names> </name></person-group><article-title>Dr Google Is here to stay but health care professionals are still valued: an analysis of health care consumers&#x2019; internet navigation support preferences</article-title><source>J Med Internet Res</source><year>2017</year><month>06</month><day>14</day><volume>19</volume><issue>6</issue><fpage>e210</fpage><pub-id pub-id-type="doi">10.2196/jmir.7489</pub-id><pub-id pub-id-type="medline">28615156</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Frost</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Massagli</surname><given-names>MP</given-names> </name></person-group><article-title>Social uses of personal health information within PatientsLikeMe, an online patient community: what can happen when patients have access to one another&#x2019;s data</article-title><source>J Med Internet Res</source><year>2008</year><month>05</month><day>27</day><volume>10</volume><issue>3</issue><fpage>e15</fpage><pub-id pub-id-type="doi">10.2196/jmir.1053</pub-id><pub-id pub-id-type="medline">18504244</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ferguson</surname><given-names>T</given-names> </name><name name-style="western"><surname>Frydman</surname><given-names>G</given-names> </name></person-group><article-title>The first generation of e-patients</article-title><source>BMJ</source><year>2004</year><month>05</month><day>15</day><volume>328</volume><issue>7449</issue><fpage>1148</fpage><lpage>1149</lpage><pub-id pub-id-type="doi">10.1136/bmj.328.7449.1148</pub-id><pub-id pub-id-type="medline">15142894</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farnood</surname><given-names>A</given-names> </name><name name-style="western"><surname>Johnston</surname><given-names>B</given-names> </name><name name-style="western"><surname>Mair</surname><given-names>FS</given-names> </name></person-group><article-title>A mixed methods systematic review of the effects of patient online self-diagnosing in the &#x201C;smart-phone society&#x201D; on the healthcare professional-patient relationship and medical authority</article-title><source>BMC Med Inform Decis Mak</source><year>2020</year><month>10</month><day>6</day><volume>20</volume><issue>1</issue><fpage>253</fpage><pub-id pub-id-type="doi">10.1186/s12911-020-01243-6</pub-id><pub-id pub-id-type="medline">33023577</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Millenson</surname><given-names>ML</given-names> </name><name name-style="western"><surname>Baldwin</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Zipperer</surname><given-names>L</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>H</given-names> </name></person-group><article-title>Beyond Dr. Google: the evidence on consumer-facing digital tools for diagnosis</article-title><source>Diagnosis (Berl)</source><year>2018</year><month>09</month><day>25</day><volume>5</volume><issue>3</issue><fpage>95</fpage><lpage>105</lpage><pub-id pub-id-type="doi">10.1515/dx-2018-0009</pub-id><pub-id pub-id-type="medline">30032130</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="web"><article-title>Study reveals wearable device trends among U.S. adults</article-title><source>National Heart, Lung and Blood Institute</source><year>2023</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.nhlbi.nih.gov/news/2023/study-reveals-wearable-device-trends-among-us-adults">https://www.nhlbi.nih.gov/news/2023/study-reveals-wearable-device-trends-among-us-adults</ext-link></comment></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="web"><article-title>21st Century Cures Act: interoperability, information blocking, and the ONC health IT certification program</article-title><source>Federal Register</source><year>2020</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.federalregister.gov/documents/2020/05/01/2020-07419/21st-century-cures-act-interoperability-information-blocking-and-the-onc-health-it-certification">https://www.federalregister.gov/documents/2020/05/01/2020-07419/21st-century-cures-act-interoperability-information-blocking-and-the-onc-health-it-certification</ext-link></comment></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="web"><article-title>Statement: health data, technology &#x0026; interoperability: patient engagement, information sharing &#x0026; public health interoperability</article-title><source>OpenNotes</source><year>2024</year><access-date>2025-10-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.opennotes.org/news/opennotes-statement-health-data-technology-and-interoperabilty-public-health-interoperability/">https://www.opennotes.org/news/opennotes-statement-health-data-technology-and-interoperabilty-public-health-interoperability/</ext-link></comment></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Turchi</surname><given-names>T</given-names> </name><name name-style="western"><surname>Prencipe</surname><given-names>G</given-names> </name><name name-style="western"><surname>Malizia</surname><given-names>A</given-names> </name><name name-style="western"><surname>Filogna</surname><given-names>S</given-names> </name><name name-style="western"><surname>Latrofa</surname><given-names>F</given-names> </name><name name-style="western"><surname>Sgandurra</surname><given-names>G</given-names> </name></person-group><article-title>Pathways to democratized healthcare: envisioning human-centered AI-as-a-service for customized diagnosis and rehabilitation</article-title><source>Artif Intell Med</source><year>2024</year><month>05</month><volume>151</volume><fpage>102850</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2024.102850</pub-id><pub-id pub-id-type="medline">38555849</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>deBronkart</surname><given-names>D</given-names> </name><name name-style="western"><surname>Eysenbach</surname><given-names>G</given-names> </name></person-group><article-title>Gimme my damn data (and let patients help!): The #GimmeMyDamnData manifesto</article-title><source>J Med Internet Res</source><year>2019</year><month>11</month><day>22</day><volume>21</volume><issue>11</issue><fpage>e17045</fpage><pub-id pub-id-type="doi">10.2196/17045</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>deBronkart</surname><given-names>D</given-names> </name></person-group><article-title>Patients use genai to explore OUR questions. that&#x2019;s autonomy</article-title><source>Patients Use AI</source><year>2024</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://patientsuseai.substack.com/p/we-patients-use-genai-to-explore">https://patientsuseai.substack.com/p/we-patients-use-genai-to-explore</ext-link></comment></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>B&#x00E9;lisle-Pipon</surname><given-names>JC</given-names> </name></person-group><article-title>Why we need to be careful with LLMs in medicine</article-title><source>Front Med (Lausanne)</source><year>2024</year><volume>11</volume><fpage>1495582</fpage><pub-id pub-id-type="doi">10.3389/fmed.2024.1495582</pub-id><pub-id pub-id-type="medline">39697212</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Tyson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Pasquini</surname><given-names>G</given-names> </name><name name-style="western"><surname>Spencer</surname><given-names>A</given-names> </name><name name-style="western"><surname>Funk</surname><given-names>C</given-names> </name></person-group><article-title>60% of Americans would be uncomfortable with provider relying on AI in their own health care</article-title><source>Pew Research Center</source><year>2023</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://tinyurl.com/ws37ndy4">https://tinyurl.com/ws37ndy4</ext-link></comment></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Campos</surname><given-names>H</given-names> </name></person-group><article-title>The AI-patient revolution is here: time to rethink participatory medicine</article-title><source>Medium</source><year>2024</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://medium.com/@hugooc/the-ai-patient-revolution-is-here-time-to-rethink-participatory-medicine-9808a17bd93f">https://medium.com/@hugooc/the-ai-patient-revolution-is-here-time-to-rethink-participatory-medicine-9808a17bd93f</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Sarabu</surname><given-names>C</given-names> </name></person-group><article-title>How AI is enabling a new level of patient empowerment</article-title><source>OpenNotes</source><year>2024</year><month>10</month><day>1</day><access-date>2025-08-08</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.opennotes.org/news/how-ai-is-enabling-a-new-level-of-patient-empowerment-himss-tv/">https://www.opennotes.org/news/how-ai-is-enabling-a-new-level-of-patient-empowerment-himss-tv/</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stokel-Walker</surname><given-names>C</given-names> </name></person-group><article-title>How patients are using AI</article-title><source>BMJ</source><year>2024</year><volume>387</volume><fpage>q2393</fpage><pub-id pub-id-type="doi">10.1136/bmj.q2393</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Schmidt</surname><given-names>E</given-names> </name></person-group><article-title>The age of AI</article-title><year>2025</year><month>04</month><day>20</day><access-date>2025-07-16</access-date><publisher-name>Stanford Computer Science Lecture</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://x.com/quasa0/status/1823933017217482883">https://x.com/quasa0/status/1823933017217482883</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Campos</surname><given-names>H</given-names> </name></person-group><article-title>AI, Health Equity, and Patient Empowerment: Bridging Healthcare Gaps.</article-title><conf-name>Listening, Engaging, and Acting to Reduce Neighborhood Disparities (LEARN&#x2019;d) Conference</conf-name><conf-date>Sep 30 to Oct 1, 2024</conf-date><conf-loc>Birmingham, AL</conf-loc></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esmaeilzadeh</surname><given-names>P</given-names> </name><name name-style="western"><surname>Mirzaei</surname><given-names>T</given-names> </name><name name-style="western"><surname>Dharanikota</surname><given-names>S</given-names> </name></person-group><article-title>Patients&#x2019; perceptions toward human-artificial intelligence interaction in health care: experimental study</article-title><source>J Med Internet Res</source><year>2021</year><month>11</month><day>25</day><volume>23</volume><issue>11</issue><fpage>e25856</fpage><pub-id pub-id-type="doi">10.2196/25856</pub-id><pub-id pub-id-type="medline">34842535</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Fox</surname><given-names>S</given-names> </name></person-group><article-title>Rare disease in the US 2025</article-title><source>SSRS</source><year>2025</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://ssrs.com/insights/rare-disease-in-the-us-2025/">https://ssrs.com/insights/rare-disease-in-the-us-2025/</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Floyd</surname><given-names>W</given-names> </name><name name-style="western"><surname>Kleber</surname><given-names>T</given-names> </name><name name-style="western"><surname>Pasli</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Evaluating the reliability of Chat-GPT model responses for radiation oncology patient inquiries</article-title><source>Int J Radiat Oncol Biol Phys</source><year>2023</year><month>10</month><volume>117</volume><issue>2</issue><fpage>e383</fpage><pub-id pub-id-type="doi">10.1016/j.ijrobp.2023.06.2497</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goh</surname><given-names>E</given-names> </name><name name-style="western"><surname>Gallo</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hom</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Influence of a large language model on diagnostic reasoning: a randomized clinical vignette study</article-title><source>medRxiv</source><year>2024</year><month>03</month><day>14</day><volume>14</volume><issue>2024</issue><fpage>2024.03.12.24303785</fpage><pub-id pub-id-type="doi">10.1101/2024.03.12.24303785</pub-id><pub-id pub-id-type="medline">38559045</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huo</surname><given-names>B</given-names> </name><name name-style="western"><surname>McKechnie</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ortenzi</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Dr. GPT will see you now: the ability of large language model-linked chatbots to provide colorectal cancer screening recommendations</article-title><source>Health Technol</source><year>2024</year><month>05</month><volume>14</volume><issue>3</issue><fpage>463</fpage><lpage>469</lpage><pub-id pub-id-type="doi">10.1007/s12553-024-00836-9</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stoneham</surname><given-names>S</given-names> </name><name name-style="western"><surname>Livesey</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cooper</surname><given-names>H</given-names> </name><name name-style="western"><surname>Mitchell</surname><given-names>C</given-names> </name></person-group><article-title>ChatGPT versus clinician: challenging the diagnostic capabilities of artificial intelligence in dermatology</article-title><source>Clin Exp Dermatol</source><year>2024</year><month>06</month><day>25</day><volume>49</volume><issue>7</issue><fpage>707</fpage><lpage>710</lpage><pub-id pub-id-type="doi">10.1093/ced/llad402</pub-id><pub-id pub-id-type="medline">37979201</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zack</surname><given-names>T</given-names> </name><name name-style="western"><surname>Lehman</surname><given-names>E</given-names> </name><name name-style="western"><surname>Suzgun</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Assessing the potential of GPT-4 to perpetuate racial and gender biases in health care: a model evaluation study</article-title><source>Lancet Digit Health</source><year>2024</year><month>01</month><volume>6</volume><issue>1</issue><fpage>e12</fpage><lpage>e22</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(23)00225-X</pub-id><pub-id pub-id-type="medline">38123252</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Cordovano</surname><given-names>G</given-names> </name><name name-style="western"><surname>deBronkart</surname><given-names>D</given-names> </name><name name-style="western"><surname>Downing</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Collective AI rights for patients</article-title><source>The Light Collective</source><year>2024</year><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://lightcollective.org/wp-content/uploads/2024/06/Collective-AI-Rights-For-Patients-v_2.0.pdf">https://lightcollective.org/wp-content/uploads/2024/06/Collective-AI-Rights-For-Patients-v_2.0.pdf</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Stiglich</surname><given-names>JM</given-names> </name></person-group><article-title>Topol: AI-fueled &#x2018;keyboard liberation&#x2019; will improve the medical encounter</article-title><year>2024</year><month>04</month><day>18</day><publisher-name>American College of Physicians Internal Medicine Meeting</publisher-name></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rotenstein</surname><given-names>L</given-names> </name><name name-style="western"><surname>Melnick</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Iannaccone</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Virtual scribes and physician time spent on electronic health records</article-title><source>JAMA Netw Open</source><year>2024</year><month>05</month><day>1</day><volume>7</volume><issue>5</issue><fpage>e2413140</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.13140</pub-id><pub-id pub-id-type="medline">38787556</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tierney</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Gayre</surname><given-names>G</given-names> </name><name name-style="western"><surname>Hoberman</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Ambient artificial intelligence scribes to alleviate the burden of clinical documentation</article-title><source>NEJM Catalyst</source><year>2024</year><month>02</month><day>21</day><volume>5</volume><issue>3</issue><fpage>CAT.23.0404</fpage><pub-id pub-id-type="doi">10.1056/CAT.23.0404</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="book"><person-group person-group-type="author"><collab>National Academies of Sciences, Engineering, and Medicine; National Academy of Medicine; Committee on Systems Approaches to Improve Patient Care by Supporting Clinician Well-Being</collab></person-group><source>Taking Action Against Clinician Burnout: A Systems Approach to Professional Well-Being</source><year>2019</year><publisher-name>National Academies Press</publisher-name></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>West</surname><given-names>CP</given-names> </name><name name-style="western"><surname>Dyrbye</surname><given-names>LN</given-names> </name><name name-style="western"><surname>Shanafelt</surname><given-names>TD</given-names> </name></person-group><article-title>Physician burnout: contributors, consequences and solutions</article-title><source>J Intern Med</source><year>2018</year><month>06</month><volume>283</volume><issue>6</issue><fpage>516</fpage><lpage>529</lpage><pub-id pub-id-type="doi">10.1111/joim.12752</pub-id><pub-id pub-id-type="medline">29505159</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hinton</surname><given-names>G</given-names> </name></person-group><article-title>On radiology</article-title><conf-name>Machine Learning and Market for Intelligence Conference</conf-name><conf-date>Nov 24, 2016</conf-date><conf-loc>Toronto, Canada</conf-loc></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Garson</surname><given-names>A</given-names> </name></person-group><article-title>Artificial intelligence could replace doctors. it&#x2019;s time to prepare. (Opinion)</article-title><source>Houston Chronicle</source><year>2023</year><access-date>2025-07-31</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://tinyurl.com/4k4xs2z4">https://tinyurl.com/4k4xs2z4</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McCoy</surname><given-names>LG</given-names> </name><name name-style="western"><surname>Manrai</surname><given-names>AK</given-names> </name><name name-style="western"><surname>Rodman</surname><given-names>A</given-names> </name></person-group><article-title>Large language models and the degradation of the medical record</article-title><source>N Engl J Med</source><year>2024</year><month>10</month><day>31</day><volume>391</volume><issue>17</issue><fpage>1561</fpage><lpage>1564</lpage><pub-id pub-id-type="doi">10.1056/NEJMp2405999</pub-id><pub-id pub-id-type="medline">39465898</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Angell</surname><given-names>M</given-names> </name></person-group><article-title>The doctor as double agent</article-title><source>Kennedy Inst Ethics J</source><year>1993</year><month>09</month><volume>3</volume><issue>3</issue><fpage>279</fpage><lpage>286</lpage><pub-id pub-id-type="doi">10.1353/ken.0.0253</pub-id><pub-id pub-id-type="medline">10127995</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ayers</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Poliak</surname><given-names>A</given-names> </name><name name-style="western"><surname>Dredze</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title><source>JAMA Intern Med</source><year>2023</year><month>06</month><day>1</day><volume>183</volume><issue>6</issue><fpage>589</fpage><lpage>596</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id><pub-id pub-id-type="medline">37115527</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Koranteng</surname><given-names>E</given-names> </name><name name-style="western"><surname>Rao</surname><given-names>A</given-names> </name><name name-style="western"><surname>Flores</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Empathy and equity: key considerations for large language model adoption in health care</article-title><source>JMIR Med Educ</source><year>2023</year><month>12</month><day>28</day><volume>9</volume><fpage>e51199</fpage><pub-id pub-id-type="doi">10.2196/51199</pub-id><pub-id pub-id-type="medline">38153778</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farrington</surname><given-names>A</given-names> </name></person-group><article-title>Intuition and expert clinical practice in nursing</article-title><source>Br J Nurs</source><year>1993</year><month>02</month><day>25</day><volume>2</volume><issue>4</issue><fpage>228</fpage><lpage>233</lpage><pub-id pub-id-type="doi">10.12968/bjon.1993.2.4.228</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Busch</surname><given-names>IM</given-names> </name><name name-style="western"><surname>Moretti</surname><given-names>F</given-names> </name><name name-style="western"><surname>Travaini</surname><given-names>G</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>AW</given-names> </name><name name-style="western"><surname>Rimondini</surname><given-names>M</given-names> </name></person-group><article-title>Humanization of care: key elements identified by patients, caregivers, and healthcare providers. a systematic review</article-title><source>Patient</source><year>2019</year><month>10</month><volume>12</volume><issue>5</issue><fpage>461</fpage><lpage>474</lpage><pub-id pub-id-type="doi">10.1007/s40271-019-00370-1</pub-id><pub-id pub-id-type="medline">31203515</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guidi</surname><given-names>C</given-names> </name><name name-style="western"><surname>Traversa</surname><given-names>C</given-names> </name></person-group><article-title>Empathy in patient care: from &#x201C;Clinical Empathy&#x201D; to &#x201C;Empathic Concern&#x201D;</article-title><source>Med Health Care Philos</source><year>2021</year><month>12</month><volume>24</volume><issue>4</issue><fpage>573</fpage><lpage>585</lpage><pub-id pub-id-type="doi">10.1007/s11019-021-10033-4</pub-id><pub-id pub-id-type="medline">34196934</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Martin</surname><given-names>DJ</given-names> </name><name name-style="western"><surname>Garske</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Davis</surname><given-names>MK</given-names> </name></person-group><article-title>Relation of the therapeutic alliance with outcome and other variables: a meta-analytic review</article-title><source>J Consult Clin Psychol</source><year>2000</year><month>06</month><volume>68</volume><issue>3</issue><fpage>438</fpage><lpage>450</lpage><pub-id pub-id-type="medline">10883561</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fukami</surname><given-names>T</given-names> </name></person-group><article-title>Shared decision making with psychological safety</article-title><source>Lancet</source><year>2023</year><month>04</month><volume>401</volume><issue>10383</issue><fpage>1153</fpage><lpage>1154</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(23)00344-6</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Marcinowicz</surname><given-names>L</given-names> </name><name name-style="western"><surname>Konstantynowicz</surname><given-names>J</given-names> </name><name name-style="western"><surname>Godlewski</surname><given-names>C</given-names> </name></person-group><article-title>Patients&#x2019; perceptions of GP non-verbal communication: a qualitative study</article-title><source>Br J Gen Pract</source><year>2010</year><month>02</month><volume>60</volume><issue>571</issue><fpage>83</fpage><lpage>87</lpage><pub-id pub-id-type="doi">10.3399/bjgp10X483111</pub-id><pub-id pub-id-type="medline">20132701</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goodyear-Smith</surname><given-names>F</given-names> </name><name name-style="western"><surname>Buetow</surname><given-names>S</given-names> </name></person-group><article-title>Power issues in the doctor-patient relationship</article-title><source>Health Care Anal</source><year>2001</year><volume>9</volume><issue>4</issue><fpage>449</fpage><lpage>462</lpage><pub-id pub-id-type="doi">10.1023/A:1013812802937</pub-id><pub-id pub-id-type="medline">11874258</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Katz</surname><given-names>J</given-names> </name></person-group><source>The Silent World of Doctor and Patient</source><year>1984</year><publisher-name>Free Press</publisher-name></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loudon</surname><given-names>I</given-names> </name></person-group><article-title>The concept of the family doctor</article-title><source>Bull Hist Med</source><year>1984</year><volume>58</volume><issue>3</issue><fpage>347</fpage><lpage>362</lpage><pub-id pub-id-type="medline">6388688</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Balint</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Shelton</surname><given-names>WN</given-names> </name></person-group><article-title>Understanding the dynamics of the patient-physician relationship: balancing the fiduciary and stewardship roles of physicians</article-title><source>Am J Psychoanal</source><year>2002</year><month>12</month><volume>62</volume><issue>4</issue><fpage>337</fpage><lpage>346</lpage><pub-id pub-id-type="doi">10.1023/A:1021140815879</pub-id><pub-id pub-id-type="medline">12512677</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Fox</surname><given-names>S</given-names> </name></person-group><source>Rebel Health: A Field Guide to the Patient-Led Revolution in Medical Care</source><year>2024</year><publisher-name>The MIT Press</publisher-name></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mesk&#x00F3;</surname><given-names>B</given-names> </name><name name-style="western"><surname>deBronkart</surname><given-names>D</given-names> </name></person-group><article-title>Patient design: the importance of including patients in designing health care</article-title><source>J Med Internet Res</source><year>2022</year><month>08</month><day>31</day><volume>24</volume><issue>8</issue><fpage>e39178</fpage><pub-id pub-id-type="doi">10.2196/39178</pub-id><pub-id pub-id-type="medline">36044250</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garcia</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>SP</given-names> </name><name name-style="western"><surname>Shah</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Artificial intelligence-generated draft replies to patient inbox messages</article-title><source>JAMA Netw Open</source><year>2024</year><month>03</month><day>4</day><volume>7</volume><issue>3</issue><fpage>e243201</fpage><pub-id pub-id-type="doi">10.1001/jamanetworkopen.2024.3201</pub-id><pub-id pub-id-type="medline">38506805</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="web"><article-title>Axios AI+</article-title><source>Axios</source><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.axios.com/newsletters/axios-ai-plus-0c9864c0-9244-11ef-9b20-23aefab33c01">https://www.axios.com/newsletters/axios-ai-plus-0c9864c0-9244-11ef-9b20-23aefab33c01</ext-link></comment></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hohenstein</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kizilcec</surname><given-names>RF</given-names> </name><name name-style="western"><surname>DiFranzo</surname><given-names>D</given-names> </name><etal/></person-group><article-title>Artificial intelligence in communication impacts language and social relationships</article-title><source>Sci Rep</source><year>2023</year><month>04</month><day>4</day><volume>13</volume><issue>1</issue><fpage>5487</fpage><pub-id pub-id-type="doi">10.1038/s41598-023-30938-9</pub-id><pub-id pub-id-type="medline">37015964</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>You</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Gui</surname><given-names>X</given-names> </name></person-group><article-title>Self-diagnosis through AI-enabled chatbot-based symptom checkers: user experiences and design considerations</article-title><source>AMIA Annu Symp Proc</source><year>2020</year><volume>2020</volume><issue>1354&#x2013;63</issue><fpage>1354</fpage><lpage>1363</lpage><pub-id pub-id-type="medline">33936512</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Inoue</surname><given-names>K</given-names> </name><name name-style="western"><surname>Wada</surname><given-names>K</given-names> </name><name name-style="western"><surname>Shibata</surname><given-names>T</given-names> </name></person-group><article-title>Exploring the applicability of the robotic seal PARO to support caring for older persons with dementia within the home context</article-title><source>Palliat Care Soc Pract</source><year>2021</year><volume>15</volume><fpage>26323524211030285</fpage><pub-id pub-id-type="doi">10.1177/26323524211030285</pub-id><pub-id pub-id-type="medline">34350398</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pearson</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Borenstein</surname><given-names>J</given-names> </name></person-group><article-title>The intervention of robot caregivers and the cultivation of children&#x2019;s capability to play</article-title><source>Sci Eng Ethics</source><year>2013</year><month>03</month><volume>19</volume><issue>1</issue><fpage>123</fpage><lpage>137</lpage><pub-id pub-id-type="doi">10.1007/s11948-011-9309-8</pub-id><pub-id pub-id-type="medline">21918922</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kleinman</surname><given-names>A</given-names> </name></person-group><article-title>Care: in search of a health agenda</article-title><source>Lancet</source><year>2015</year><month>07</month><volume>386</volume><issue>9990</issue><fpage>240</fpage><lpage>241</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(15)61271-5</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zou</surname><given-names>J</given-names> </name><name name-style="western"><surname>Topol</surname><given-names>EJ</given-names> </name></person-group><article-title>The rise of agentic AI teammates in medicine</article-title><source>Lancet</source><year>2025</year><month>02</month><day>8</day><volume>405</volume><issue>10477</issue><fpage>457</fpage><pub-id pub-id-type="doi">10.1016/S0140-6736(25)00202-8</pub-id><pub-id pub-id-type="medline">39922663</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Meyers</surname><given-names>J</given-names> </name></person-group><article-title>AI as &#x201C;assistive intelligence&#x201D;</article-title><source>Substack</source><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://substack.com/@jamesmeyersmd/p-149619618">https://substack.com/@jamesmeyersmd/p-149619618</ext-link></comment></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="web"><article-title>Introducing computer use, a new Claude 35 sonnet, and Claude 35 haiku</article-title><source>Anthropic</source><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.anthropic.com/news/3-5-models-and-computer-use">https://www.anthropic.com/news/3-5-models-and-computer-use</ext-link></comment></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="web"><article-title>Using ChatGPT to facilitate truly informed medical consent</article-title><source>NEJM AI</source><access-date>2025-07-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://ai.nejm.org/doi/full/10.1056/AIcs2300145">https://ai.nejm.org/doi/full/10.1056/AIcs2300145</ext-link></comment></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="book"><person-group person-group-type="author"><collab>GlobalData Plc</collab></person-group><source>The Complexities of Physician Supply and Demand: Projections From 2021 to 2036</source><year>2024</year><publisher-name>Association of American Medical Colleges</publisher-name></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Prompts and outputs from LLMs. LLM: large language model.</p><media xlink:href="jopm_v17i1e68146_app1.pdf" xlink:title="PDF File, 1741 KB"/></supplementary-material></app-group></back></article>