<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JBB</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Bioinform Biotech</journal-id>
      <journal-title>JMIR Bioinformatics and Biotechnology</journal-title>
      <issn pub-type="epub">2563-3570</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v5i1e64406</article-id>
      <article-id pub-id-type="pmid">39321336</article-id>
      <article-id pub-id-type="doi">10.2196/64406</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Ethical Considerations in Human-Centered AI: Advancing Oncology Chatbots Through Large Language Models</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Uzun</surname>
            <given-names>Ece</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lai</surname>
            <given-names>Jiaying</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Arasteh</surname>
            <given-names>Soroosh Tayebi</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Chow</surname>
            <given-names>James C L</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <address>
            <institution>Princess Margaret Cancer Centre</institution>
            <institution>University Health Network</institution>
            <addr-line>7/F, Rm 7-606</addr-line>
            <addr-line>700 University Ave</addr-line>
            <addr-line>Toronto, ON, M5G 1X6</addr-line>
            <country>Canada</country>
            <phone>1 4169464501</phone>
            <fax>1 4169466566</fax>
            <email>james.chow@uhn.ca</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4202-4855</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Li</surname>
            <given-names>Kay</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5765-1635</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Radiation Oncology</institution>
        <institution>University of Toronto</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Princess Margaret Cancer Centre</institution>
        <institution>University Health Network</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of English</institution>
        <institution>University of Toronto</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: James C L Chow <email>james.chow@uhn.ca</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>6</day>
        <month>11</month>
        <year>2024</year>
      </pub-date>
      <volume>5</volume>
      <elocation-id>e64406</elocation-id>
      <history>
        <date date-type="received">
          <day>16</day>
          <month>7</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>22</day>
          <month>8</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>23</day>
          <month>8</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>23</day>
          <month>9</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©James C L Chow, Kay Li. Originally published in JMIR Bioinformatics and Biotechnology (https://bioinform.jmir.org), 06.11.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Bioinformatics and Biotechnology, is properly cited. The complete bibliographic information, a link to the original publication on https://bioinform.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://bioinform.jmir.org/2024/1/e64406" xlink:type="simple"/>
      <abstract>
        <p>The integration of chatbots in oncology underscores the pressing need for human-centered artificial intelligence (AI) that addresses patient and family concerns with empathy and precision. Human-centered AI emphasizes ethical principles, empathy, and user-centric approaches, ensuring technology aligns with human values and needs. This review critically examines the ethical implications of using large language models (LLMs) like GPT-3 and GPT-4 (OpenAI) in oncology chatbots. It examines how these models replicate human-like language patterns, impacting the design of ethical AI systems. The paper identifies key strategies for ethically developing oncology chatbots, focusing on potential biases arising from extensive datasets and neural networks. Specific datasets, such as those sourced from predominantly Western medical literature and patient interactions, may introduce biases by overrepresenting certain demographic groups. Moreover, the training methodologies of LLMs, including fine-tuning processes, can exacerbate these biases, leading to outputs that may disproportionately favor affluent or Western populations while neglecting marginalized communities. By providing examples of biased outputs in oncology chatbots, the review highlights the ethical challenges LLMs present and the need for mitigation strategies. The study emphasizes integrating human-centric values into AI to mitigate these biases, ultimately advocating for the development of oncology chatbots that are aligned with ethical principles and capable of serving diverse patient populations equitably.</p>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>humanistic AI</kwd>
        <kwd>ethical AI</kwd>
        <kwd>human-centered AI</kwd>
        <kwd>machine learning</kwd>
        <kwd>large language models</kwd>
        <kwd>natural language processing</kwd>
        <kwd>oncology chatbot</kwd>
        <kwd>transformer-based model</kwd>
        <kwd>ChatGPT</kwd>
        <kwd>health care</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Overview</title>
        <p>The development of oncology chatbots underscores the critical need for systems grounded in human-centered artificial intelligence (AI) principles that prioritize empathy, accuracy, and personalized patient support. In the context of oncology, where patients and their families often face significant emotional and informational challenges, these chatbots are essential tools for addressing their unique concerns [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref6">6</xref>]. However, as the adoption of large language models (LLMs) such as GPT-3 and GPT-4 becomes increasingly common in health care, the ethical considerations surrounding their use have grown in importance. It is vital that oncology chatbots adhere to ethical standards that ensure fairness, transparency, accountability, and respect for user privacy and autonomy. These systems should be designed to serve diverse user groups, particularly those from underrepresented communities, by avoiding biases and ensuring equitable treatment [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. Human-centered AI in oncology focuses on creating systems that prioritize the needs and experiences of patients and health care providers, thereby enhancing care, empathy, and support. Ethical AI extends beyond mere technical functionality; it involves embedding principles that safeguard the well-being, dignity, and rights of patients. This includes building trust through transparency, securing patient data, and delivering accurate and bias-free information [<xref ref-type="bibr" rid="ref9">9</xref>-<xref ref-type="bibr" rid="ref12">12</xref>].</p>
        <p>This review explores the integration of generative AI and LLMs into oncology chatbots, aiming to create tools that embody these human-centered AI principles. The customization and personalization of chatbots are essential to meet the specific needs of each user, transforming traditional chatbots from basic information providers into empathetic, patient-focused tools that significantly enhance the care experience [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. The primary goal of this review is to examine the challenges and ethical concerns associated with deploying AI in sensitive health care settings, particularly oncology. As these technologies become more widespread, it is crucial to ensure that they align with human-centered ethical principles. This study is motivated by the need to address potential biases in AI systems, which could inadvertently harm the very patients they are designed to support.</p>
        <p>The paper contributes to the field by identifying and analyzing key ethical challenges associated with oncology chatbots, with a specific focus on biases in the datasets used to train these models. Unlike previous studies that provide broad discussions on AI ethics, this review specifically addresses the unique ethical dilemmas faced in oncology, where the stakes are exceptionally high. The study also offers practical strategies for developers and health care providers to enhance the ethical development of AI, proposing a framework for human-centered AI in oncology. The findings of this study reveal that oncology chatbots often exhibit biases rooted in their training data, leading to unfair or ineffective outcomes. To address these issues, the paper provides strategic recommendations, such as using more diverse and representative datasets, implementing continuous monitoring, and refining training methodologies. These measures aim to ensure that AI-driven tools in oncology are not only effective but also ethically sound. In comparison to existing literature, this study offers a focused analysis of the ethical implications specific to oncology chatbots, an area that has been relatively underexplored. By providing a detailed examination of the sources of bias and presenting practical solutions, this paper advances the conversation on ethical AI in health care, particularly within the critical field of oncology.</p>
      </sec>
      <sec>
        <title>Enhancing Oncology Chatbots With Ethical and Human-Centered AI</title>
        <p>In oncology, ethical principles like beneficence, nonmaleficence, autonomy, and justice are crucial to ensure patient well-being. Oncology chatbots, designed to support patients and families, must adhere to these guidelines. For example, a chatbot for patients with breast cancer can provide personalized treatment information and emotional support, ensuring that the information is accurate, culturally sensitive, and delivered with empathy [<xref ref-type="bibr" rid="ref13">13</xref>]. Such chatbots can significantly ease the burden on patients by offering timely and relevant information. However, these chatbots also face ethical challenges, particularly in maintaining privacy and data security. For instance, a pediatric oncology chatbot must securely handle sensitive data, requiring robust encryption and transparent data usage policies [<xref ref-type="bibr" rid="ref14">14</xref>]. Additionally, regular updates and monitoring are essential to prevent biases or inaccuracies that could harm patients. Transparency is another critical concern. Oncology chatbots must clearly disclose their AI nature to users. For instance, in end-of-life care, failing to inform users that they are interacting with an AI could lead to mistrust and harm the health care organization’s reputation. A proactive approach with clear self-disclosure at the start of interactions is essential to maintain trust [<xref ref-type="bibr" rid="ref15">15</xref>]. In health care domains like nephrology, similar ethical considerations apply, with a focus on patient consent, privacy, and bias mitigation. In educational settings, oncology chatbots can also be valuable, but they must follow ethical frameworks to ensure accurate information delivery and fair AI operation. 
By adhering to these principles, oncology chatbots can effectively bridge learning gaps while maintaining trust and integrity [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref18">18</xref>].</p>
      </sec>
      <sec>
        <title>Designing Ethical and Trustworthy Oncology Chatbots With Human-Centered AI</title>
        <p>Ethical chatbots, therefore, need to adhere to certain principles. They should prioritize transparency, providing users with clear indications when they are interacting with AI rather than a human. Respecting user privacy, obtaining informed consent, mitigating biases, ensuring data security, and promoting responsible AI use in education are central to developing ethical chatbots. By integrating ethical frameworks and considering societal impact, chatbots can contribute positively while upholding ethical standards in their interactions with users.</p>
        <p>There are 2 concerns in building an ethical oncology chatbot in human-centered AI—first, it has to build trust with its users. Second, how can it build that trust? Building a human-centered approach to AI-driven chatbots involves a strategic integration of several key elements. First, the design should prioritize the user’s needs and expectations. Rather than merely dispensing information mechanically, the chatbot should discern and address human needs relevantly. This personalized approach fosters a more trustworthy relationship between the user and the AI. Trust emerges as a pivotal concern in the development of AI chatbots.</p>
        <p>Second, essential strategies can be used to cultivate trust in the oncology chatbots. The first involves personalization tailored to each user, enhancing the sense of individual relevance and reliability [<xref ref-type="bibr" rid="ref19">19</xref>]. The second entails infusing the oncology chatbot with a human-like persona, creating a relatable and approachable interaction for users [<xref ref-type="bibr" rid="ref20">20</xref>]. Implementing these qualities in the design of AI chatbots requires a thoughtful technical strategy. While the focus here is less on technical aspects and more on user experience and interaction, achieving personalization involves machine learning algorithms capable of understanding and adapting to individual user preferences [<xref ref-type="bibr" rid="ref21">21</xref>]. Meanwhile, instilling a human-like persona necessitates sophisticated natural language processing (NLP) techniques and dialogue design that emulate human conversational patterns [<xref ref-type="bibr" rid="ref22">22</xref>]. In essence, the development of human-centered AI chatbots revolves around creating an experience that seamlessly integrates technical prowess with an empathetic understanding of human needs through web-based interactions with users. By bridging the gap between technological sophistication and human-like interaction, these chatbots can truly serve as effective companions in addressing users’ queries and needs.</p>
      </sec>
      <sec>
        <title>Ethical Challenges in Implementing Transformer-Based AI Models for Health Care Enhancement</title>
        <p>One way is in the design of transformers. In 2019, transformers were used to create LLMs such as Bidirectional Encoder Representations from Transformers (BERT) and GPT-2 [<xref ref-type="bibr" rid="ref23">23</xref>]. The integration of AI technologies, specifically LLMs, holds immense potential for improving efficiency and decision support in health care settings. However, ethical considerations become paramount when deploying such models, especially in critical domains like health care.</p>
        <p>GPT-4, the underlying model of ChatGPT, has demonstrated significant potential in conversational AI applications [<xref ref-type="bibr" rid="ref24">24</xref>]. This advancement has sparked discussions about the ethical implications of deploying such powerful models in health care. One primary concern is the potential inaccuracies in generated content. LLMs can produce convincing yet incorrect information, posing a risk of errors in medical records. Compounding this issue is the opacity of training data, making it challenging to assess accuracy effectively [<xref ref-type="bibr" rid="ref25">25</xref>]. To address this concern, it is crucial for LLMs like GPT-4 to train on precise and validated medical datasets [<xref ref-type="bibr" rid="ref26">26</xref>].</p>
        <p>The growing integration of AI chatbots, exemplified by tools such as ChatGPT and Google Bard, in health care, introduces critical security implications [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. While these AI-driven systems hold significant promise for improving patient care and public health, their reliance on massive datasets, including sensitive patient information, raises concerns about data security. During the pandemic, health care chatbots have become extensively used, addressing tasks like appointment scheduling and providing health information [<xref ref-type="bibr" rid="ref29">29</xref>]. However, this increased usage magnifies security risks and privacy challenges that remain understudied. AI chatbots, like ChatGPT, also pose unique challenges in ensuring patient privacy and compliance with regulations such as the Health Insurance Portability and Accountability Act (HIPAA) [<xref ref-type="bibr" rid="ref30">30</xref>]. Recent viewpoints in medical journals highlight the need for providers to navigate HIPAA compliance while safeguarding patient data [<xref ref-type="bibr" rid="ref31">31</xref>]. Additionally, the safety of medical AI chatbots in patient interactions becomes a paramount consideration, necessitating measures to protect patient data, maintain information accuracy, and ensure user understanding [<xref ref-type="bibr" rid="ref32">32</xref>]. Ethical considerations, including privacy and data security concerns, further complicate the widespread adoption of conversational AI in health care, emphasizing the need for comprehensive guidelines and robust encryption methods to build trust and safeguard sensitive health information in this era of AI-driven health care communication. Another critical ethical consideration is model bias [<xref ref-type="bibr" rid="ref33">33</xref>]. 
LLMs may inadvertently perpetuate biases present in their training data, leading to medically inaccurate and discriminatory responses. Biases can stem from various sources such as sampling, programming, and compliance, necessitating careful consideration to avoid perpetuating harmful stereotypes. Striking a balance between model accuracy and unbiased responses is essential for responsible deployment in health care settings.</p>
        <p>Privacy, a fundamental principle in health care, adds another layer of ethical complexity when using public LLMs. The potential risks associated with data sharing must be mitigated through strict agreements and HIPAA-compliant training protocols. Ensuring patient privacy is paramount in the implementation of AI technologies in health care [<xref ref-type="bibr" rid="ref34">34</xref>].</p>
        <p>Despite the potential benefits of using AI technologies, particularly transformer-based models, in health care, careful consideration of ethical principles is crucial. Addressing concerns related to accuracy, bias, and privacy will facilitate responsible and patient-centered implementation, benefiting both health care professionals and patients.</p>
        <p>The insights from the Megatron transformer underscore the ethical considerations in deploying transformer models like ChatGPT [<xref ref-type="bibr" rid="ref35">35</xref>]. Trained on vast datasets, Megatron suggests AI’s incapacity to independently ensure ethical behavior, emphasizing its tool-like nature dependent on human usage. Addressing the potential biases in transformer models, especially in health care, demands a focus on fairness metrics, proactive bias detection, and diverse training data. Continuous user feedback becomes crucial for iterative refinement, and bias-awareness training for stakeholders fosters a culture of ethical responsibility. Integrating these strategies into the deployment of transformer models is imperative, ensuring more equitable and inclusive AI-generated content across diverse applications.</p>
      </sec>
      <sec>
        <title>Ethical Considerations in Deploying LLMs in Health Care and Education</title>
        <p>Using LLMs raises ethical considerations, including the potential for biased outputs, breaches of privacy, and the risk of misuse. These may have serious implications in medical settings. Addressing these concerns requires the adoption of transparent development practices, the responsible handling of data, and the integration of fairness mechanisms.</p>
        <p>The integration of LLMs, such as ChatGPT, in medical practice and research raises crucial ethical issues concerning bias, trust, authorship, equitability, and privacy [<xref ref-type="bibr" rid="ref32">32</xref>]. Although this technology has the potential to revolutionize medicine and medical research, being mindful of its potential consequences is essential. An outright ban on the use of this technology would be shortsighted. Instead, establishing guidelines that aim to responsibly and effectively use LLMs is crucial.</p>
        <p>LLMs, like BioGPT [<xref ref-type="bibr" rid="ref36">36</xref>] and LaMDA (Google Brain) [<xref ref-type="bibr" rid="ref37">37</xref>], are currently under exploration for various applications in the medical field, showcasing versatility in tasks such as text generation, summarization, and aiding in clinical documentation and academic writing. The integration of LLMs, including oncology chatbots powered by ChatGPT, holds promise for streamlining essential health care tasks, including template creation, summarizing academic content, and enhancing the clarity of clinical notes. This potential introduces significant time-saving and efficiency gains in medical settings.</p>
        <p>However, the incorporation of LLMs, particularly oncology chatbots, into health care applications also presents ethical challenges that demand careful consideration to ensure responsible use. Recent research underscores concerns related to the attribution of credit and rights for content generated by LLMs. Users may encounter difficulties in fully claiming credit for positive outcomes while potentially facing responsibility for unintended consequences, such as the generation of misinformation. This highlights the pressing need for updated perspectives on responsibility and the establishment of clear guidelines addressing issues like authorship, disclosure, educational applications, and intellectual property in the context of oncology chatbots and LLMs in general. Navigating the ethical implications of integrating oncology chatbots and LLMs into the medical field requires a comprehensive approach to foster responsible and transparent use of these powerful language models in health care settings.</p>
        <p>In the field of education, LLMs show potential in automating tasks such as question generation, feedback provision, and essay grading. However, concerns about practicality and ethics, including technological readiness, transparency, and privacy considerations, must be addressed. A systematic scoping review identifies these challenges and recommends updating innovations with state-of-the-art models, open-sourcing models or systems, and adopting a human-centered approach in development. Therefore, the ethical considerations surrounding the use of LLMs in various fields of medicine and education necessitate a careful and responsible approach. Establishing clear guidelines such as ensuring transparency and incorporating human oversight are essential steps in harnessing the benefits of LLMs while mitigating potential risks.</p>
        <p>As a research group focused on human-centered AI and the ethical integration of AI principles into medical and oncology chatbots [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref6">6</xref>], particularly leveraging LLMs [<xref ref-type="bibr" rid="ref32">32</xref>], our analysis delves into the historical evolution and the transformative potential of LLMs. We aim to spotlight the continuum of advancements in computational theory that has shaped our technological landscape, emphasizing the pivotal role of integrating humanistic and ethical considerations into AI for health care.</p>
      </sec>
    </sec>
    <sec>
      <title>LLMs and NLP Unveil New Potential for Human-Centered AI in Oncology Chatbots</title>
      <sec>
        <title>Neural Networks and Machine Learning</title>
        <p>Neural networks, fundamental to modern AI, emulate the structure and functioning of the human brain, forming the basis for various applications [<xref ref-type="bibr" rid="ref38">38</xref>]. In the medical context, the integration of LLMs like ChatGPT brings forth unprecedented possibilities. LLMs are part of the NLP domain and are built on architectures such as GPT and BERT [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Unlike rule-based models, LLMs learn unsupervised from extensive text data during pretraining, gaining a profound understanding of syntax, grammar, and context. Fine-tuning follows, adapting their knowledge for tasks like text generation and sentiment analysis.</p>
        <p>Within the broader landscape of human-centered AI, the principles of neural networks and machine learning persist. The capacity of neural networks to capture complex patterns in data, combined with machine learning algorithms, remains instrumental. In the realm of human-centered AI, LLMs and NLP play a crucial role. NLP focuses on enabling machines to comprehend and generate human language, aligning with the principles of human-centered AI [<xref ref-type="bibr" rid="ref40">40</xref>]. LLMs, as a significant advancement in NLP, excel in understanding and generating human-like language, enhancing natural interactions between AI systems and users. In the context of oncology chatbots, the integration of LLMs is pivotal. These advanced models empower chatbots to comprehend medical queries, respond empathetically, and adapt to diverse communication styles, ultimately improving the user experience in health care interactions. The use of LLMs in oncology chatbots not only fosters effective communication but also reinforces the human-centered aspect by creating more empathetic and context-aware interactions within the medical domain.</p>
      </sec>
      <sec>
        <title>LLMs in Oncology Chatbot</title>
        <p>LLMs have achieved remarkable breakthroughs, innovating the field of NLP with their capacity to generate human-like text and excel in a multitude of NLP tasks. A compelling example is their application in the development of oncology chatbots [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref41">41</xref>]. These chatbots have the ability to communicate with users in a natural and coherent manner, offering invaluable assistance to both health care professionals and patients. LLMs have enabled oncology chatbots to generate human-like responses, providing users with a web-based and intuitive experience. These chatbots can understand complex medical queries, extract relevant information from patients’ descriptions of their symptoms, and generate responses that are not only accurate but also easily comprehensible to laypersons. This human-like text-generation capability significantly enhances the user experience, fostering trust and improving communication between patients and health care providers [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        <p>Furthermore, LLMs empower oncology chatbots to perform diverse NLP tasks within the health care domain. They can extract critical information from medical records, assisting in patient diagnosis and treatment recommendations. These chatbots can also provide medication information, offer guidance on healthy lifestyles, and even support mental health through empathetic conversations [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. Their versatility makes them invaluable tools in health care, augmenting the capabilities of medical professionals and providing accessible, round-the-clock health care information and support. <xref rid="figure1" ref-type="fig">Figure 1</xref> shows the various applications of an oncology chatbot powered by LLMs.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Various applications of an LLM-powered oncology chatbot. LLM: large language model.</p>
          </caption>
          <graphic xlink:href="bioinform_v5i1e64406_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec>
      <title>Applications and Implications of LLMs for Human-Centered AI in Oncology Chatbots</title>
      <sec>
        <title>Practical Applications of LLMs in Human-Centered AI</title>
        <p>LLMs have demonstrated extensive practical applications across diverse domains, showcasing their versatility and transformative potential within the framework of human-centered AI. In the realm of language translation, these models have markedly enhanced the precision and fluency of machine translation systems [<xref ref-type="bibr" rid="ref45">45</xref>]. They adeptly translate text among multiple languages, fostering seamless cross-cultural communication and bolstering global business operations. Within text generation, LLMs exhibit proficiency in crafting human-like text for multifarious purposes, aiding content creation by drafting papers, generating marketing copy, or assisting authors in producing creative content [<xref ref-type="bibr" rid="ref46">46</xref>]. Moreover, LLMs find use in chatbots and web-based assistants, delivering natural and contextually sensitive responses in customer support, health care, and various other industries [<xref ref-type="bibr" rid="ref47">47</xref>,<xref ref-type="bibr" rid="ref48">48</xref>]. An illustration in <xref rid="figure2" ref-type="fig">Figure 2</xref> depicts an example of the RT Bot used in radiotherapy education, epitomizing the integration of LLMs within the sphere of human-centered AI applications.</p>
        <p>In software development, LLMs have demonstrated their prowess in code generation and code completion tasks. They can assist programmers by generating code snippets, fixing bugs, and enhancing productivity [<xref ref-type="bibr" rid="ref49">49</xref>]. Moreover, in data analytics, LLMs are used for natural language querying of databases, simplifying data exploration and analysis for nontechnical users [<xref ref-type="bibr" rid="ref50">50</xref>]. Moreover, LLMs are invaluable in the health care sector, where they aid in medical record analysis, diagnosis support, and drug discovery [<xref ref-type="bibr" rid="ref51">51</xref>]. They can sift through vast amounts of medical literature to extract relevant information and assist health care professionals in making informed decisions. LLMs are also used in sentiment analysis and social media monitoring, helping businesses gauge public opinion, and adapt their strategies accordingly [<xref ref-type="bibr" rid="ref52">52</xref>].</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>The RT Bot providing education in radiotherapy.</p>
          </caption>
          <graphic xlink:href="bioinform_v5i1e64406_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Humanistic and Ethical AI</title>
        <p>Humanistic AI refers to the approach in AI development that prioritizes human values, well-being, and understanding in the design and implementation of AI systems [<xref ref-type="bibr" rid="ref53">53</xref>]. It emphasizes creating AI technologies that align with human principles, fostering empathy, compassion, and a deeper understanding of human needs and emotions. On the other hand, ethical AI involves adhering to moral principles and guidelines in the development and deployment of AI systems [<xref ref-type="bibr" rid="ref54">54</xref>]. It encompasses considerations of fairness, transparency, accountability, privacy, and the societal impact of AI applications. Ethical AI aims to ensure that AI technologies benefit individuals and communities while minimizing potential harm or biases. Incorporating humanistic and ethical AI principles into oncology chatbots is crucial. Humanistic AI prioritizes empathy and understanding of human needs, while ethical AI ensures fairness, transparency, and accountability. This dual focus not only aligns with societal expectations but also safeguards against biases and harm, ensuring AI benefits individuals and communities in the medical domain [<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref57">57</xref>].</p>
        <p>The development of LLMs and oncology chatbots is deeply intertwined with the concepts of humanistic and ethical AI. LLMs, such as GPT-3 and GPT-4, are designed to generate human-like text and have been applied to various domains, including health care [<xref ref-type="bibr" rid="ref58">58</xref>]. Oncology chatbots powered by LLMs aim to provide assistance, information, and even preliminary diagnosis to users [<xref ref-type="bibr" rid="ref59">59</xref>]. Humanistic AI in oncology chatbots based on LLMs involves creating interfaces and interactions that are more empathetic, understandable, and accommodating to human emotions and concerns [<xref ref-type="bibr" rid="ref60">60</xref>]. It seeks to imbue these AI systems with a human touch, making them more relatable and comforting for users seeking medical information or support. Ethical considerations in the development of LLM-based oncology chatbots are crucial. These AI systems must maintain patient privacy, ensure the accuracy and reliability of information provided, mitigate biases in data and responses, and offer transparent explanations for their suggestions or advice [<xref ref-type="bibr" rid="ref61">61</xref>]. In addition, ethical AI in this context involves clearly delineating the capabilities and limitations of oncology chatbots to users, ensuring informed decision-making and responsible use of the technology.</p>
        <p>Humanistic and ethical AI principles guide the responsible development and deployment of LLM-based oncology chatbots, promoting trust, reliability, and user satisfaction while addressing societal concerns and ethical implications associated with these AI-driven health care solutions [<xref ref-type="bibr" rid="ref62">62</xref>].</p>
      </sec>
      <sec>
        <title>Societal and Ethical Implications of LLMs in Deploying Oncology Chatbots</title>
        <p>The deployment of LLMs in the health care sector, particularly in the form of oncology chatbots, presents both significant benefits and ethical challenges. On one hand, oncology chatbots powered by LLMs can enhance access to health care information and provide quick assistance to users with medical queries. They offer a convenient means for individuals to seek information about symptoms, treatments, or health care recommendations. However, ethical concerns emerge when considering issues of privacy, security, and misinformation [<xref ref-type="bibr" rid="ref63">63</xref>]. Oncology chatbots may inadvertently expose sensitive patient information if not properly secured, raising concerns about data breaches and privacy violations. Moreover, LLMs can potentially propagate medical misinformation, leading to incorrect self-diagnoses or treatment decisions [<xref ref-type="bibr" rid="ref64">64</xref>]. The responsible development and deployment of oncology chatbots must involve robust data protection measures, continuous monitoring for accuracy, and adherence to medical ethics guidelines to ensure that these technological advancements contribute positively to health care while mitigating potential risks. Balancing the benefits of LLM-powered oncology chatbots with these ethical considerations is essential for their responsible and effective use in the health care domain [<xref ref-type="bibr" rid="ref32">32</xref>]. Above all, the training datasets can be biased and can fall short in representing underrepresented communities such as women, aboriginal people, persons with disabilities, and members of visible minorities [<xref ref-type="bibr" rid="ref65">65</xref>,<xref ref-type="bibr" rid="ref66">66</xref>]. Oncology chatbots must still be trained to address the needs of these communities.</p>
      </sec>
    </sec>
    <sec>
      <title>Challenges and Limitations</title>
      <sec>
        <title>Incorporating Humanistic and Ethical Principles Into LLM-Driven Oncology Chatbots</title>
        <p>The application of LLMs in oncology chatbots not only presents a promising avenue for enhancing health care accessibility and support but also introduces critical ethical considerations within the realm of human-centered AI [<xref ref-type="bibr" rid="ref20">20</xref>]. Despite the potential benefits, the integration of these systems raises significant ethical concerns and safety considerations. One prevalent issue pertains to the potential perpetuation of biases and discrimination within these AI systems. LLMs, learning from extensive datasets that may inherently contain societal biases, risk generating skewed recommendations or responses that could adversely affect specific demographics, thus perpetuating health care disparities [<xref ref-type="bibr" rid="ref67">67</xref>,<xref ref-type="bibr" rid="ref68">68</xref>]. Moreover, the deployment of AI-driven chatbots might inadvertently impede individuals’ autonomy, recourse, and rights by overshadowing or dismissing their unique health care needs or preferences [<xref ref-type="bibr" rid="ref69">69</xref>]. Transparency also remains a significant challenge, as these models often generate outcomes that are nontransparent, difficult to explain, or seemingly unjustifiable, making it challenging for users to comprehend or challenge the decisions made by the AI [<xref ref-type="bibr" rid="ref32">32</xref>]. Furthermore, there are concerns regarding user privacy breaches, as personal health information shared with these chatbots may not always be adequately secured [<xref ref-type="bibr" rid="ref70">70</xref>]. Additionally, the reliance on AI-driven interactions might risk isolation and the deterioration of the patient-doctor relationship, potentially undermining the crucial social connections essential for holistic health care [<xref ref-type="bibr" rid="ref71">71</xref>]. 
Ensuring the reliability and safety of outcomes produced by these chatbots remains a concern, as inaccuracies or poor-quality responses could have detrimental consequences on patient health and well-being [<xref ref-type="bibr" rid="ref72">72</xref>,<xref ref-type="bibr" rid="ref73">73</xref>]. Mitigating these ethical challenges and ensuring the safety of LLM-based oncology chatbots necessitate robust frameworks, stringent regulations, and ongoing scrutiny to address potential harms and uphold ethical standards within the domain of human-centered AI in health care. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows a proposed framework of a radiotherapy chatbot based on ChatGPT. The chatbot is anchored by a robust core powered by ChatGPT, interfacing seamlessly with a meticulously curated database of verified medical information [<xref ref-type="bibr" rid="ref74">74</xref>]. The model undergoes domain-specific training to enhance its comprehension of radiotherapy intricacies, while a continuous feedback loop ensures that validated data inform its responses and are cross-verified for accuracy. To enhance ethical AI practices, the framework should incorporate bias mitigation strategies by diversifying data sources, ensuring transparency about the chatbot’s capabilities and limitations, implementing robust user privacy measures, establishing continuous ethical reviews, providing user education on verifying information, and creating accessible feedback mechanisms for reporting inaccuracies. This iterative approach fosters a dynamic, reliable, and ethically responsible ecosystem for delivering accurate and up-to-date information within the scope of radiotherapy [<xref ref-type="bibr" rid="ref75">75</xref>].</p>
        <p>Therefore, integrating humanistic and ethical principles into LLM-based oncology chatbots stands as a significant challenge in contemporary AI development [<xref ref-type="bibr" rid="ref76">76</xref>]. Achieving this integration requires a comprehensive approach. First, prioritizing patient confidentiality and data security remains pivotal. Implementing robust encryption measures and stringent access controls can effectively mitigate risks associated with sensitive medical information [<xref ref-type="bibr" rid="ref77">77</xref>]. Second, infusing empathy and sensitivity into the chatbot’s responses poses a significant hurdle. It necessitates the development of algorithms capable of understanding and empathetically responding to patients’ emotional states, demanding extensive research into sentiment analysis and contextually appropriate language generation [<xref ref-type="bibr" rid="ref78">78</xref>]. Moreover, carefully considering the ethical implications of decision-making in medical scenarios is crucial. Collaborative efforts among AI developers, ethicists, and medical professionals are vital to embed ethical guidelines into the chatbot’s algorithms, ensuring alignment with medical ethics and patient welfare [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref79">79</xref>,<xref ref-type="bibr" rid="ref80">80</xref>]. Striking a balance between technical functionality and ethical considerations is key to fostering trust and acceptance of LLM-based oncology chatbots in the health care ecosystem. Continuous vigilance, ongoing refinement, and transparent communication about the chatbot’s capabilities and limitations are essential steps in responsibly integrating humanistic and ethical principles into this advancing technology [<xref ref-type="bibr" rid="ref63">63</xref>,<xref ref-type="bibr" rid="ref81">81</xref>].</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Schematic diagram showing the framework of a medical chatbot based on large language model–based ChatGPT, focused on radiotherapy, ensuring accuracy, compliance, and continuous refinement.</p>
          </caption>
          <graphic xlink:href="bioinform_v5i1e64406_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Approaches to Mitigate Bias in LLM-Driven Oncology Chatbots</title>
        <p>To avoid potential bias in LLM-based oncology chatbots, it is crucial to adopt a comprehensive approach. First, ensure that the training data are diverse and representative of the entire population the chatbot aims to assist. This involves incorporating information from various demographic groups, ethnicities, genders, and socioeconomic backgrounds to prevent the model from learning and perpetuating biases present in specific subsets of data [<xref ref-type="bibr" rid="ref82">82</xref>,<xref ref-type="bibr" rid="ref83">83</xref>]. Moreover, ethical data collection practices should be a priority, with developers implementing strict guidelines to eliminate unintentional biases. Transparently communicate ethical standards to users and stakeholders to foster trust and accountability in the development process [<xref ref-type="bibr" rid="ref84">84</xref>]. Incorporating bias detection and correction algorithms during both the training and deployment phases is essential [<xref ref-type="bibr" rid="ref85">85</xref>,<xref ref-type="bibr" rid="ref86">86</xref>]. These mechanisms should be designed to identify and rectify biased outputs in real time, with regular updates to adapt to evolving data and user interactions. In addition, transparency is key in addressing bias; therefore, the chatbot should be designed to provide clear explanations for its decisions. This not only enhances user trust but also enables health care professionals to understand the reasoning behind the chatbot’s recommendations. Continuous monitoring and evaluation are also important to the chatbot’s success [<xref ref-type="bibr" rid="ref87">87</xref>,<xref ref-type="bibr" rid="ref88">88</xref>]. Regularly assess its performance over time, ensuring that potential biases are identified and corrected promptly. 
User feedback integration further enhances the system, allowing diverse user groups to report biases and contribute to ongoing improvements [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. Furthermore, collaboration with health care professionals is paramount. Involving experts in the development and validation processes helps refine the chatbot’s responses, ensuring accuracy and minimizing biases that may arise from a lack of medical context [<xref ref-type="bibr" rid="ref32">32</xref>]. Finally, regulatory compliance with health care and data protection standards is vital. Adhering to established regulations ensures that the chatbot operates within ethical and legal boundaries, building trust among users and health care providers alike [<xref ref-type="bibr" rid="ref89">89</xref>]. <xref ref-type="table" rid="table1">Table 1</xref> summarizes the strategies for mitigating bias in LLM-based oncology chatbots.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Strategies for mitigating bias in large language model–based oncology chatbots.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="500"/>
            <col width="500"/>
            <thead>
              <tr valign="top">
                <td>Strategies</td>
                <td>Description</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Diverse and representative training data</td>
                <td>Use data that reflect the diversity of the target population</td>
              </tr>
              <tr valign="top">
                <td>Ethical data collection practices</td>
                <td>Implement strict ethical guidelines for data collection</td>
              </tr>
              <tr valign="top">
                <td>Bias detection and correction algorithms</td>
                <td>Integrate algorithms to identify and correct biased outputs</td>
              </tr>
              <tr valign="top">
                <td>Explainability and transparency</td>
                <td>Design the chatbot to provide clear explanations for decisions</td>
              </tr>
              <tr valign="top">
                <td>Continuous monitoring and evaluation</td>
                <td>Continuously monitor and evaluate the chatbot’s performance</td>
              </tr>
              <tr valign="top">
                <td>User feedback integration</td>
                <td>Encourage user feedback to identify and address biases</td>
              </tr>
              <tr valign="top">
                <td>Collaboration with health care professionals</td>
                <td>Involve health care experts in development and validation</td>
              </tr>
              <tr valign="top">
                <td>Regulatory compliance</td>
                <td>Adhere to health care and data protection regulations</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Navigating the AI Frontier: Challenges and Ethical Considerations</title>
        <p>The rise of LLMs, exemplified by GPT-4, has sparked both excitement and apprehension. Geoffrey Hinton, a prominent figure in deep learning, acknowledges their potential to surpass human intelligence [<xref ref-type="bibr" rid="ref90">90</xref>]. However, this rapid progress raises ethical and safety concerns. Despite having significantly fewer connections than the human brain, LLMs exhibit remarkable learning capabilities. Their ability to generalize from limited examples challenges conventional wisdom. Hinton argues that their occasional errors and hallucinations are features, akin to human imperfections. His fears extend beyond mere intelligence; he emphasizes the risk of AI misuse by malicious actors. Whether in elections or warfare, AI’s capacity to create subgoals and manipulate environments demands urgent attention. Responsible development and regulation are imperative. Hinton envisions a hybrid intelligence—a fusion of learning and communication—where machines outperform humans in both domains. This transformative era requires collective action and societal discussions akin to historical agreements on chemical weapons. As AI development outpaces regulation, Hinton questions whether our existing social structures can handle the implications. Responsible AI deployment necessitates interdisciplinary collaboration and thoughtful governance. While some may dismiss Hinton’s concerns, the stakes are high. As we navigate the path toward AI advancement, we must grapple with the potential consequences and strive for ethical, human-centered progress.</p>
      </sec>
      <sec>
        <title>Concerns of Datasets in LLMs or NLP for Ethical and Human-Centered AI</title>
        <p>The ethical considerations surrounding the datasets used in training LLMs and NLP systems are critical for advancing human-centered AI, particularly in the context of oncology chatbots. The datasets used for training these models often reflect societal biases, which can lead to ethical dilemmas when the outputs of these chatbots are applied in real-world health care settings [<xref ref-type="bibr" rid="ref57">57</xref>]. For instance, commonly used datasets like the Common Crawl, Wikipedia, and clinical databases may overrepresent affluent, Western demographics while underrepresenting minority groups, non-Western cultures, and marginalized communities. This bias can result in oncology chatbots that are less effective in serving diverse patient populations, potentially exacerbating health disparities. Moreover, the ethical implications of dataset bias become evident when examining specific LLMs like GPT-3 and GPT-4. These models are often fine-tuned on domain-specific datasets, which can inadvertently amplify existing biases. For example, if an oncology chatbot is trained predominantly on datasets from high-income health care systems, it may lack the cultural competency required to address the needs of patients from low-income or diverse backgrounds [<xref ref-type="bibr" rid="ref91">91</xref>]. Such a scenario not only limits the chatbot’s effectiveness but also raises concerns regarding equity in health care delivery. <xref ref-type="table" rid="table2">Table 2</xref> outlines how to address the concerns related to datasets in LLM or NLP for ethical and human-centered AI, specifically in the context of oncology chatbots. This table provides examples of datasets, identifies potential biases, and suggests strategies for mitigating these biases. By systematically addressing these issues, we seek to illuminate the vital importance of ethical dataset selection and its influence on developing effective, human-centered oncology chatbots. 
Our findings underscore the need for continuous evaluation and modification of datasets to reduce bias, ensuring that LLMs accurately represent and serve the diverse populations they aim to support in the field of oncology.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Overview of dataset, potential biases, and strategies for mitigation in large language model or natural language processing medical chatbot.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="150"/>
            <col width="260"/>
            <col width="260"/>
            <col width="330"/>
            <thead>
              <tr valign="top">
                <td>Dataset</td>
                <td>Description</td>
                <td>Potential biases</td>
                <td>Strategies for mitigation</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Common Crawl</td>
                <td>A large dataset collected from web pages across the internet</td>
                <td>Overrepresentation of Western cultures, socioeconomic status</td>
                <td>Ensure diverse sourcing and include localized health care data from various regions</td>
              </tr>
              <tr valign="top">
                <td>Wikipedia</td>
                <td>Open-source encyclopedia with content generated by volunteers</td>
                <td>Gender and racial biases due to contributor demographics</td>
                <td>Use guidelines for inclusive contributions and diversify contributor base</td>
              </tr>
              <tr valign="top">
                <td>MIMIC-III</td>
                <td>Critical care database with deidentified health data</td>
                <td>Predominantly includes data from urban hospitals; underrepresents rural populations</td>
                <td>Incorporate data from a variety of health care settings, including rural and underserved areas</td>
              </tr>
              <tr valign="top">
                <td>Health-related Twitter data</td>
                <td>Tweets related to health topics used for sentiment analysis</td>
                <td>Possible bias in language and topics relevant to affluent groups</td>
                <td>Filter and include tweets from diverse socioeconomic backgrounds and global populations</td>
              </tr>
              <tr valign="top">
                <td>Clinical trials data</td>
                <td>Data from clinical trials used to evaluate treatments</td>
                <td>Limited representation of minority groups in trial participants</td>
                <td>Prioritize inclusion of diverse populations in future trials and datasets</td>
              </tr>
              <tr valign="top">
                <td>PubMed studies</td>
                <td>Biomedical literature and research papers</td>
                <td>Predominantly Western-centric studies may neglect non-Western medical practices</td>
                <td>Integrate literature from diverse geographical regions and cultural contexts</td>
              </tr>
              <tr valign="top">
                <td>Patient health records</td>
                <td>Deidentified patient data for training models</td>
                <td>Disparities in data collection practices may overlook marginalized groups</td>
                <td>Standardize data collection practices to ensure comprehensive representation</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
    </sec>
    <sec>
      <title>Future Directions</title>
      <p>While LLMs have undoubtedly showcased remarkable capabilities, it is crucial to recognize their inherent limitations, especially when applied in the context of oncology chatbots. One of the most pronounced constraints is the absence of genuine understanding [<xref ref-type="bibr" rid="ref92">92</xref>]. LLMs excel at producing coherent and contextually relevant text, yet they lack true comprehension or reasoning abilities. In the realm of oncology chatbots, this limitation can manifest in responses based solely on patterns from their training data, without a deep grasp of medical principles [<xref ref-type="bibr" rid="ref93">93</xref>]. Furthermore, there is the risk of unintentionally generating misleading or inaccurate content, a particularly critical concern in health care, where erroneous information can carry significant consequences [<xref ref-type="bibr" rid="ref94">94</xref>]. Therefore, the deployment of oncology chatbots should be approached as a supplementary aid alongside human medical professionals [<xref ref-type="bibr" rid="ref95">95</xref>]. It is imperative to navigate their limitations thoughtfully while maintaining vigilant oversight to ensure the precision and reliability of the information they furnish.</p>
      <p>Ongoing research in the realm of LLMs is dedicated to confronting their limitations and enhancing their reliability, interpretability, and ethical standing. One particularly promising avenue focuses on the development of more resilient training datasets that seek to mitigate bias and encompass a broader spectrum of perspectives and languages [<xref ref-type="bibr" rid="ref96">96</xref>]. Researchers are actively exploring methods to render LLMs more interpretable, facilitating users in comprehending and trusting their decision-making processes. Additionally, there is a mounting emphasis on ethical considerations, including the establishment of guidelines and regulations governing LLM deployment, content generation, and the protection of data privacy [<xref ref-type="bibr" rid="ref97">97</xref>]. Upholding transparency, accountability, and fairness in LLMs is fundamental to their responsible use. Future directions may encompass the creation of hybrid models that combine the strengths of LLMs with other AI techniques, ultimately enhancing their reliability while diminishing the likelihood of generating misleading information [<xref ref-type="bibr" rid="ref98">98</xref>]. As LLMs assume an increasingly central role across diverse domains, ongoing research and ethical considerations are pivotal forces shaping their development and deployment for the betterment of society.</p>
      <p>Future directions for LLMs focus on several key areas of advancement. These include enhancing the models’ ability to understand context, nuances, and user intent in natural language, which will lead to more effective human-computer interactions. There is also a growing emphasis on integrating text-based models with vision and audio capabilities, enabling richer and more comprehensive communication. Addressing and reducing biases in LLMs is critical to ensuring fairness and inclusivity in generated content, while customization and fine-tuning of models are becoming increasingly important for specific applications. Efforts are also being made to develop more energy-efficient LLM architectures and training methods, which would reduce their environmental impact and make them accessible on low-power devices. Real-time conversational AI is another area of focus, with the goal of enabling more seamless, natural, and context-aware interactions. Privacy-preserving models are being researched to protect user data, and human-AI collaboration is being advanced to enhance productivity and creativity. Ethical guidelines and regulations are being established to ensure the responsible and safe use of LLMs [<xref ref-type="bibr" rid="ref99">99</xref>]. In education, LLMs are being used to create personalized and adaptive learning experiences. In the medical field, these models are expanding their role in research, diagnostics, and patient care, with a strong emphasis on adhering to medical ethics and ensuring compliance with standards such as patient confidentiality and informed consent. Finally, the creative capabilities of LLMs are being explored, pushing the boundaries in generating content across various artistic domains.</p>
      <p>The integration of LLMs into the domain of oncology chatbots raises intriguing opportunities and concerns, underscoring the significance of human-centered AI within health care applications. While LLMs offer a powerful tool for enhancing human-computer interactions, particularly in health care settings, their application necessitates careful consideration and balance [<xref ref-type="bibr" rid="ref100">100</xref>,<xref ref-type="bibr" rid="ref101">101</xref>]. Historically, expert systems have played a pivotal role in decision support and knowledge representation within these applications. The incorporation of LLMs introduces a novel dimension to this landscape by capitalizing on their remarkable capacity to comprehend and generate human language. However, it is crucial to recognize that akin to expert systems, LLMs possess inherent limitations. While excelling at processing extensive data and generating coherent responses, their actual grasp of intricate medical principles may be constrained. Therefore, the primary challenge lies in harnessing the capabilities of LLMs while ensuring that their responses align with medical accuracy, ethical considerations, and the ethos of human-centered AI in health care.</p>
    </sec>
    <sec>
      <title>Conclusions</title>
      <p>The emergence of LLMs signifies a transformative leap in computational paradigms, highlighting the central role of human-centered AI in this progression. Models such as GPT-3 and GPT-4 have not only revolutionized machine learning but have also profoundly influenced oncology chatbots through their advanced language processing capabilities. However, as technological advancements persist, the ethical dimensions—particularly concerning biases and misinformation—require meticulous attention. Integrating humanistic and ethical principles into the development of LLMs, especially within oncology chatbots, is crucial for responsible AI integration. Envisioning a future where machines possess unparalleled language abilities alongside adept management of ethical complexities demands a proactive ethical framework.</p>
      <p>This comprehensive review explores the evolution, applications, and future trajectories of LLMs in health care and beyond. It is essential to acknowledge the inherent limitations and dynamic nature of technology, suggesting that the landscape of LLMs is rapidly evolving. Future directions outlined herein may witness significant changes or novel developments shortly. Therefore, ongoing research efforts should continuously update and expand this review, encompassing newer LLM iterations, exploring specific health care applications, and conducting empirical studies to validate practical implications and real-world efficacy.</p>
      <p>Furthermore, deeper exploration into the ethical implications and societal impacts of widespread LLM implementation remains a critical avenue for future inquiry. Continued research endeavors in these areas will not only enhance our comprehension and use of LLMs but also address emerging challenges and opportunities, aligning with the foundational principles of human-centered AI.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">BERT</term>
          <def>
            <p>Bidirectional Encoder Representations from Transformers</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">HIPAA</term>
          <def>
            <p>Health Insurance Portability and Accountability Act</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">LLM</term>
          <def>
            <p>large language model</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">NLP</term>
          <def>
            <p>natural language processing</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work is supported by the Planning and Dissemination Grants—Institute Community Support, Canadian Institutes of Health Research, Canada (CIHR PCS—168296 and CIHR PCS—191021).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sanders</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
          </person-group>
          <article-title>Chatbot for health care and oncology applications using artificial intelligence and machine learning: systematic review</article-title>
          <source>JMIR Cancer</source>
          <year>2021</year>
          <volume>7</volume>
          <issue>4</issue>
          <fpage>e27850</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cancer.jmir.org/2021/4/e27850/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/27850</pub-id>
          <pub-id pub-id-type="medline">34847056</pub-id>
          <pub-id pub-id-type="pii">v7i4e27850</pub-id>
          <pub-id pub-id-type="pmcid">PMC8669585</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rebelo</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Sanders</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
          </person-group>
          <article-title>Learning the treatment process in radiotherapy using an artificial intelligence-assisted chatbot: development study</article-title>
          <source>JMIR Form Res</source>
          <year>2022</year>
          <volume>6</volume>
          <issue>12</issue>
          <fpage>e39443</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://formative.jmir.org/2022/12/e39443/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/39443</pub-id>
          <pub-id pub-id-type="medline">36327383</pub-id>
          <pub-id pub-id-type="pii">v6i12e39443</pub-id>
          <pub-id pub-id-type="pmcid">PMC9718518</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
            <name name-style="western">
              <surname>Sanders</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Design of an educational chatbot using artificial intelligence in radiotherapy</article-title>
          <source>AI</source>
          <year>2023</year>
          <volume>4</volume>
          <issue>1</issue>
          <fpage>319</fpage>
          <lpage>332</lpage>
          <pub-id pub-id-type="doi">10.3390/ai4010015</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kovacek</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
          </person-group>
          <article-title>An AI-assisted chatbot for radiation safety education in radiotherapy</article-title>
          <source>IOP SciNotes</source>
          <year>2021</year>
          <volume>2</volume>
          <issue>3</issue>
          <fpage>034002</fpage>
          <pub-id pub-id-type="doi">10.1088/2633-1357/ac1f88</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence in radiotherapy and patient care</article-title>
          <source>Artificial Intelligence in Medicine</source>
          <year>2021</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer International Publishing</publisher-name>
          <fpage>1</fpage>
          <lpage>13</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Sanders</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Developing an AI-assisted educational chatbot for radiotherapy using the IBM Watson assistant platform</article-title>
          <source>Healthcare (Basel)</source>
          <year>2023</year>
          <volume>11</volume>
          <issue>17</issue>
          <fpage>2417</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=healthcare11172417"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/healthcare11172417</pub-id>
          <pub-id pub-id-type="medline">37685452</pub-id>
          <pub-id pub-id-type="pii">healthcare11172417</pub-id>
          <pub-id pub-id-type="pmcid">PMC10487627</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ayers</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Poliak</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dredze</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Leas</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Kelley</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Faix</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Longhurst</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Hogarth</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title>
          <source>JAMA Intern Med</source>
          <year>2023</year>
          <volume>183</volume>
          <issue>6</issue>
          <fpage>589</fpage>
          <lpage>596</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37115527"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id>
          <pub-id pub-id-type="medline">37115527</pub-id>
          <pub-id pub-id-type="pii">2804309</pub-id>
          <pub-id pub-id-type="pmcid">PMC10148230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sidlauskiene</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Joye</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Auruskeviciene</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>AI-based chatbots in conversational commerce and their effects on product and price perceptions</article-title>
          <source>Electron Mark</source>
          <year>2023</year>
          <volume>33</volume>
          <issue>1</issue>
          <fpage>24</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37252674"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s12525-023-00633-8</pub-id>
          <pub-id pub-id-type="medline">37252674</pub-id>
          <pub-id pub-id-type="pii">633</pub-id>
          <pub-id pub-id-type="pmcid">PMC10206356</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bond</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Mulvenna</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Wan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Finlay</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Koene</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Brisk</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Boger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Adel</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <source>Human Centered Artificial Intelligence: Weaving UX into Algorithmic Decision Making</source>
          <year>2019</year>
          <publisher-loc>Romania</publisher-loc>
          <publisher-name>International Conference on Human-Computer Interaction</publisher-name>
          <fpage>2</fpage>
          <lpage>9</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shneiderman</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Bridging the gap between ethics and practice</article-title>
          <source>ACM Trans Interact Intell Syst</source>
          <year>2020</year>
          <volume>10</volume>
          <issue>4</issue>
          <fpage>1</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.1145/3419764</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="web">
          <article-title>Schwartz Reisman Institute for Technology and Society</article-title>
          <source>University of Toronto</source>
          <year>2024</year>
          <access-date>2024-10-08</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://srinstitute.utoronto.ca/">https://srinstitute.utoronto.ca/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Marasoiu</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The development and use of chatbots in public health: scoping review</article-title>
          <source>JMIR Hum Factors</source>
          <year>2022</year>
          <volume>9</volume>
          <issue>4</issue>
          <fpage>e35882</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://humanfactors.jmir.org/2022/4/e35882/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/35882</pub-id>
          <pub-id pub-id-type="medline">36197708</pub-id>
          <pub-id pub-id-type="pii">v9i4e35882</pub-id>
          <pub-id pub-id-type="pmcid">PMC9536768</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tayebi Arasteh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Lotfinia</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kuhl</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kather</surname>
              <given-names>JN</given-names>
            </name>
            <name name-style="western">
              <surname>Truhn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Nebelung</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Large language models streamline automated machine learning for clinical studies</article-title>
          <source>Nat Commun</source>
          <year>2024</year>
          <volume>15</volume>
          <issue>1</issue>
          <fpage>1603</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41467-024-45879-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41467-024-45879-8</pub-id>
          <pub-id pub-id-type="medline">38383555</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41467-024-45879-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC10881983</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>NH</given-names>
            </name>
            <name name-style="western">
              <surname>Entwistle</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Pfeffer</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Creation and adoption of large language models in medicine</article-title>
          <source>JAMA</source>
          <year>2023</year>
          <volume>330</volume>
          <issue>9</issue>
          <fpage>866</fpage>
          <lpage>869</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2023.14217</pub-id>
          <pub-id pub-id-type="medline">37548965</pub-id>
          <pub-id pub-id-type="pii">2808296</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Suppadungsuk</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Thongprayoon</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Miao</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Krisanapan</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Qureshi</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kashani</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Cheungpasitporn</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Exploring the potential of chatbots in critical care nephrology</article-title>
          <source>Medicines (Basel)</source>
          <year>2023</year>
          <volume>10</volume>
          <issue>10</issue>
          <fpage>58</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=medicines10100058"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/medicines10100058</pub-id>
          <pub-id pub-id-type="medline">37887265</pub-id>
          <pub-id pub-id-type="pii">medicines10100058</pub-id>
          <pub-id pub-id-type="pmcid">PMC10608511</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kaur</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chandan</surname>
              <given-names>JS</given-names>
            </name>
            <name name-style="western">
              <surname>Robbins</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Qualitative exploration of digital chatbot use in medical education: a pilot study</article-title>
          <source>Digit Health</source>
          <year>2021</year>
          <volume>7</volume>
          <fpage>20552076211038151</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/20552076211038151?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%20%200pubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/20552076211038151</pub-id>
          <pub-id pub-id-type="medline">34513002</pub-id>
          <pub-id pub-id-type="pii">10.1177_20552076211038151</pub-id>
          <pub-id pub-id-type="pmcid">PMC8424726</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>KC</surname>
              <given-names>GP</given-names>
            </name>
            <name name-style="western">
              <surname>Ranjan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ankit</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>A personalized medical assistant chatbot: Medibot</article-title>
          <source>Int J Sci Technol Eng</source>
          <year>2019</year>
          <volume>5</volume>
          <issue>7</issue>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nißen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Rüegger</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Stieger</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Flückiger</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Allemand</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>V Wangenheim</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kowatsch</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The effects of health care chatbot personas with different social roles on the client-chatbot bond and usage intentions: development of a design codebook and web-based study</article-title>
          <source>J Med Internet Res</source>
          <year>2022</year>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>e32630</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2022/4/e32630/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/32630</pub-id>
          <pub-id pub-id-type="medline">35475761</pub-id>
          <pub-id pub-id-type="pii">v24i4e32630</pub-id>
          <pub-id pub-id-type="pmcid">PMC9096656</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Trivedi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Leveraging predictive modeling, machine learning personalization, NLP customer support, and AI chatbots to increase customer loyalty</article-title>
          <source>Empir Quests Manag Essences</source>
          <year>2020</year>
          <volume>3</volume>
          <issue>3</issue>
          <fpage>1</fpage>
          <lpage>24</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>King</surname>
              <given-names>MR</given-names>
            </name>
          </person-group>
          <article-title>The future of AI in medicine: a perspective from a chatbot</article-title>
          <source>Ann Biomed Eng</source>
          <year>2023</year>
          <volume>51</volume>
          <issue>2</issue>
          <fpage>291</fpage>
          <lpage>295</lpage>
          <pub-id pub-id-type="doi">10.1007/s10439-022-03121-w</pub-id>
          <pub-id pub-id-type="medline">36572824</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10439-022-03121-w</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thirunavukarasu</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSJ</given-names>
            </name>
            <name name-style="western">
              <surname>Elangovan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gutierrez</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>TF</given-names>
            </name>
            <name name-style="western">
              <surname>Ting</surname>
              <given-names>DSW</given-names>
            </name>
          </person-group>
          <article-title>Large language models in medicine</article-title>
          <source>Nat Med</source>
          <year>2023</year>
          <volume>29</volume>
          <issue>8</issue>
          <fpage>1930</fpage>
          <lpage>1940</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-023-02448-8</pub-id>
          <pub-id pub-id-type="medline">37460753</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-023-02448-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Generative pre‐trained transformer 4 in healthcare: challenges, opportunities, and recommendations</article-title>
          <source>Med Adv</source>
          <year>2023</year>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>163</fpage>
          <lpage>166</lpage>
          <pub-id pub-id-type="doi">10.1002/med4.21</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sheth</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>HP</given-names>
            </name>
            <name name-style="western">
              <surname>Prescher</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Strelzow</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>Ethical considerations of artificial intelligence in health care: examining the role of generative pretrained transformer-4</article-title>
          <source>J Am Acad Orthop Surg</source>
          <year>2024</year>
          <volume>32</volume>
          <issue>5</issue>
          <fpage>205</fpage>
          <lpage>210</lpage>
          <pub-id pub-id-type="doi">10.5435/JAAOS-D-23-00787</pub-id>
          <pub-id pub-id-type="medline">38175996</pub-id>
          <pub-id pub-id-type="pii">00124635-990000000-00861</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Truhn</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Loeffler</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Müller-Franzes</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Nebelung</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hewitt</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Brandner</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bressem</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Foersch</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kather</surname>
              <given-names>JN</given-names>
            </name>
          </person-group>
          <article-title>Extracting structured information from unstructured histopathology reports using generative pre-trained transformer 4 (GPT-4)</article-title>
          <source>J Pathol</source>
          <year>2024</year>
          <volume>262</volume>
          <issue>3</issue>
          <fpage>310</fpage>
          <lpage>319</lpage>
          <pub-id pub-id-type="doi">10.1002/path.6232</pub-id>
          <pub-id pub-id-type="medline">38098169</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mehra</surname>
              <given-names>PS</given-names>
            </name>
          </person-group>
          <year>2023</year>
          <conf-name>2023 International Conference on IoT, Communication and Automation Technology (ICICAT)</conf-name>
          <conf-date>2023 June 04</conf-date>
          <conf-loc>Gorakhpur, India</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/icicat57735.2023.10263706</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Security implications of AI chatbots in health care</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e47551</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023/1/e47551/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/47551</pub-id>
          <pub-id pub-id-type="medline">38015597</pub-id>
          <pub-id pub-id-type="pii">v25i1e47551</pub-id>
          <pub-id pub-id-type="pmcid">PMC10716748</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Arshad</surname>
              <given-names>HB</given-names>
            </name>
            <name name-style="western">
              <surname>Butt</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>SU</given-names>
            </name>
            <name name-style="western">
              <surname>Javed</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Nasir</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and artificial intelligence in hospital level research: potential, precautions, and prospects</article-title>
          <source>Methodist Debakey Cardiovasc J</source>
          <year>2023</year>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>77</fpage>
          <lpage>84</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/38028967"/>
          </comment>
          <pub-id pub-id-type="doi">10.14797/mdcvj.1290</pub-id>
          <pub-id pub-id-type="medline">38028967</pub-id>
          <pub-id pub-id-type="pmcid">PMC10655767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Waters</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Aneja</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>JC</given-names>
            </name>
          </person-group>
          <article-title>Unlocking the power of ChatGPT, artificial intelligence, and large language models: practical suggestions for radiation oncologists</article-title>
          <source>Pract Radiat Oncol</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>6</issue>
          <fpage>e484</fpage>
          <lpage>e490</lpage>
          <pub-id pub-id-type="doi">10.1016/j.prro.2023.06.011</pub-id>
          <pub-id pub-id-type="medline">37598727</pub-id>
          <pub-id pub-id-type="pii">S1879-8500(23)00213-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Woo</surname>
              <given-names>WL</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Nima</surname>
              <given-names>RRO</given-names>
            </name>
            <name name-style="western">
              <surname>Ling</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Development of conversational artificial intelligence for pandemic healthcare query support</article-title>
          <source>Int J Auto AI Mach Learn</source>
          <year>2020</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>54</fpage>
          <lpage>79</lpage>
          <pub-id pub-id-type="doi">10.61797/ijaaiml.v1i1.35</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
            <name name-style="western">
              <surname>Sanders</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Impact of ChatGPT on medical chatbots as a disruptive technology</article-title>
          <source>Front Artif Intell</source>
          <year>2023</year>
          <volume>6</volume>
          <fpage>1166014</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37091303"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/frai.2023.1166014</pub-id>
          <pub-id pub-id-type="medline">37091303</pub-id>
          <pub-id pub-id-type="pmcid">PMC10113434</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Stahl</surname>
              <given-names>BC</given-names>
            </name>
            <name name-style="western">
              <surname>Eke</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The ethics of ChatGPT – exploring the ethical issues of an emerging technology</article-title>
          <source>Int J Inf Manage</source>
          <year>2024</year>
          <volume>74</volume>
          <fpage>102700</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ijinfomgt.2023.102700</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haupt</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Marks</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>AI-generated medical advice-GPT and beyond</article-title>
          <source>JAMA</source>
          <year>2023</year>
          <volume>329</volume>
          <issue>16</issue>
          <fpage>1349</fpage>
          <lpage>1350</lpage>
          <pub-id pub-id-type="doi">10.1001/jama.2023.5321</pub-id>
          <pub-id pub-id-type="medline">36972070</pub-id>
          <pub-id pub-id-type="pii">2803077</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yao</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>A review of evolutionary artificial neural networks</article-title>
          <source>Int J Intell Syst</source>
          <year>1993</year>
          <volume>8</volume>
          <issue>4</issue>
          <fpage>539</fpage>
          <lpage>567</lpage>
          <pub-id pub-id-type="doi">10.1002/int.4550080406</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alawida</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mejri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mehmood</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Chikhaoui</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Isaac Abiodun</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>A comprehensive study of ChatGPT: advancements, limitations, and ethical considerations in natural language processing and cybersecurity</article-title>
          <source>Information</source>
          <year>2023</year>
          <volume>14</volume>
          <issue>8</issue>
          <fpage>462</fpage>
          <pub-id pub-id-type="doi">10.3390/info14080462</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Min</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Sulem</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Veyseh</surname>
              <given-names>APB</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>TH</given-names>
            </name>
            <name name-style="western">
              <surname>Sainz</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Agirre</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Heintz</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Roth</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Recent advances in natural language processing via large pre-trained language models: a survey</article-title>
          <source>ACM Comput Surv</source>
          <year>2023</year>
          <volume>56</volume>
          <issue>2</issue>
          <fpage>1</fpage>
          <lpage>40</lpage>
          <pub-id pub-id-type="doi">10.1145/3605943</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Xia</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Qin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Poon</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>TY</given-names>
            </name>
          </person-group>
          <article-title>BioGPT: generative pre-trained transformer for biomedical text generation and mining</article-title>
          <source>Brief Bioinform</source>
          <year>2022</year>
          <volume>23</volume>
          <issue>6</issue>
          <fpage>bbac409</fpage>
          <pub-id pub-id-type="doi">10.1093/bib/bbac409</pub-id>
          <pub-id pub-id-type="medline">36156661</pub-id>
          <pub-id pub-id-type="pii">6713511</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thoppilan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>FD</given-names>
            </name>
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shazeer</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Kulshreshtha</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>HT</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bos</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Baker</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>LaMDA: language models for dialog applications</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Jan 20, 2022</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2201.08239</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Alshemali</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Kalita</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Improving the reliability of deep neural networks in NLP: a review</article-title>
          <source>Knowl Based Syst</source>
          <year>2020</year>
          <volume>191</volume>
          <fpage>105210</fpage>
          <pub-id pub-id-type="doi">10.1016/j.knosys.2019.105210</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gilbert</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Harvey</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Melvin</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Vollebregt</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Wicks</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Large language model AI chatbots require approval as medical devices</article-title>
          <source>Nat Med</source>
          <year>2023</year>
          <volume>29</volume>
          <issue>10</issue>
          <fpage>2396</fpage>
          <lpage>2398</lpage>
          <pub-id pub-id-type="doi">10.1038/s41591-023-02412-6</pub-id>
          <pub-id pub-id-type="medline">37391665</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41591-023-02412-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siddique</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>JCL</given-names>
            </name>
          </person-group>
          <article-title>Machine learning in healthcare communication</article-title>
          <source>Encyclopedia</source>
          <year>2021</year>
          <volume>1</volume>
          <issue>1</issue>
          <fpage>220</fpage>
          <lpage>239</lpage>
          <pub-id pub-id-type="doi">10.3390/encyclopedia1010021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>KQ</given-names>
            </name>
            <name name-style="western">
              <surname>Lan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Cui</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>LLM-empowered chatbots for psychiatrist and patient simulation: application and evaluation</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online May 23, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2305.13614</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schmälzle</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence for health message generation: an empirical study using a large language model (LLM) and prompt engineering</article-title>
          <source>Front Commun</source>
          <year>2023</year>
          <volume>8</volume>
          <fpage>1129082</fpage>
          <pub-id pub-id-type="doi">10.3389/fcomm.2023.1129082</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kong</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Multilingual machine translation with large language models: empirical results and analysis</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Apr 10, 2023</comment>
          <pub-id pub-id-type="doi">10.18653/v1/2024.findings-naacl.176</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van Heerden</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Pozuelo</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Kohrt</surname>
              <given-names>BA</given-names>
            </name>
          </person-group>
          <article-title>Global mental health services and the impact of artificial intelligence-powered large language models</article-title>
          <source>JAMA Psychiatry</source>
          <year>2023</year>
          <volume>80</volume>
          <issue>7</issue>
          <fpage>662</fpage>
          <lpage>664</lpage>
          <pub-id pub-id-type="doi">10.1001/jamapsychiatry.2023.1253</pub-id>
          <pub-id pub-id-type="medline">37195694</pub-id>
          <pub-id pub-id-type="pii">2804646</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jeon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Large language models in education: a focus on the complementary relationship between human teachers and ChatGPT</article-title>
          <source>Educ Inf Technol</source>
          <year>2023</year>
          <volume>28</volume>
          <issue>12</issue>
          <fpage>15873</fpage>
          <lpage>15892</lpage>
          <pub-id pub-id-type="doi">10.1007/s10639-023-11834-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Urbanowicz</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Martin</surname>
              <given-names>PCN</given-names>
            </name>
            <name name-style="western">
              <surname>O'Connor</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bright</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Tatonetti</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Won</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Gonzalez-Hernandez</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>JH</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and large language models in academia: opportunities and challenges</article-title>
          <source>BioData Min</source>
          <year>2023</year>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>20</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://biodatamining.biomedcentral.com/articles/10.1186/s13040-023-00339-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13040-023-00339-9</pub-id>
          <pub-id pub-id-type="medline">37443040</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13040-023-00339-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC10339472</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ouyang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Harman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>LLM is like a box of chocolates: the non-determinism of ChatGPT in code generation</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Aug 5, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2308.02828</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Shang</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Generating efficient training data via LLM-based attribute manipulation</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Jul 14, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2307.07099</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tasioulas</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence, humanistic ethics</article-title>
          <source>Daedalus</source>
          <year>2022</year>
          <volume>151</volume>
          <issue>2</issue>
          <fpage>232</fpage>
          <lpage>243</lpage>
          <pub-id pub-id-type="doi">10.1162/daed_a_01912</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Angelis</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Baglivo</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Arzilli</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Privitera</surname>
              <given-names>GP</given-names>
            </name>
            <name name-style="western">
              <surname>Ferragina</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tozzi</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Rizzo</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and the rise of large language models: the new AI-driven infodemic threat in public health</article-title>
          <source>Front Public Health</source>
          <year>2023</year>
          <volume>11</volume>
          <fpage>1166120</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37181697"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpubh.2023.1166120</pub-id>
          <pub-id pub-id-type="medline">37181697</pub-id>
          <pub-id pub-id-type="pmcid">PMC10166793</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karabacak</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Margetis</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Embracing large language models for medical applications: opportunities and challenges</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>e39305</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37378099"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.39305</pub-id>
          <pub-id pub-id-type="medline">37378099</pub-id>
          <pub-id pub-id-type="pmcid">PMC10292051</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Siau</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence (AI) ethics: ethics of AI and ethical AI</article-title>
          <source>J Database Manag</source>
          <year>2020</year>
          <volume>31</volume>
          <issue>2</issue>
          <fpage>74</fpage>
          <lpage>87</lpage>
          <pub-id pub-id-type="doi">10.4018/jdm.2020040105</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Teran</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Pincay</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wallimann-Helmer</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Portmann</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>A literature review on digital ethics from a humanistic and sustainable perspective</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 14th International Conference on Theory and Practice of Electronic Governance</conf-name>
          <conf-date>2021 Oct 6-8</conf-date>
          <conf-loc>New York</conf-loc>
          <fpage>57</fpage>
          <lpage>64</lpage>
          <pub-id pub-id-type="doi">10.1145/3494193.3494295</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Deep ethical learning: taking the interplay of human and artificial intelligence seriously</article-title>
          <source>Hastings Cent Rep</source>
          <year>2019</year>
          <volume>49</volume>
          <issue>1</issue>
          <fpage>36</fpage>
          <lpage>39</lpage>
          <pub-id pub-id-type="doi">10.1002/hast.977</pub-id>
          <pub-id pub-id-type="medline">30790317</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Varlamov</surname>
              <given-names>OO</given-names>
            </name>
            <name name-style="western">
              <surname>Chuvikov</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Adamova</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Petrov</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Zabolotskaya</surname>
              <given-names>IK</given-names>
            </name>
            <name name-style="western">
              <surname>Zhilina</surname>
              <given-names>TN</given-names>
            </name>
          </person-group>
          <article-title>Logical, philosophical and ethical aspects of AI in medicine</article-title>
          <source>Int J Mach Learn Comput</source>
          <year>2019</year>
          <volume>9</volume>
          <issue>6</issue>
          <fpage>868</fpage>
          <pub-id pub-id-type="doi">10.18178/ijmlc.2019.9.6.885</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Lu</surname>
              <given-names>KJQ</given-names>
            </name>
            <name name-style="western">
              <surname>Meaney</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kemppainen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Punnett</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Leung</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Assessment of resident and AI chatbot performance on the University of Toronto family medicine residency progress test: comparative study</article-title>
          <source>JMIR Med Educ</source>
          <year>2023</year>
          <volume>9</volume>
          <fpage>e50514</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mededu.jmir.org/2023/1/e50514/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/50514</pub-id>
          <pub-id pub-id-type="medline">37725411</pub-id>
          <pub-id pub-id-type="pii">v9i1e50514</pub-id>
          <pub-id pub-id-type="pmcid">PMC10548315</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Patrinely</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stone</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zimmerman</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Donald</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Berkowitz</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Finn</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jahangir</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Scoville</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Reese</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bastarache</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>van der Heijden</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wright</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Alexander</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Choe</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chastain</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Zic</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Horst</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Turker</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Agarwal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Osmundson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Idrees</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kiernan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Padmanabhan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bailey</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Schlegel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chambless</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gibson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Osterman</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wheless</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Assessing the accuracy and reliability of AI-generated medical responses: an evaluation of the chat-GPT model</article-title>
          <source>Res Sq</source>
          <year>2023</year>
          <fpage>1</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36909565"/>
          </comment>
          <pub-id pub-id-type="doi">10.21203/rs.3.rs-2566942/v1</pub-id>
          <pub-id pub-id-type="medline">36909565</pub-id>
          <pub-id pub-id-type="pii">rs.3.rs-2566942</pub-id>
          <pub-id pub-id-type="pmcid">PMC10002821</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ostherr</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence and medical humanities</article-title>
          <source>J Med Humanit</source>
          <year>2022</year>
          <volume>43</volume>
          <issue>2</issue>
          <fpage>211</fpage>
          <lpage>232</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32654043"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10912-020-09636-4</pub-id>
          <pub-id pub-id-type="medline">32654043</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10912-020-09636-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC9242900</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jobin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ienca</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vayena</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>The global landscape of AI ethics guidelines</article-title>
          <source>Nat Mach Intell</source>
          <year>2019</year>
          <volume>1</volume>
          <issue>9</issue>
          <fpage>389</fpage>
          <lpage>399</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-019-0088-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dave</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Athaluri</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT in medicine: an overview of its applications, advantages, limitations, future prospects, and ethical considerations</article-title>
          <source>Front Artif Intell</source>
          <year>2023</year>
          <volume>6</volume>
          <fpage>1169595</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37215063"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/frai.2023.1169595</pub-id>
          <pub-id pub-id-type="medline">37215063</pub-id>
          <pub-id pub-id-type="pmcid">PMC10192861</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Haltaufderheide</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ranisch</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>The ethics of ChatGPT in medicine and healthcare: a systematic review on large language models (LLMs)</article-title>
          <source>NPJ Digit Med</source>
          <year>2024</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>183</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41746-024-01157-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41746-024-01157-x</pub-id>
          <pub-id pub-id-type="medline">38977771</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41746-024-01157-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC11231310</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Chatbots in medical research: advantages and limitations of artificial intelligence-enabled writing with a focus on ChatGPT as an author</article-title>
          <source>Clin Nucl Med</source>
          <year>2023</year>
          <volume>48</volume>
          <issue>9</issue>
          <fpage>838</fpage>
          <lpage>839</lpage>
          <pub-id pub-id-type="doi">10.1097/RLU.0000000000004665</pub-id>
          <pub-id pub-id-type="medline">37083827</pub-id>
          <pub-id pub-id-type="pii">00003072-990000000-00523</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Patrinely</surname>
              <given-names>JR</given-names>
            </name>
            <name name-style="western">
              <surname>Stone</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Zimmerman</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Donald</surname>
              <given-names>RR</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Berkowitz</surname>
              <given-names>ST</given-names>
            </name>
            <name name-style="western">
              <surname>Finn</surname>
              <given-names>AP</given-names>
            </name>
            <name name-style="western">
              <surname>Jahangir</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Scoville</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Reese</surname>
              <given-names>TS</given-names>
            </name>
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Bastarache</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>van der Heijden</surname>
              <given-names>YF</given-names>
            </name>
            <name name-style="western">
              <surname>Wright</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ye</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Alexander</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Choe</surname>
              <given-names>JH</given-names>
            </name>
            <name name-style="western">
              <surname>Chastain</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Zic</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Horst</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Turker</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Agarwal</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Osmundson</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Idrees</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kiernan</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Padmanabhan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bailey</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Schlegel</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Chambless</surname>
              <given-names>LB</given-names>
            </name>
            <name name-style="western">
              <surname>Gibson</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Osterman</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wheless</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>DB</given-names>
            </name>
          </person-group>
          <article-title>Accuracy and reliability of chatbot responses to physician questions</article-title>
          <source>JAMA Netw Open</source>
          <year>2023</year>
          <month>10</month>
          <day>02</day>
          <volume>6</volume>
          <issue>10</issue>
          <fpage>e2336483</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37782499"/>
          </comment>
          <pub-id pub-id-type="doi">10.1001/jamanetworkopen.2023.36483</pub-id>
          <pub-id pub-id-type="medline">37782499</pub-id>
          <pub-id pub-id-type="pii">2809975</pub-id>
          <pub-id pub-id-type="pmcid">PMC10546234</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oca</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Meller</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Parikh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>McCoy</surname>
              <given-names>AO</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sudharshan</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gupta</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang-Nunes</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Bias and inaccuracy in AI chatbot ophthalmologist recommendations</article-title>
          <source>Cureus</source>
          <year>2023</year>
          <volume>15</volume>
          <issue>9</issue>
          <fpage>e45911</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37885556"/>
          </comment>
          <pub-id pub-id-type="doi">10.7759/cureus.45911</pub-id>
          <pub-id pub-id-type="medline">37885556</pub-id>
          <pub-id pub-id-type="pmcid">PMC10599183</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Leslie</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Understanding artificial intelligence ethics and safety</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Jun 11, 2019</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1906.05684</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Au Yeung</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kraljevic</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Luintel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Balston</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Idowu</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Dobson</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Teo</surname>
              <given-names>JT</given-names>
            </name>
          </person-group>
          <article-title>AI chatbots not yet ready for clinical use</article-title>
          <source>Front Digit Health</source>
          <year>2023</year>
          <volume>5</volume>
          <fpage>1161098</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37122812"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fdgth.2023.1161098</pub-id>
          <pub-id pub-id-type="medline">37122812</pub-id>
          <pub-id pub-id-type="pmcid">PMC10130576</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>May</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Denecke</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Security, privacy, and healthcare-related conversational agents: a scoping review</article-title>
          <source>Inform Health Soc Care</source>
          <year>2022</year>
          <volume>47</volume>
          <issue>2</issue>
          <fpage>194</fpage>
          <lpage>210</lpage>
          <pub-id pub-id-type="doi">10.1080/17538157.2021.1983578</pub-id>
          <pub-id pub-id-type="medline">34617857</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mijwil</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Aljanabi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>AH</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: exploring the role of cybersecurity in the protection of medical information</article-title>
          <source>Mesopotamian J Cybersecur</source>
          <year>2023</year>
          <fpage>18</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.58496/mjcs/2023/004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sheth</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shekarpour</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yip</surname>
              <given-names>HY</given-names>
            </name>
          </person-group>
          <article-title>Extending patient-chatbot experience with internet-of-things and background knowledge: case studies with healthcare applications</article-title>
          <source>IEEE Intell Syst</source>
          <year>2019</year>
          <volume>34</volume>
          <issue>4</issue>
          <fpage>24</fpage>
          <lpage>30</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34690576"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/mis.2019.2905748</pub-id>
          <pub-id pub-id-type="medline">34690576</pub-id>
          <pub-id pub-id-type="pmcid">PMC8536202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Musheyev</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bockelman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Loeb</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kabarriti</surname>
              <given-names>AE</given-names>
            </name>
          </person-group>
          <article-title>Assessment of artificial intelligence chatbot responses to top searched queries about cancer</article-title>
          <source>JAMA Oncol</source>
          <year>2023</year>
          <volume>9</volume>
          <issue>10</issue>
          <fpage>1437</fpage>
          <lpage>1440</lpage>
          <pub-id pub-id-type="doi">10.1001/jamaoncol.2023.2947</pub-id>
          <pub-id pub-id-type="medline">37615960</pub-id>
          <pub-id pub-id-type="pii">2808733</pub-id>
          <pub-id pub-id-type="pmcid">PMC10450581</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Swick</surname>
              <given-names>RK</given-names>
            </name>
          </person-group>
          <article-title>The accuracy of artificial intelligence (AI) chatbots in telemedicine</article-title>
          <source>J S C Acad Sci</source>
          <year>2021</year>
          <volume>19</volume>
          <issue>2</issue>
          <fpage>17</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bélisle-Pipon</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Monteferrante</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Roy</surname>
              <given-names>MC</given-names>
            </name>
            <name name-style="western">
              <surname>Couture</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Artificial intelligence ethics has a black box problem</article-title>
          <source>AI Soc</source>
          <year>2022</year>
          <volume>38</volume>
          <issue>4</issue>
          <fpage>1507</fpage>
          <lpage>1522</lpage>
          <pub-id pub-id-type="doi">10.1007/s00146-021-01380-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>YL</given-names>
            </name>
            <name name-style="western">
              <surname>Por</surname>
              <given-names>LY</given-names>
            </name>
            <name name-style="western">
              <surname>Ku</surname>
              <given-names>CS</given-names>
            </name>
          </person-group>
          <article-title>A systematic literature review of information security in chatbots</article-title>
          <source>Appl Sci</source>
          <year>2023</year>
          <volume>13</volume>
          <issue>11</issue>
          <fpage>6355</fpage>
          <pub-id pub-id-type="doi">10.3390/app13116355</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Liebrenz</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schleifer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Buadze</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bhugra</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Generating scholarly content with ChatGPT: ethical challenges for medical publishing</article-title>
          <source>Lancet Digit Health</source>
          <year>2023</year>
          <volume>5</volume>
          <issue>3</issue>
          <fpage>e105</fpage>
          <lpage>e106</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://boris.unibe.ch/id/eprint/178562"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/S2589-7500(23)00019-5</pub-id>
          <pub-id pub-id-type="medline">36754725</pub-id>
          <pub-id pub-id-type="pii">S2589-7500(23)00019-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Loh</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT and generative AI chatbots: challenges and opportunities for science, medicine and medical leaders</article-title>
          <source>BMJ Lead</source>
          <year>2023</year>
          <fpage>000797</fpage>
          <pub-id pub-id-type="doi">10.1136/leader-2023-000797</pub-id>
          <pub-id pub-id-type="medline">37192124</pub-id>
          <pub-id pub-id-type="pii">leader-2023-000797</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Devaram</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Empathic chatbot: emotional intelligence for mental health well-being</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Dec 15, 2020</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2012.09130</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Walker</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Ghani</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kuemmerli</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Nebiker</surname>
              <given-names>CA</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>BP</given-names>
            </name>
            <name name-style="western">
              <surname>Raptis</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Staubli</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Reliability of medical information provided by ChatGPT: assessment against clinical guidelines and patient information quality instrument</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e47479</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023//e47479/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/47479</pub-id>
          <pub-id pub-id-type="medline">37389908</pub-id>
          <pub-id pub-id-type="pii">v25i1e47479</pub-id>
          <pub-id pub-id-type="pmcid">PMC10365578</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pryss</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kraft</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Baumeister</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Winkler</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Probst</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Reichert</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Langguth</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Spiliopoulou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schlee</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Baumeister</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Montag</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Using chatbots to support medical and psychological treatment procedures: challenges, opportunities, technologies, reference architecture</article-title>
          <source>Digital Phenotyping and Mobile Sensing: New Developments in Psychoinformatics</source>
          <year>2019</year>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer</publisher-name>
          <fpage>249</fpage>
          <lpage>260</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Parviainen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rantala</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Chatbot breakthrough in the 2020s? An ethical reflection on the trend of automated consultations in health care</article-title>
          <source>Med Health Care Philos</source>
          <year>2022</year>
          <volume>25</volume>
          <issue>1</issue>
          <fpage>61</fpage>
          <lpage>71</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34480711"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11019-021-10049-w</pub-id>
          <pub-id pub-id-type="medline">34480711</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11019-021-10049-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC8416570</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref80">
        <label>80</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ju</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zeng</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fang</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>MedDialog: large-scale medical dialogue datasets</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</conf-name>
          <conf-date>2020 Nov 10</conf-date>
          <conf-loc>San Diego</conf-loc>
          <fpage>9241</fpage>
          <lpage>9250</lpage>
          <pub-id pub-id-type="doi">10.18653/v1/2020.emnlp-main.743</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref81">
        <label>81</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chakraborty</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Paul</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Ghatak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pandey</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>KU</given-names>
            </name>
            <name name-style="western">
              <surname>Shah</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>An AI-based medical chatbot model for infectious disease prediction</article-title>
          <source>IEEE Access</source>
          <year>2022</year>
          <volume>10</volume>
          <fpage>128469</fpage>
          <lpage>128483</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2022.3227208</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref82">
        <label>82</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zhao</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Peng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Logan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xiang</surname>
              <given-names>X</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of the current state of chatbots for digital health: scoping review</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e47217</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023//e47217/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/47217</pub-id>
          <pub-id pub-id-type="medline">38113097</pub-id>
          <pub-id pub-id-type="pii">v25i1e47217</pub-id>
          <pub-id pub-id-type="pmcid">PMC10762606</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref83">
        <label>83</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Omar</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Unabashed bias: how health-care organizations can significantly reduce bias in the face of unaccountable AI</article-title>
          <source>Denv L Rev</source>
          <year>2020</year>
          <volume>98</volume>
          <fpage>807</fpage>
        </nlm-citation>
      </ref>
      <ref id="ref84">
        <label>84</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adam</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Balagopalan</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Alsentzer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Christia</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ghassemi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Mitigating the impact of biased artificial intelligence in emergency decision-making</article-title>
          <source>Commun Med (Lond)</source>
          <year>2022</year>
          <volume>2</volume>
          <issue>1</issue>
          <fpage>149</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s43856-022-00214-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s43856-022-00214-4</pub-id>
          <pub-id pub-id-type="medline">36414774</pub-id>
          <pub-id pub-id-type="pii">10.1038/s43856-022-00214-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC9681767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref85">
        <label>85</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Denecke</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Abd-Alrazaq</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Househ</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Warren</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Evaluation metrics for health chatbots: a delphi study</article-title>
          <source>Methods Inf Med</source>
          <year>2021</year>
          <volume>60</volume>
          <issue>5-06</issue>
          <fpage>171</fpage>
          <lpage>179</lpage>
          <pub-id pub-id-type="doi">10.1055/s-0041-1736664</pub-id>
          <pub-id pub-id-type="medline">34719011</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref86">
        <label>86</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Radziwill</surname>
              <given-names>NM</given-names>
            </name>
            <name name-style="western">
              <surname>Benton</surname>
              <given-names>MC</given-names>
            </name>
          </person-group>
          <article-title>Evaluating quality of chatbots and intelligent conversational agents</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Apr 15, 2017</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.1704.04579</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref87">
        <label>87</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hauglid</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Mahler</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Doctor chatbot: the EU’s regulatory prescription for generative medical AI</article-title>
          <source>Oslo Law Review</source>
          <year>2023</year>
          <volume>10</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.18261/olr.10.1.1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref88">
        <label>88</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vaishya</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Misra</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vaish</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: is this version good for healthcare and research?</article-title>
          <source>Diabetes Metab Syndr</source>
          <year>2023</year>
          <volume>17</volume>
          <issue>4</issue>
          <fpage>102744</fpage>
          <pub-id pub-id-type="doi">10.1016/j.dsx.2023.102744</pub-id>
          <pub-id pub-id-type="medline">36989584</pub-id>
          <pub-id pub-id-type="pii">S1871-4021(23)00040-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref89">
        <label>89</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lindebaum</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fleming</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT undermines human reflexivity, scientific responsibility and responsible management research</article-title>
          <source>Br J Manag</source>
          <year>2023</year>
          <volume>35</volume>
          <issue>2</issue>
          <fpage>566</fpage>
          <lpage>575</lpage>
          <pub-id pub-id-type="doi">10.1111/1467-8551.12781</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref90">
        <label>90</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heaven</surname>
              <given-names>WD</given-names>
            </name>
          </person-group>
          <article-title>Geoffrey Hinton tells us why he's now scared of the tech he helped build</article-title>
          <source>MIT Technology Review</source>
          <year>2023</year>
          <access-date>2024-10-16</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.technologyreview.com/2023/05/02/1072528/geoffrey-hinton-google-why-scared-ai/">https://www.technologyreview.com/2023/05/02/1072528/geoffrey-hinton-google-why-scared-ai/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref91">
        <label>91</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Mou</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lai</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Luo</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Language and cultural bias in AI: comparing the performance of large language models developed in different countries on traditional Chinese medicine highlights the need for localized models</article-title>
          <source>J Transl Med</source>
          <year>2024</year>
          <volume>22</volume>
          <issue>1</issue>
          <fpage>319</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://translational-medicine.biomedcentral.com/articles/10.1186/s12967-024-05128-4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s12967-024-05128-4</pub-id>
          <pub-id pub-id-type="medline">38553705</pub-id>
          <pub-id pub-id-type="pii">10.1186/s12967-024-05128-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC10981296</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref92">
        <label>92</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sallam</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT utility in healthcare education, research, and practice: systematic review on the promising perspectives and valid concerns</article-title>
          <source>Healthcare (Basel)</source>
          <year>2023</year>
          <volume>11</volume>
          <issue>6</issue>
          <fpage>887</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=healthcare11060887"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/healthcare11060887</pub-id>
          <pub-id pub-id-type="medline">36981544</pub-id>
          <pub-id pub-id-type="pii">healthcare11060887</pub-id>
          <pub-id pub-id-type="pmcid">PMC10048148</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref93">
        <label>93</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Yeo</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Medical specialty recommendations by an artificial intelligence chatbot on a smartphone: development and deployment</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>e27460</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/5/e27460/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/27460</pub-id>
          <pub-id pub-id-type="medline">33882012</pub-id>
          <pub-id pub-id-type="pii">v23i5e27460</pub-id>
          <pub-id pub-id-type="pmcid">PMC8104000</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref94">
        <label>94</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aljanabi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>ChatGPT: future directions and open possibilities</article-title>
          <source>Mesopotamian J Cybersecur</source>
          <year>2023</year>
          <fpage>16</fpage>
          <lpage>17</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref95">
        <label>95</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Ethical considerations of using ChatGPT in health care</article-title>
          <source>J Med Internet Res</source>
          <year>2023</year>
          <volume>25</volume>
          <fpage>e48009</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2023/1/e48009/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/48009</pub-id>
          <pub-id pub-id-type="medline">37566454</pub-id>
          <pub-id pub-id-type="pii">v25i1e48009</pub-id>
          <pub-id pub-id-type="pmcid">PMC10457697</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref96">
        <label>96</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zenil</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tegnér</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Abrahão</surname>
              <given-names>FS</given-names>
            </name>
            <name name-style="western">
              <surname>Lavin</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kumar</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Frey</surname>
              <given-names>JG</given-names>
            </name>
            <name name-style="western">
              <surname>Weller</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Soldatova</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bundy</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jennings</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Takahashi</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The future of fundamental science led by generative closed-loop artificial intelligence</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online Jul 9, 2023</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2307.07522</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref97">
        <label>97</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akinci D'Antonoli</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Stanzione</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bluethgen</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vernuccio</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Ugga</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Klontzas</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Cuocolo</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cannella</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Koçak</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Large language models in radiology: fundamentals, applications, ethical considerations, risks, and future directions</article-title>
          <source>Diagn Interv Radiol</source>
          <year>2024</year>
          <volume>30</volume>
          <issue>2</issue>
          <fpage>80</fpage>
          <lpage>90</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/37789676"/>
          </comment>
          <pub-id pub-id-type="doi">10.4274/dir.2023.232417</pub-id>
          <pub-id pub-id-type="medline">37789676</pub-id>
          <pub-id pub-id-type="pmcid">PMC10916534</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref98">
        <label>98</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schukow</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>SC</given-names>
            </name>
            <name name-style="western">
              <surname>Landgrebe</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Parasuraman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Folaranmi</surname>
              <given-names>OO</given-names>
            </name>
            <name name-style="western">
              <surname>Paner</surname>
              <given-names>GP</given-names>
            </name>
            <name name-style="western">
              <surname>Amin</surname>
              <given-names>MB</given-names>
            </name>
          </person-group>
          <article-title>Application of ChatGPT in routine diagnostic pathology: promises, pitfalls, and potential future directions</article-title>
          <source>Adv Anat Pathol</source>
          <year>2024</year>
          <volume>31</volume>
          <issue>1</issue>
          <fpage>15</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.1097/PAP.0000000000000406</pub-id>
          <pub-id pub-id-type="medline">37501529</pub-id>
          <pub-id pub-id-type="pii">00125480-990000000-00063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref99">
        <label>99</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pearman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Young</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Cranor</surname>
              <given-names>LF</given-names>
            </name>
          </person-group>
          <article-title>User-friendly yet rarely read: a case study on the redesign of an online HIPAA authorization</article-title>
          <year>2022</year>
          <conf-name>Proceedings on Privacy Enhancing Technologies</conf-name>
          <conf-date>2022 July 01</conf-date>
          <conf-loc>Virginia</conf-loc>
          <fpage>558</fpage>
          <lpage>581</lpage>
          <pub-id pub-id-type="doi">10.56553/popets-2022-0086</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref100">
        <label>100</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frangoudes</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hadjiaros</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schiza</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Matsangidou</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Tsivitanidou</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Neokleous</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>An overview of the use of chatbots in medical and healthcare education</article-title>
          <year>2021</year>
          <conf-name>International Conference on Human-Computer Interaction</conf-name>
          <conf-date>2021 July 03</conf-date>
          <conf-loc>Cham</conf-loc>
          <publisher-name>Springer International Publishing</publisher-name>
          <fpage>170</fpage>
          <lpage>184</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-77943-6_11</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref101">
        <label>101</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nicolescu</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tudorache</surname>
              <given-names>MT</given-names>
            </name>
          </person-group>
          <article-title>Human-computer interaction in customer service: the experience with AI chatbots—a systematic literature review</article-title>
          <source>Electronics</source>
          <year>2022</year>
          <volume>11</volume>
          <issue>10</issue>
          <fpage>1579</fpage>
          <pub-id pub-id-type="doi">10.3390/electronics11101579</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
