<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Bioinform Biotech</journal-id><journal-id journal-id-type="publisher-id">bioinform</journal-id><journal-id journal-id-type="index">19</journal-id><journal-title>JMIR Bioinformatics and Biotechnology</journal-title><abbrev-journal-title>JMIR Bioinform Biotech</abbrev-journal-title><issn pub-type="epub">2563-3570</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v7i1e85212</article-id><article-id pub-id-type="doi">10.2196/85212</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>The AudioGene Translational Dashboard for Diagnosing Autosomal Dominant Nonsyndromic Hearing Loss: Phenotypic Data Visualization and Analysis Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>DeSollar</surname><given-names>Benjamin</given-names></name><degrees>BSE, MSE</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Schaefer</surname><given-names>Nathan</given-names></name><degrees>BSE</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Walls</surname><given-names>Daniel</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Odell</surname><given-names>Amanda 
M</given-names></name><degrees>MS</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Booth</surname><given-names>Kevin T A</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Azaiez</surname><given-names>Hela</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Schnieders</surname><given-names>Michael</given-names></name><degrees>DSC</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Smith</surname><given-names>Richard J H</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Braun</surname><given-names>Terry</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff7">7</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Casavant</surname><given-names>Thomas</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Electrical and Computer Engineering, University of Iowa</institution><addr-line>103 South Capitol Street, Room 5316</addr-line><addr-line>Iowa City</addr-line><addr-line>IA</addr-line><country>United States</country></aff><aff id="aff2"><institution>Department of Otolaryngology, Head and Neck Surgery, University of Iowa</institution><addr-line>Iowa</addr-line><addr-line>IA</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Medical and Molecular Genetics, Indiana University</institution><addr-line>340 West 10th 
street</addr-line><addr-line>IN</addr-line><country>United States</country></aff><aff id="aff4"><institution>Department of Otolaryngology&#x2014;Head and Neck Surgery, School of Medicine, Indiana University</institution><addr-line>Indianapolis</addr-line><addr-line>IN</addr-line><country>United States</country></aff><aff id="aff5"><institution>Department of Biochemistry and Molecular Biology, University of Iowa</institution><addr-line>Iowa City</addr-line><addr-line>IA</addr-line><country>United States</country></aff><aff id="aff6"><institution>Department of Molecular Physiology and Biophysics, University of Iowa</institution><addr-line>Iowa City</addr-line><addr-line>IA</addr-line><country>United States</country></aff><aff id="aff7"><institution>Department of Biomedical Engineering, University of Iowa</institution><addr-line>Iowa City</addr-line><addr-line>IA</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Hacking</surname><given-names>Sean</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Wu</surname><given-names>Hao</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Sunny</surname><given-names/></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Au</surname><given-names>Chi Lik</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Goar</surname><given-names>Wesley</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Thomas Casavant, PhD, Department of Electrical and Computer Engineering, University of Iowa, 103 South Capitol Street, Room 5316, Iowa City, IA, United States, 1 319-335-5953; <email>tom-casavant@uiowa.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date 
pub-type="epub"><day>14</day><month>4</month><year>2026</year></pub-date><volume>7</volume><elocation-id>e85212</elocation-id><history><date date-type="received"><day>03</day><month>10</month><year>2025</year></date><date date-type="rev-recd"><day>12</day><month>03</month><year>2026</year></date><date date-type="accepted"><day>12</day><month>03</month><year>2026</year></date></history><copyright-statement>&#x00A9; Benjamin DeSollar, Nathan Schaefer, Daniel Walls, Amanda M Odell, Kevin T A Booth, Hela Azaiez, Michael Schnieders, Richard J H Smith, Terry Braun, Thomas Casavant. Originally published in JMIR Bioinformatics and Biotechnology (<ext-link ext-link-type="uri" xlink:href="https://bioinform.jmir.org">https://bioinform.jmir.org</ext-link>), 14.4.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Bioinformatics and Biotechnology, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://bioinform.jmir.org/">https://bioinform.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://bioinform.jmir.org/2026/1/e85212"/><abstract><sec><title>Background</title><p>Autosomal dominant nonsyndromic hearing loss (ADNSHL) is highly heterogeneous, with more than 64 genes implicated in its etiology. 
This complexity limits the diagnostic power of clinical examinations and audiometry alone, while existing computational approaches have achieved only moderate accuracy and often lack interpretability. As precision medicine increasingly emphasizes genotype-phenotype correlations, there is a recognized need for diagnostic tools that provide clinicians with transparent, interpretable outputs.</p></sec><sec><title>Objective</title><p>This study aimed to develop and evaluate the AudioGene Translational Dashboard, an interpretable clinical informatics tool that integrates machine learning models and interactive visualizations to enhance genotype-phenotype correlations and support diagnostic decision-making in ADNSHL.</p></sec><sec sec-type="methods"><title>Methods</title><p>We developed the AudioGene Translational Dashboard, integrating 2 machine learning models (AudioGene version 4 and AudioGene version 9.1) with 6 interactive visualization tools. AudioGene version 4 uses a multi-instance support vector machine classifier for patients with multiple audiograms, while AudioGene version 9.1 combines adaptive boosting, k-nearest neighbors, random forest models, and logistic regression for patients with a single audiogram. Visualizations include audiometric profile plots, audioprofile surfaces, clustering analyses, and data distribution charts designed to facilitate clinical interpretation.</p></sec><sec sec-type="results"><title>Results</title><p>The AudioGene Translational Dashboard was developed to address the &#x201C;70/30&#x201D; phenomenon, indicating a 74% likelihood that the causative gene is among the top 3 predicted genes, thereby providing clinicians with a clear confidence indicator (&#x201C;green flag&#x201D;) or a caution alert (&#x201C;red flag&#x201D;) during diagnosis. While this level of performance is well suited for hypothesis generation, the remaining uncertainty underscores the need for interpretive context in clinical decision-making. 
Visualization tools enhanced clinicians&#x2019; ability to interpret and correlate phenotypic data with predicted genetic outcomes, improving diagnostic confidence and interpretability.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The AudioGene Translational Dashboard advances clinical informatics in genetic diagnosis of ADNSHL by integrating explainable artificial intelligence with interactive visualizations, enhancing clinical interpretability and diagnostic accuracy. This approach facilitates informed clinical decision-making, highlights the translational potential of genotype-phenotype computational models, and supports precision medicine in hearing loss diagnostics. Future enhancements will target improving class balance and incorporating additional user-customizable features to further optimize clinical applicability.</p></sec></abstract><kwd-group><kwd>autosomal dominant nonsyndromic hearing loss</kwd><kwd>machine learning</kwd><kwd>explainable artificial intelligence</kwd><kwd>clinical decision support systems</kwd><kwd>genotype-phenotype correlation</kwd><kwd>audiometry</kwd><kwd>genetic testing</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Autosomal dominant nonsyndromic hearing loss (ADNSHL) presents a significant genetic diagnostic challenge due to its underlying heterogeneity&#x2014;more than 64 genes are implicated in its etiology [<xref ref-type="bibr" rid="ref1">1</xref>]. Because of this complex genetic landscape, computational tools designed to correlate audiogram profiles (commonly called audioprofiles) with specific genes have achieved only moderate success [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. One such tool, which we developed approximately 15 years ago, is AudioGene. 
AudioGene uses numerous machine learning (ML) approaches to improve diagnostic precision [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. These approaches include semisupervised support vector machines (SVMs), ensemble models, and hyper-tuning methods. However, challenges such as data imbalance and class sparsity continue to restrict the accuracy of these models.</p><p>Precision medicine harnesses information about the genome of an individual, environment, and lifestyle to guide medical care. With heterogeneous disorders such as ADNSHL, genetic variant interpretation can be challenging, complicating the diagnostic process and impacting patient care. Computational tools may improve the precision and reliability of genetic assessments by capitalizing on genotype-phenotype associations [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Current diagnostic methods for ADNSHL largely rely on clinical examination and audiometry, which do not provide sufficient resolution for the complex genetic landscape of ADNSHL [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. However, with the availability of ML and artificial intelligence&#x2013;driven approaches, there has been a shift toward integrating computational and visualization tools with genetic diagnostics to improve accuracy and predictive power [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>].</p><p>To address these challenges, we have developed the AudioGene Translational Dashboard with the goal of enhancing both the accuracy and interpretability of genetic predictions. 
A feature of the AudioGene Translational Dashboard is the &#x201C;70/30&#x201D; phenomenon: by integrating the results from both models on a training dataset comprising 3189 audiograms from 1445 patients, we observed that the correct disease-causing gene was predicted within the top 3 predictions 74% of the time, with incorrect predictions accounting for the remaining 26%, hence &#x201C;70/30.&#x201D; This observation signals to health care providers when they can have confidence in the top predictions, serving as a &#x201C;green flag&#x201D; or &#x201C;red flag&#x201D; in the diagnostic process. Having a true positive rate of 70% is beneficial from a research perspective; however, for a diagnostic tool, the remaining 30% represents some risk that necessitates additional interpretative context. By providing this context, the AudioGene Translational Dashboard enables health care providers to weigh their confidence in the predictions, supporting more informed diagnostic decisions.</p><p>The AudioGene Translational Dashboard was introduced into the AudioGene toolset to increase transparency into the &#x201C;black box&#x201D; underlying the models by providing explainable artificial intelligence (XAI) to enhance model interpretability and utility in clinical settings, in line with trends in precision medicine that emphasize the importance of genotype-phenotype associations in improving diagnostic outcomes [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref10">10</xref>].</p></sec><sec id="s1-2"><title>Related Works</title><p>Early attempts to map audiometric phenotypes to their underlying genotypes were spearheaded by AudioGene version 4 (AG4), a semisupervised multi-instance SVM that treats the collection of audiograms for a single patient as a &#x201C;bag&#x201D; and ranks loci according to pair-wise&#x2013;coupled probability estimates [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. 
Building on this foundation, AudioGene version 9.1 (AG9.1) introduced selective intraensemble data partitioning: training examples are first divided by gene-specific data volume, patient age, and audiogram shape, then modeled with a committee of k-nearest neighbor (KNN), adaptive boosting, and random forest subclassifiers, whose outputs are fused by logistic regression. AG9.1 offers a top-3 accuracy of 77.8%, with a precision of 0.51 and a recall of 0.56, at the cost of introducing a more complex model. We report top-3 accuracy rather than top-1 accuracy because, in the context of gene prioritization for validation sequencing, the cost of excluding the true causative gene is higher than the cost of evaluating a small number of candidate genes. In addition, the top-3 threshold represents a practical trade-off between high confidence in predictions and an acceptable loss of significance when selecting genes for sequence-based validation [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. Both frameworks have improved locus-ranking accuracy for the 23 well-curated ADNSHL genes that account for roughly three-quarters of cases in populations of European ancestry [<xref ref-type="bibr" rid="ref14">14</xref>]. Nonetheless, their predictions can still be difficult to interpret when class imbalance, sparse age coverage, or atypical audiogram morphologies are present.</p><p>Complementary to algorithmic advances, domain-specific visualization has been welcomed as a potentially beneficial tool for clinical use. Audioprofile surfaces (APS) plot 3D trajectories of frequency-specific threshold shift over time, revealing gene-characteristic progression patterns that are not obvious in 2D audiograms [<xref ref-type="bibr" rid="ref15">15</xref>]. 
Circle-based genome views (eg, Circos [version 0.69-10; Krzywinski, Canada&#x2019;s Michael Smith Genome Sciences Center]) enable high-density comparison of structural variation or copy number events [<xref ref-type="bibr" rid="ref16">16</xref>], while integrative genome browsers such as Integrative Genomics Viewer (version 2.19.7; UC San Diego and Broad Institute of MIT and Harvard) allow rapid inspection of read evidence at candidate loci [<xref ref-type="bibr" rid="ref17">17</xref>]. More recent health care dashboards use fuzzy logic overlays and interactive filtering to expose outliers or low-confidence regions directly to end users [<xref ref-type="bibr" rid="ref18">18</xref>]. Despite these advances, few systems combine genotype-prediction engines with audiogram-aware visual contexts; therefore, clinicians must cross-reference separate tools, a workflow that can erode trust in algorithmic suggestions and slow decision-making [<xref ref-type="bibr" rid="ref19">19</xref>]. These limitations reflect shortcomings in how models communicate their reasoning and how results are presented to end users.</p><p>Accordingly, the literature reveals two unmet needs:</p><list list-type="order"><list-item><p>Model transparency&#x2014;while ensemble and semisupervised approaches improve predictive accuracy, they do not inherently communicate <italic>why</italic> a particular gene is ranked highly, especially when training data are imbalanced or noisy.</p></list-item><list-item><p>Unified, clinician-friendly interfaces&#x2014;existing genomic viewers excel at sequence-level detail but lack phenotype-specific visualization; conversely, stand-alone audiogram tools rarely link observed hearing profiles back to the underlying variant evidence.</p></list-item></list><p>The AudioGene Translational Dashboard addresses these gaps by (1) merging the complementary strengths of AG4 and AG9.1 and (2) embedding 6 interactive visual modules&#x2014;APS, 2D audioprofiles, uniform manifold 
approximation and projection (UMAP) cluster projections, gene count bar charts, region-of-origin pie charts, and age distribution plots&#x2014;around the model output. This hybrid XAI-driven design supports clinicians in validating or questioning the algorithm&#x2019;s &#x201C;70/30&#x201D; confidence observation and thus advances the state of practice in ADNSHL diagnostics.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Tool Overview</title><p>The AudioGene Translational Dashboard integrates 2 ML models to increase diagnostic accuracy for ADNSHL:</p><list list-type="bullet"><list-item><p>AG4&#x2014;it is a multi-instance classifier designed for patients with multiple audiograms that uses a semisupervised SVM and ranks loci based on modified SVM probability outputs [<xref ref-type="bibr" rid="ref4">4</xref>]. It was developed using the Waikato Environment for Knowledge Analysis (version 3.7.2; WekaIO Inc) platform [<xref ref-type="bibr" rid="ref11">11</xref>].</p></list-item><list-item><p>AG9.1&#x2014;it is a single-instance classifier for patients with only 1 audiogram, developed using the scikit-learn library in Python. 
It comprises multiple submodels: 3 KNNs, 6 adaptive boosting models, 2 random forest models, and a logistic regression module for combining outputs [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref8">8</xref>].</p></list-item></list><p>The AudioGene Translational Dashboard interface provides six distinct visualization tools for health care providers and researchers to interactively assess genetic data and model predictions in real time: (1) audioprofile, a 2D plot displaying the average hearing loss (in dB) over 10 frequencies (125 Hz to 8000 Hz) for each age group, allowing comparison with a patient&#x2019;s hearing loss over time (<xref ref-type="fig" rid="figure1">Figure 1A</xref>); (2) APS, a 3D surface plot depicting gene-specific hearing loss progression over time across frequencies, illustrating age-related changes in decibel loss (<xref ref-type="fig" rid="figure1">Figure 1B</xref>) [<xref ref-type="bibr" rid="ref20">20</xref>]; (3) a region-of-origin pie chart, which displays the geographic origin distribution (eg, Dutch, German, and Chinese) for audiograms associated with each gene; (4) a count bar chart, which illustrates the audiogram count for each gene, highlighting class imbalance challenges (<xref ref-type="fig" rid="figure1">Figure 1C</xref>); (5) spatial analysis and clustering, which shows the cluster position of each gene and a 3D plot of audiograms (clusters are created using the k-means algorithm to partition data into 23 gene-specific groups [<xref ref-type="bibr" rid="ref15">15</xref>]; the 3D plot compresses 11 features [age plus 10 frequencies] into 3 dimensions using the UMAP method (<xref ref-type="fig" rid="figure1">Figure 1D</xref>) [<xref ref-type="bibr" rid="ref21">21</xref>]); and (6) an age distribution scatter plot, which shows the age distribution for each gene within the training dataset, providing context for the predictive model outputs.</p><fig 
position="float" id="figure1"><label>Figure 1.</label><caption><p>Visualization components of the AudioGene Translational Dashboard. (A) Audioprofile for the selected gene (<italic>WFS1</italic>), displaying data from all age ranges along with patient data; (B) audioprofile surface view for the selected gene (<italic>ACTG1</italic>); (C) count bar chart showing the counts of each gene in the training data; and (D) 3D uniform manifold approximation and projection of genetic case data used in the AudioGene Translational Dashboard (each point represents a classified genetic case, and the color coding corresponds to 1 of the 23 unique clusters identified through different genetic diagnoses).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="bioinform_v7i1e85212_fig01.png"/></fig><p>The first 3 visualization tools were developed to compare patient-specific data to average thresholds for each of the 23 ADNSHL-associated genes. This comparison allows patient audiograms to be contextualized for each gene. The audioprofile visualization shows how a patient&#x2019;s audiogram compares to the expected audiograms associated with each gene.</p><p>The APS adds time as the third axis to provide a 3D rendering of gene-specific audiometric thresholds over time. Audiometric thresholds are represented as a 3D plane, depicting the expected dB loss over time (in years) at each frequency, thereby enabling comparisons between a patient&#x2019;s hearing thresholds and gene-specific expectations.</p><p>The spatial analysis and clustering tool uses a bar chart to show the distribution of each prediction among 23 different clusters. These clusters are created using k-means clustering [<xref ref-type="bibr" rid="ref15">15</xref>]. Additionally, a 3D plot visualizes the audiograms within our data that have a confirmed genetic diagnosis, compressing 11 features into 3 dimensions using UMAP [<xref ref-type="bibr" rid="ref21">21</xref>]. 
This feature allows users to interact with the bar chart, highlighting corresponding clusters in the 3D plot (<xref ref-type="fig" rid="figure1">Figure 1</xref>). Users can compare their patient&#x2019;s audiogram, represented by a large red dot, with others in the cluster, facilitating the identification of similar audiograms and associated genetic diagnoses.</p><p>By using these 3 visualizations, we aim to either enhance or reduce confidence in the predictions. For example, if the model ranks the <italic>COCH</italic> gene second among the top 3 predictions, health care providers can analyze the APS and spatial analysis tools to determine the degree of correlation with patterns typically associated with the <italic>COCH</italic> gene, potentially increasing confidence in that diagnosis.</p><p>The last 3 visualization tools provide context for the data in the AudioGene dataset. The count bar chart highlights significant class imbalance, showcasing the challenge the model encounters in predicting smaller classes due to underrepresentation. The region-of-origin pie chart and age distribution scatter plot provide additional context about data distribution, allowing health care providers to understand model limitations and adjust diagnostic strategies accordingly.</p><p>The AudioGene Translational Dashboard integrates into the workflow of a clinician as a secondary validation layer. For example, within our clinical workflow, clinicians, genetic counselors, and bioinformaticians review the results of a clinical genetic test, including patient history, family structure, audiograms, and identified variants in hearing loss genes. 
This team can then inspect a patient&#x2019;s audiometric data relative to the landscape of audiometric data across all genes and patients, considering variance within a gene, rarity or abundance of cases within a cluster, and distance to genetically validated cases.</p></sec><sec id="s2-2"><title>System Design</title><p>The system was designed using the SERN (SQL, Express.js, React.js, and Node.js) stack, which uses a client-server architecture where computationally intensive tasks are performed by the server, deployed in a Docker (version 28.5.1; Docker Inc) container [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. The client is supported by React (version 18.2.0; React Foundation), a JavaScript library facilitating user interactions [<xref ref-type="bibr" rid="ref23">23</xref>]. Data preprocessing used linear interpolation and extrapolation for missing values. The same methods for handling missing values were applied in the ML models [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref8">8</xref>].</p><p>The <italic>Pandas</italic> library in Python was used for data manipulation and analysis [<xref ref-type="bibr" rid="ref8">8</xref>], and visualization libraries such as Plotly were used to create interactive graphs and plots [<xref ref-type="bibr" rid="ref24">24</xref>].</p><p>For more information, please refer to the master&#x2019;s thesis by DeSollar [<xref ref-type="bibr" rid="ref3">3</xref>] and the GitLab repository.</p></sec><sec id="s2-3"><title>Ethical Considerations</title><p>This study was reviewed and approved by the University of Iowa Institutional Review Board (199701065). 
The institutional review board granted a waiver of informed consent under US federal regulation 45 CFR 46.116(f) (also known as the &#x201C;Common Rule&#x201D;), because, although audiograms were originally collected in clinical and research settings, the dataset used for this study was fully deidentified prior to analysis [<xref ref-type="bibr" rid="ref25">25</xref>]. All procedures adhered to the ethical standards of the institutional and national research committees and to the 1964 Declaration of Helsinki and its later amendments. This paper does not contain any individual&#x2019;s data in any form, including individual details, images, or videos. No compensation was provided to participants, as this study involved secondary analysis of a fully deidentified existing dataset and no participants were directly recruited or enrolled.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Introduction to AudioGene Translational Dashboard</title><p>The AudioGene Translational Dashboard combines advanced ML models with several visualization tools to create a platform that facilitates the prioritization of ADNSHL-associated genes in genetic testing results. Using patient data, the AudioGene Translational Dashboard generates gene rankings and enables auditory scientists and health care providers to explore these predictions interactively through various visualization tools. Gene ranking in phenotype-genotype associations can aid in the interpretation of complex genetic data, thereby providing greater context and confidence in diagnostic decisions [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref15">15</xref>].</p><p>The &#x201C;70/30&#x201D; phenomenon serves as an indicator for health care providers, providing them with the necessary context through visualizations to assess the reliability of the predictions. 
When the top 3 predictions include the correct gene, health care providers can have greater confidence in proceeding with targeted genetic testing.</p></sec><sec id="s3-2"><title>Case Studies and Clinical Implications</title><p>Several case studies have been carried out to demonstrate the effectiveness of the AudioGene Translational Dashboard in the diagnosis of specific genetic types of ADNSHL [<xref ref-type="bibr" rid="ref3">3</xref>].</p><sec id="s3-2-1"><title>Case 1: <italic>MYO7A</italic> Gene&#x2014;Patient 1 (ID 5)</title><p>In this case study, we analyzed the results from a patient diagnosed with <italic>MYO7A</italic>-related hearing loss. Our dataset included 2 audiograms, which were predicted by AG4 to be associated with <italic>MYO7A</italic>-related, <italic>EYA4</italic>-related, or <italic>WFS1</italic>-related hearing loss (<xref ref-type="fig" rid="figure2">Figure 2A</xref>).</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Application of the AudioGene Translational Dashboard for patient-level gene prediction and visualization. 
(A) Predictions for patient (ID 5), highlighting the top 3 genes associated with the audiological characteristics observed; (B) audioprofile of MYO7A with the patient&#x2019;s (ID 5) audiograms in red, taken at the ages of 54 and 57 years; (C) audioprofile of EYA4 with the patient&#x2019;s (ID 5) audiograms in red, taken at 54 and 57 years of age; (D) audioprofile of WFS1 with the patient&#x2019;s (ID 5) audiograms in red, taken at the ages of 54 and 57 years; and (E) 3D plot of audiograms in the training set reduced to 3 dimensions for visualization, with genes in cluster 6 colored (genes not in cluster are light gray; patient [ID 5] is the red dot hovered over by the displayed label; and the green arrows point to MYO7A [green dots], the pale green arrows point to EYA4 [pale green dots], the pale yellow arrow points to WFS1 [pale yellow dots], the pink arrow points to COCH [pink dots], and the light green arrow points to KCNQ4 [light green dots]).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="bioinform_v7i1e85212_fig02.png"/></fig><p>Examining these predictions relative to the patient&#x2019;s audiograms, the following observations can be made: (1) <italic>MYO7A&#x2019;s</italic> audioprofile is similar in the low-to-mid frequencies but diverges in the high frequencies (<xref ref-type="fig" rid="figure2">Figure 2B</xref>); (2) <italic>EYA4</italic> displays a comparable shape, but the patient&#x2019;s thresholds are consistently lower than typical values (<xref ref-type="fig" rid="figure2">Figure 2C</xref>); and (3) <italic>WFS1</italic> shares some similarities in the low-to-mid frequencies but diverges in the high frequencies, as observed with <italic>MYO7A</italic> (<xref ref-type="fig" rid="figure2">Figure 2D</xref>).</p><p>The audioprofiles for the 3 genes all show moderate to moderately severe hearing loss thresholds, with either close similarity (&#x003C;5 dB) at several frequencies or similar 
shapes. Therefore, we can conclude that the correct gene is likely captured within the top 3 predictions.</p></sec><sec id="s3-2-2"><title>Case 2: <italic>MYO6</italic> Gene&#x2014;Patient 2 (ID 2)</title><p>In our second case, we explored the results from a patient (ID 2) previously diagnosed with <italic>MYO6</italic>-related hearing loss. Our dataset contains only 1 audiogram, with gene predictions by AG9.1 shown in <xref ref-type="fig" rid="figure3">Figure 3A</xref>.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Application of the AudioGene Translational Dashboard for patient-level gene prediction, audioprofile comparison, and cluster-based visualization. (A) Predictions for the patient (ID 2), with the top 3 genes being WFS1, TECTA, and MYO7A; (B) audioprofile of WFS1 with the patient&#x2019;s (ID 2) audiograms in red, taken at 20 years of age; (C) audioprofile of TECTA with the patient&#x2019;s (ID 2) audiograms in red, taken at the age of 20 years; (D) audioprofile of MYO7A with the patient&#x2019;s (ID 2) audiograms in red, taken at the age of 20 years; and (E) 3D plot of audiograms in the training set reduced to 3 dimensions for visualization, with genes in cluster 18 colored (genes not in the cluster are light gray; patient [ID 2] is the red dot hovered over by the displayed label; and the light blue arrow points to KCNQ4 [light blue dots], the red arrow points to WFS1 [red dots], the purple arrow points to POU4F3 [purple dots], and the brown arrow points to GSDME [brown dots]).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="bioinform_v7i1e85212_fig03.png"/></fig><p>None of the top 3 candidate genes (<italic>WFS1</italic>, <italic>TECTA</italic>, or <italic>MYO7A</italic>) display an audioprofile that closely aligns with the patient&#x2019;s thresholds (<xref ref-type="fig" rid="figure3">Figure 3B&#x2013;D</xref>). 
This mismatch strongly suggests that the true causative gene (<italic>MYO6</italic>) does not appear among the model&#x2019;s top 3 predictions for this patient.</p><p>From the clustering interface, we observe that the genes closest to the patient&#x2019;s audiogram by the KNN metric (<italic>WFS1</italic>, <italic>EYA4</italic>, and <italic>KCNQ4</italic>) also fail to match the patient&#x2019;s observed audioprofile in any convincing way. Furthermore, <italic>KCNQ4</italic>, <italic>GSDME</italic>, and <italic>POU4F3</italic>, which are noted to have multiple data points near the patient&#x2019;s cluster, likewise show audioprofiles inconsistent with the patient&#x2019;s hearing loss. These factors combine to produce inconclusive predictions in this case. When we then integrated these data with the genetic data&#x2014;which identified no genetic variants in <italic>WFS1</italic>, <italic>TECTA</italic>, or <italic>MYO7A</italic> and confirmed a known variant in <italic>MYO6</italic>&#x2014;we verified that the correct gene was not found among the model&#x2019;s top 3 predictions. This outcome highlights how conflicting audioprofiles and clustering results can indicate that a prediction should be viewed with caution.</p></sec><sec id="s3-2-3"><title>Case 3: <italic>WFS1</italic> Gene&#x2014;Patient 3 (ID 13)</title><p>In our third case, the patient (ID 13) was diagnosed with <italic>WFS1</italic>-related hearing loss and, based on 3 audiograms, was predicted by AG4 to have <italic>TECTA</italic>-related, <italic>WFS1</italic>-related, or <italic>COL11A2</italic>-related hearing loss (<xref ref-type="fig" rid="figure4">Figure 4A</xref>).</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Application of the AudioGene Translational Dashboard for patient-level gene prediction, audiometric profile comparison, and cluster-based visualization. 
(A) Predictions for the patient (ID 13), with the top 3 genes being TECTA, WFS1, and COL11A2; (B) audioprofile of TECTA with the patient&#x2019;s (ID 13) audiograms in red, taken at the ages of 26, 27, and 29 years; (C) audioprofile of WFS1 with the patient&#x2019;s (ID 13) audiograms in red, taken at the ages of 26, 27, and 29 years; (D) audioprofile of COL11A2 with the patient&#x2019;s (ID 13) audiograms in red, taken at the ages of 26, 27, and 29 years; and (E) 3D plot of audiograms in the training set converted into 3 dimensions, with genes in cluster 20 colored (genes not in cluster are light gray; patient [ID 13] is the red dot hovered over by the displayed label; and the light brown arrow points to WFS1 [light brown dots], the green arrow points to MYO7A [green dots], and the brown arrow points to TECTA [brown dots]).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="bioinform_v7i1e85212_fig04.png"/></fig><p>When examining these predictions in relation to the patient&#x2019;s audiograms, the following conclusions emerge regarding why <italic>WFS1</italic> is likely the correct gene and is captured within the top 3 predictions. First, the audioprofile for <italic>TECTA</italic> (<xref ref-type="fig" rid="figure4">Figure 4B</xref>) shows some similarities; however, it does not fully capture the nuanced relationship between low-frequency and high-frequency thresholds observed in the patient&#x2019;s data. Second, <italic>COL11A2</italic> (<xref ref-type="fig" rid="figure4">Figure 4D</xref>) also exhibits thresholds that diverge from the patient&#x2019;s pattern. Finally, <italic>WFS1</italic> (<xref ref-type="fig" rid="figure4">Figure 4C</xref>) demonstrates an especially close match to the patient&#x2019;s audiometric profile, particularly in the way it mirrors better hearing at the high frequencies relative to the low frequencies. 
Although one might contend that <italic>TECTA</italic> or <italic>COL11A2</italic> could also be considered candidates based on partial pattern matches, the overall evidence&#x2014;supported by the 3D clustering in <xref ref-type="fig" rid="figure4">Figure 4E</xref>&#x2014;reinforces that <italic>WFS1</italic> provides the best fit.</p><p>Thus, regardless of whether one emphasizes the possibility of <italic>TECTA</italic> or <italic>COL11A2</italic> as contenders, the integrated data confirm that the correct gene, <italic>WFS1</italic>, is indeed within the top 3 predictions. This close alignment between the patient&#x2019;s audiometric data and the <italic>WFS1</italic> reference profile, combined with supporting clustering analysis, enhances confidence in the diagnostic utility of the AudioGene Translational Dashboard.</p></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>These studies demonstrate how the model can raise or lower confidence in variant interpretation based on whether the correct genetic cause of ADNSHL appears among the top 3 predicted genes. Cases 1 and 3 illustrate scenarios in which the model successfully includes the causative gene in its top predictions and closely matches the patient&#x2019;s audiometric data, thereby justifying a higher level of trust in the result. In contrast, case 2 underscores how mismatched audioprofiles and inconclusive clustering can reveal when the actual gene of interest is likely missing from the top 3 predictions. The interactive visualizations of the AudioGene Translational Dashboard, such as the APS and spatial analysis tools, remain valuable in identifying gene-specific patterns that align with clinical observations [<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>However, there are important limitations of the AudioGene Translational Dashboard, especially concerning smaller gene classes. 
The sparsity of data and the lower accuracy of models in these categories can make the AudioGene Translational Dashboard and phenotypic predictions less reliable. Nevertheless, by presenting visualizations of the data distribution and class imbalance, these limitations become more apparent, allowing data interpretation to be adjusted accordingly [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref4">4</xref>].</p></sec><sec id="s4-2"><title>Conclusions</title><p>The AudioGene Translational Dashboard represents an advancement in the field of genetic diagnostics for ADNSHL. By integrating advanced ML algorithms with interactive visualization tools, the AudioGene Translational Dashboard enhances health care providers&#x2019; ability to interpret genetic data and make more informed diagnostic decisions.</p><p>A central feature of the AudioGene Translational Dashboard is the &#x201C;70/30&#x201D; phenomenon, which provides health care providers with critical context for confidence in genetic predictions. When the top 3 predictions are likely to contain the correct gene, the tool serves as a &#x201C;green flag&#x201D; for health care providers, increasing diagnostic confidence. Conversely, it alerts health care providers when predictions may be less reliable, serving as a &#x201C;red flag&#x201D; and prompting further investigation.</p><p>The AudioGene Translational Dashboard is an example of XAI in clinical settings, offering a context-driven method with increased transparency for the diagnosis of ADNSHL. Future developments will focus on incorporating custom model building, enhancing class imbalance functionality, and implementing user suggestions. 
The AudioGene Translational Dashboard not only advances genetic diagnostics for hearing loss but also serves as an example of a hybrid ML system.</p></sec></sec></body><back><notes><sec><title>Funding</title><p>This research was supported by the National Institutes of Health and the National Institute on Deafness and Other Communication Disorders through grants DC002842, DC012049, and DC017955. These funding sources provided financial support for the development and testing of the AudioGene Translational Dashboard tool, contributing to advancements in machine learning and visualization methodologies for the diagnosis of autosomal dominant nonsyndromic hearing loss.</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are not publicly available but are available from the corresponding author on reasonable request. The source code for the AudioGene Translational Dashboard is publicly available [<xref ref-type="bibr" rid="ref26">26</xref>] under the GNU General Public License version 3.0 or later.</p></sec></notes><fn-group><fn fn-type="con"><p>BD led the design and development of the AudioGene Translational Dashboard, implemented the machine learning models, and drafted the manuscript. NS contributed to the integration of visualization tools and assisted with manuscript preparation. DW and AMO were responsible for data acquisition and preprocessing and contributed to tool validation. KTAB and HA provided expertise in genetic diagnostics and guided the tool&#x2019;s clinical relevance. MS supported statistical analysis and interpretation of the results. RJHS and TB provided project oversight, secured funding, and critically revised the manuscript. TC supervised the software engineering components and contributed to system architecture design. 
All authors reviewed and approved the final manuscript.</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">ADNSHL</term><def><p>autosomal dominant nonsyndromic hearing loss</p></def></def-item><def-item><term id="abb2">AG4</term><def><p>AudioGene version 4</p></def></def-item><def-item><term id="abb3">AG9.1</term><def><p>AudioGene version 9.1</p></def></def-item><def-item><term id="abb4">APS</term><def><p>audioprofile surface</p></def></def-item><def-item><term id="abb5">KNN</term><def><p>k-nearest neighbor</p></def></def-item><def-item><term id="abb6">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb7">SERN</term><def><p>SQL, Express.js, React.js, and Node.js</p></def></def-item><def-item><term id="abb8">SVM</term><def><p>support vector machine</p></def></def-item><def-item><term id="abb9">UMAP</term><def><p>uniform manifold approximation and projection</p></def></def-item><def-item><term id="abb10">XAI</term><def><p>explainable artificial intelligence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Walls</surname><given-names>WD</given-names> </name><name name-style="western"><surname>Azaiez</surname><given-names>H</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>RJ</given-names> </name></person-group><source>Hereditary Hearing Loss Homepage</source><access-date>2026-03-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://hereditaryhearingloss.org">https://hereditaryhearingloss.org</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Taylor</surname><given-names>KR</given-names> 
</name><name name-style="western"><surname>Deluca</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Shearer</surname><given-names>AE</given-names> </name><etal/></person-group><article-title>AudioGene: predicting hearing loss genotypes from phenotypes to guide genetic screening</article-title><source>Hum Mutat</source><year>2013</year><month>04</month><volume>34</volume><issue>4</issue><fpage>539</fpage><lpage>545</lpage><pub-id pub-id-type="doi">10.1002/humu.22268</pub-id><pub-id pub-id-type="medline">23280582</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="thesis"><person-group person-group-type="author"><name name-style="western"><surname>DeSollar</surname><given-names>BR</given-names> </name></person-group><article-title>AGTD - The AudioGene Translational Dashboard: a hybrid machine learning and visualization interface for genetic diagnosis of autosomal dominant non-syndromic hearing loss [Master&#x2019;s thesis]</article-title><year>2024</year><access-date>2026-03-28</access-date><publisher-name>University of Iowa</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://iro.uiowa.edu/esploro/outputs/graduate/9984647256502771">https://iro.uiowa.edu/esploro/outputs/graduate/9984647256502771</ext-link></comment></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="thesis"><person-group person-group-type="author"><name name-style="western"><surname>Ryan</surname><given-names>S</given-names> </name></person-group><article-title>Machine learning prediction of genetic hearing loss via selective intraensemble data partitioning [Master&#x2019;s thesis]</article-title><year>2024</year><access-date>2026-03-28</access-date><publisher-name>University of Iowa</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://iro.uiowa.edu/esploro/outputs/graduate/Machine-learning-prediction-of-genetic-hearing/9984647557802771">https://iro.uiowa.edu/esploro/outputs/graduate/Machine-learning-prediction-of-genetic-hearing/9984647557802771</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="thesis"><person-group person-group-type="author"><name name-style="western"><surname>Nwakama</surname><given-names>CC</given-names> </name></person-group><article-title>AudioGene 9.0: novel ensemble machine learning classification of 23 classes of autosomal non-syndromic hearing loss (deafness) [Master&#x2019;s thesis]</article-title><year>2021</year><access-date>2026-03-28</access-date><publisher-name>University of Iowa</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://iro.uiowa.edu/view/pdfCoverPage?instCode=01IOWA_INST&#x0026;filePid=13841170450002771&#x0026;download=true">https://iro.uiowa.edu/view/pdfCoverPage?instCode=01IOWA_INST&#x0026;filePid=13841170450002771&#x0026;download=true</ext-link></comment></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gunning</surname><given-names>D</given-names> </name><name name-style="western"><surname>Stefik</surname><given-names>M</given-names> </name><name name-style="western"><surname>Choi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Miller</surname><given-names>T</given-names> </name><name name-style="western"><surname>Stumpf</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>GZ</given-names> </name></person-group><article-title>XAI-Explainable artificial intelligence</article-title><source>Sci Robot</source><year>2019</year><month>12</month><day>18</day><volume>4</volume><issue>37</issue><fpage>eaay7120</fpage><pub-id 
pub-id-type="doi">10.1126/scirobotics.aay7120</pub-id><pub-id pub-id-type="medline">33137719</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smith</surname><given-names>RJH</given-names> </name><name name-style="western"><surname>Bale</surname><given-names>JF</given-names>  <suffix>Jr</suffix></name><name name-style="western"><surname>White</surname><given-names>KR</given-names> </name></person-group><article-title>Sensorineural hearing loss in children</article-title><source>Lancet</source><year>2005</year><month>03</month><volume>365</volume><issue>9462</issue><fpage>879</fpage><lpage>890</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(05)71047-3</pub-id><pub-id pub-id-type="medline">15752533</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Venkatesh</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Moorchung</surname><given-names>N</given-names> </name><name name-style="western"><surname>Puri</surname><given-names>B</given-names> </name></person-group><article-title>Genetics of non syndromic hearing loss</article-title><source>Med J Armed Forces India</source><year>2015</year><month>10</month><volume>71</volume><issue>4</issue><fpage>363</fpage><lpage>368</lpage><pub-id pub-id-type="doi">10.1016/j.mjafi.2015.07.003</pub-id><pub-id pub-id-type="medline">26663965</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="web"><article-title>API reference&#x2014;Pandas 1.5.3 documentation</article-title><source>Pandas</source><access-date>2026-03-28</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://pandas.pydata.org/pandas-docs/version/1.5/reference/index.html">https://pandas.pydata.org/pandas-docs/version/1.5/reference/index.html</ext-link></comment></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Albarrak</surname><given-names>AM</given-names> </name></person-group><article-title>Improving the trustworthiness of interactive visualization tools for healthcare data through a medical fuzzy expert system</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>05</month><day>13</day><volume>13</volume><issue>10</issue><fpage>1733</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13101733</pub-id><pub-id pub-id-type="medline">37238218</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Frank</surname><given-names>E</given-names> </name><name name-style="western"><surname>Hall</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Witten</surname><given-names>IH</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Witten</surname><given-names>IH</given-names> </name><name name-style="western"><surname>Frank</surname><given-names>E</given-names> </name><name name-style="western"><surname>Hall</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Pal</surname><given-names>CJ</given-names> </name></person-group><article-title>The WEKA workbench</article-title><source>Data Mining: Practical Machine Learning Tools and Techniques</source><year>2016</year><edition>4</edition><publisher-name>Morgan Kaufmann</publisher-name></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name 
name-style="western"><surname>Deza</surname><given-names>E</given-names> </name><name name-style="western"><surname>Deza</surname><given-names>MM</given-names> </name></person-group><source>Encyclopedia of Distances</source><year>2009</year><publisher-name>Springer</publisher-name></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Krzywinski</surname><given-names>M</given-names> </name><name name-style="western"><surname>Schein</surname><given-names>J</given-names> </name><name name-style="western"><surname>Birol</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Circos: an information aesthetic for comparative genomics</article-title><source>Genome Res</source><year>2009</year><month>09</month><volume>19</volume><issue>9</issue><fpage>1639</fpage><lpage>1645</lpage><pub-id pub-id-type="doi">10.1101/gr.092759.109</pub-id><pub-id pub-id-type="medline">19541911</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cover</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hart</surname><given-names>P</given-names> </name></person-group><article-title>Nearest neighbor pattern classification</article-title><source>IEEE Trans Inform Theory</source><year>1967</year><volume>13</volume><issue>1</issue><fpage>21</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1109/TIT.1967.1053964</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>MacQueen</surname><given-names>J</given-names> </name></person-group><article-title>Some methods for classification and analysis of multivariate observations</article-title><source>Berkeley Symposium on Mathematical Statistics 
and Probability</source><year>1967</year><publisher-name>University of California Press</publisher-name><fpage>281</fpage><lpage>297</lpage></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thorvaldsd&#x00F3;ttir</surname><given-names>H</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Mesirov</surname><given-names>JP</given-names> </name></person-group><article-title>Integrative Genomics Viewer (IGV): high-performance genomics data visualization and exploration</article-title><source>Brief Bioinform</source><year>2013</year><month>03</month><volume>14</volume><issue>2</issue><fpage>178</fpage><lpage>192</lpage><pub-id pub-id-type="doi">10.1093/bib/bbs017</pub-id><pub-id pub-id-type="medline">22517427</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>TF</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>CJ</given-names> </name><name name-style="western"><surname>Weng</surname><given-names>RC</given-names> </name></person-group><article-title>Probability estimates for multi-class classification by pairwise coupling</article-title><source>J Mach Learn Res</source><year>2004</year><access-date>2026-03-28</access-date><volume>5</volume><fpage>975</fpage><lpage>1005</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.jmlr.org/papers/volume5/wu04a/wu04a.pdf">https://www.jmlr.org/papers/volume5/wu04a/wu04a.pdf</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weininger</surname><given-names>O</given-names> </name><name 
name-style="western"><surname>Warnecke</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lesinski-Schiedat</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lenarz</surname><given-names>T</given-names> </name><name name-style="western"><surname>Stolle</surname><given-names>S</given-names> </name></person-group><article-title>Computational analysis based on audioprofiles: a new possibility for patient stratification in office-based otology</article-title><source>Audiol Res</source><year>2019</year><month>09</month><day>2</day><volume>9</volume><issue>2</issue><fpage>230</fpage><pub-id pub-id-type="doi">10.4081/audiores.2019.230</pub-id><pub-id pub-id-type="medline">31728177</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Merkel</surname><given-names>D</given-names> </name></person-group><article-title>Docker: lightweight Linux containers for consistent development and deployment</article-title><source>Linux J</source><year>2014</year><volume>2014</volume><issue>239</issue><fpage>2</fpage><pub-id pub-id-type="doi">10.5555/2600239.2600241</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Taylor</surname><given-names>KR</given-names> </name><name name-style="western"><surname>Booth</surname><given-names>KT</given-names> </name><name name-style="western"><surname>Azaiez</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Audioprofile surfaces: the 21st century audiogram</article-title><source>Ann Otol Rhinol Laryngol</source><year>2016</year><month>05</month><volume>125</volume><issue>5</issue><fpage>361</fpage><lpage>368</lpage><pub-id pub-id-type="doi">10.1177/0003489415614863</pub-id><pub-id 
pub-id-type="medline">26530094</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McInnes</surname><given-names>L</given-names> </name><name name-style="western"><surname>Healy</surname><given-names>J</given-names> </name><name name-style="western"><surname>Saul</surname><given-names>N</given-names> </name><name name-style="western"><surname>Gro&#x00DF;berger</surname><given-names>L</given-names> </name></person-group><article-title>UMAP: Uniform Manifold Approximation and Projection</article-title><source>J Open Source Softw</source><year>2018</year><volume>3</volume><issue>29</issue><fpage>861</fpage><pub-id pub-id-type="doi">10.21105/joss.00861</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="web"><article-title>Docker Compose</article-title><source>Docker Docs</source><access-date>2026-03-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://docs.docker.com/compose">https://docs.docker.com/compose</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="web"><source>React</source><access-date>2026-03-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://reactjs.org">https://reactjs.org</ext-link></comment></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><article-title>Plotly JavaScript open source graphing library</article-title><source>Plotly</source><access-date>2026-03-28</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://plotly.com/javascript">https://plotly.com/javascript</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="web"><article-title>45 CFR &#x00A7;46.116 - General requirements for informed consent</article-title><source>Code of Federal 
Regulations</source><year>2018</year><access-date>2026-04-10</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.ecfr.gov/current/title-45/subtitle-A/subchapter-A/part-46/subpart-A/section-46.116">https://www.ecfr.gov/current/title-45/subtitle-A/subchapter-A/part-46/subpart-A/section-46.116</ext-link></comment></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Schaefer</surname><given-names>N</given-names> </name></person-group><article-title>AudioGene</article-title><access-date>2026-04-07</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://research-git.uiowa.edu/morl/audiogene/website/AudioGene">https://research-git.uiowa.edu/morl/audiogene/website/AudioGene</ext-link></comment></nlm-citation></ref></ref-list></back></article>