<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD JATS (Z39.96) Journal Archiving and Interchange DTD v1.2 20190208//EN" "http://jats.nlm.nih.gov/archiving/1.2/JATS-archivearticle1.dtd">
<article article-type="brief-report" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-title-group>
        <journal-title>microPublication Biology</journal-title>
      </journal-title-group>
      <issn pub-type="epub">2578-9430</issn>
      <publisher>
        <publisher-name>Caltech Library</publisher-name>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="doi">10.17912/micropub.biology.001926</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>new finding</subject>
        </subj-group>
        <subj-group subj-group-type="heading">
          <subject>methodology</subject>
        </subj-group>
        <subj-group subj-group-type="subject">
          <subject>computational biology</subject>
        </subj-group>
        <subj-group subj-group-type="subject">
          <subject>methods</subject>
        </subj-group>
        <subj-group subj-group-type="species">
          <subject>universal</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Variable deep learning training horizons reveal the temporal complexity of biological systems</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="author" equal-contrib="yes">
          <name>
            <surname>Chiu</surname>
            <given-names>Po-Hao</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation">Data curation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis">Formal analysis</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation">Investigation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology">Methodology</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation">Validation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization">Visualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft">Writing - original draft</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing">Writing - review &amp; editing</role>
          <xref ref-type="aff" rid="aff1">1</xref>
        </contrib>
        <contrib contrib-type="author" equal-contrib="yes">
          <name>
            <surname>Evarts</surname>
            <given-names>Jacob I</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation">Data curation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis">Formal analysis</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation">Investigation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology">Methodology</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation">Validation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization">Visualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft">Writing - original draft</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing">Writing - review &amp; editing</role>
          <xref ref-type="aff" rid="aff2">2</xref>
        </contrib>
        <contrib contrib-type="author" equal-contrib="yes">
          <name>
            <surname>Feng</surname>
            <given-names>Patrick</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Data curation" vocab-term-identifier="https://credit.niso.org/contributor-roles/data-curation">Data curation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis">Formal analysis</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation">Investigation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Methodology" vocab-term-identifier="https://credit.niso.org/contributor-roles/methodology">Methodology</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Validation" vocab-term-identifier="https://credit.niso.org/contributor-roles/validation">Validation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization">Visualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - original draft" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-original-draft">Writing - original draft</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing">Writing - review &amp; editing</role>
          <xref ref-type="aff" rid="aff2">2</xref>
        </contrib>
        <contrib contrib-type="author">
          <name>
            <surname>Bagheri</surname>
            <given-names>Neda</given-names>
          </name>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Conceptualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/conceptualization">Conceptualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Formal analysis" vocab-term-identifier="https://credit.niso.org/contributor-roles/formal-analysis">Formal analysis</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Funding acquisition" vocab-term-identifier="https://credit.niso.org/contributor-roles/funding-acquisition">Funding acquisition</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Investigation" vocab-term-identifier="https://credit.niso.org/contributor-roles/investigation">Investigation</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Project administration" vocab-term-identifier="https://credit.niso.org/contributor-roles/project-administration">Project administration</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Supervision" vocab-term-identifier="https://credit.niso.org/contributor-roles/supervision">Supervision</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Visualization" vocab-term-identifier="https://credit.niso.org/contributor-roles/visualization">Visualization</role>
          <role vocab="credit" vocab-identifier="https://credit.niso.org/" vocab-term="Writing - review &amp; editing" vocab-term-identifier="https://credit.niso.org/contributor-roles/writing-review-editing">Writing - review &amp; editing</role>
          <xref ref-type="aff" rid="aff2">2</xref>
          <xref ref-type="aff" rid="aff1">1</xref>
          <xref ref-type="corresp" rid="cor1">§</xref>
        </contrib>
        <aff id="aff1">
          <label>1</label>
          Chemical Engineering, University of Washington, Seattle, Washington, United States
        </aff>
        <aff id="aff2">
          <label>2</label>
          Biology, University of Washington, Seattle, Washington, United States
        </aff>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <anonymous/>
        </contrib>
      </contrib-group>
      <author-notes>
        <corresp id="cor1">
          <label>§</label>
          Correspondence to: Neda Bagheri (
          <email>nbagheri@uw.edu</email>
          )
        </corresp>
        <fn fn-type="coi-statement">
          <p>The authors declare that there are no conflicts of interest present.</p>
        </fn>
      </author-notes>
      <pub-date date-type="pub" publication-format="electronic">
        <day>18</day>
        <month>2</month>
        <year>2026</year>
      </pub-date>
      <pub-date date-type="collection" publication-format="electronic">
        <year>2026</year>
      </pub-date>
      <volume>2026</volume>
      <elocation-id>10.17912/micropub.biology.001926</elocation-id>
      <history>
        <date date-type="received">
          <day>30</day>
          <month>10</month>
          <year>2025</year>
        </date>
        <date date-type="rev-recd">
          <day>9</day>
          <month>2</month>
          <year>2026</year>
        </date>
        <date date-type="accepted">
          <day>13</day>
          <month>2</month>
          <year>2026</year>
        </date>
      </history>
      <permissions>
        <copyright-statement>Copyright: © 2026 by the authors</copyright-statement>
        <copyright-year>2026</copyright-year>
        <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
          <license-p>This is an open-access article distributed under the terms of the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.</license-p>
        </license>
      </permissions>
      <abstract>
        <p>
          The increasing quantity of time-series images presents new opportunities for extracting biological insights from data. Here, we introduce a deep learning framework with a variable input sequence length to predict cell and colony morphologies. We apply this framework to 
          <italic>in silico</italic>
           and 
          <italic>in vitro</italic>
           microscopy datasets, evaluating the impact of temporal data on performance. We find that while performance increases monotonically with increasing 
          <italic>in silico</italic>
           training data, performance is varied in the 
          <italic>in vitro</italic>
           case studies. The varying results reflect the intrinsic challenges stochastic, complex biological systems pose to data-driven modeling, and offer a new method through which we can identify biological transition points using temporal dynamics.
        </p>
      </abstract>
      <funding-group>
        <funding-statement>This work was supported by the Washington Research Foundation (N.B.) and the National Science Foundation Graduate Research Fellowship grant No. DGE-2140004 (J.I.E.). </funding-statement>
      </funding-group>
    </article-meta>
  </front>
  <body>
    <fig position="anchor" id="f1">
      <label>Figure 1. FNOs trained on varying temporal input horizons exhibit different performance outcomes</label>
      <caption>
        <p>
          (
          <bold>a</bold>
          ) The deep learning pipeline enables interrogation of the impact of the training window horizon on prediction accuracy. An ARCADE simulation sequence is shown with gradient colors that correspond with tumor age. A variable input window W
          <sub>i</sub>
           is passed through an FNO that is trained to predict the final timepoint of the time series, which we term the prediction horizon P.  (
          <bold>b</bold>
          ) FNO predictions based on increasing training horizons are shown for three of 500 samples of the in silico tumor simulation data. The FNO predicts images at the 15th time point based on an input window W
          <sub>i</sub>
           where 1≤i≤14. The leftmost column shows the ground truth image highlighted in yellow, and each subsequent column shows the predicted image given a window length W
          <sub>i</sub>
          . Each row corresponds to a unique sample.  (
          <bold>c</bold>
          ) The test loss (quantified by the mean squared error, MSE) of 
          <italic>in silico</italic>
           simulation data is shown as a function of training window size. Results highlight nearly monotonic improvement of performance with increased training window horizon. The image in the bottom left corner is the ground truth of sample 1 in panel b (highlighted in yellow), and notable predictions are shown for select window horizons.  (
          <bold>d</bold>
          ) FNO predictions based on increasing training horizons are shown for three of 133 samples of the in vitro tumor spheroid data. The FNO predicts images at the 12th time point based on an input window W
          <sub>i</sub>
           where 1≤i≤11. The leftmost column shows the ground truth image highlighted in yellow, and each subsequent column shows the predicted image given a window length W
          <sub>i</sub>
          . Each row corresponds to a unique sample.  (
          <bold>e</bold>
          ) The test loss (MSE) of 
          <italic>in vitro</italic>
           spheroid images is shown as a function of training window size. Unlike the simulated tumor case study, there are considerable fluctuations in FNO performance even with an increase in window horizon and temporal information. The image in the bottom left corner is the ground truth of sample 133 in panel d (highlighted in yellow), and notable predictions are shown for select window horizons.  (
          <bold>f</bold>
          ) FNO predictions based on increasing training horizons are shown for three of 77 samples of the in vitro tumor cell death data. The FNO predicts images at the 16th time point based on an input window W
          <sub>i</sub>
           where 1≤i≤15. The leftmost column shows the ground truth image highlighted in yellow, and each subsequent column shows the predicted image given a window length W
          <sub>i</sub>
          . Each row corresponds to a unique sample.  (
          <bold>g</bold>
          ) The test loss (MSE) of individual cell death images is shown as a function of training window size. Results are as variable as the other in vitro dataset. However, the spikes in test loss seem to derive from noisy backgrounds which contrast with the pure black of the ground truth. Excluding those poorly performing models, the loss curve is notably more monotonic than the cultured spheroid one. The image in the bottom left corner is the ground truth (highlighted in yellow) and notable predictions are shown for select window horizons.
        </p>
      </caption>
      <graphic xlink:href="25789430-2026-micropub.biology.001926"/>
    </fig>
    <sec>
      <title>Description</title>
      <p>Biological imaging techniques have achieved a high degree of spatial and temporal resolution, situating them as an important data source alongside ’omics data for biological discovery (Bagheri et al., 2022; Kuhn Cuellar et al., 2022). This increase in high-resolution temporal imaging introduces new opportunities and challenges for analysis to extract insights from morphological dynamics. Machine learning shows promise in going beyond feature extraction towards learning the underlying rules governing a system (Soelistyo et al., 2022; Rotem et al., 2024). A better understanding of how temporal dynamics affect the performance of machine learning models can help uncover the timing of biological processes (Bao et al., 2025; Toulany et al., 2023) and outline the performance-cost tradeoff curve for generating high temporal resolution data (Cain et al., 2024; Aceituno et al., 2025). Here, we develop a deep learning model to forecast future morphological states of three biological systems from time-series images using input sequences of varying lengths to probe how much temporal information is needed to make accurate predictions about future states. The three systems include: (i) simulated tumors generated by an agent-based model, (ii) cultured tumor spheroids, and (iii) individual tumor cell death dynamics.</p>
      <p>
        Fourier neural operators (FNOs) are a unique neural network architecture developed for learning dynamics of systems governed by smooth, continuous processes, and have found success in emulating scientific partial differential equations (PDEs) (Kovachki et al., 2021). Although FNOs have shown strong performance in forecasting time-series across physical systems (Kurth et al., 2023; Li et al., 2023; Long et al., 2024), their sensitivity to temporal context, particularly with biological image data, remains an open question that impacts if and how one might characterize time-dependent biological processes. In this work, we train FNOs to predict a future state of the system (the final timepoint of the dataset), which we term the prediction horizon (P), from a preceding image sequence of variable length, W
        <sub>i</sub>
        , the training horizon, where i refers to the number of time points in the training window (Fig. 1a). We hypothesized that prediction performance would increase as a function of the training window.
      </p>
      <p>
        As expected, FNO performance increases monotonically with training window size in the 
        <italic>in silico</italic>
         case study (Fig. 1b, c). Models trained on the simulated tumor dataset accurately predict general emergent properties, such as tumor size and location; however, more biologically relevant characteristics only appear when the model is trained with greater values of W
        <sub>i</sub>
        . Specifically, for W
        <sub>i</sub>
        ≤6, there is no qualitative or quantitative (measured by test loss) improvement. When W
        <sub>6&lt;i≤9</sub>
        , accurate tumor borders begin to appear. Between W
        <sub>9</sub>
         and W
        <sub>10</sub>
        , the test loss decreases significantly and a clear accurate shape for the tumor emerges. In cases where W
        <sub>i</sub>
        &gt;10, the predicted tumor has clear borders and the shape becomes increasingly precise with increasing W
        <sub>i</sub>
        .
      </p>
      <p>
        When the pipeline is applied to the tumor spheroid dataset, a different trend emerges. The models trained on the 
        <italic>in vitro</italic>
         spheroids exhibit highly variant accuracy (Fig. 1d, e). Qualitatively, predictions with W
        <sub>i&gt;6</sub>
         capture finer boundary details and steadily decrease in test loss as the window size increases. However, the prediction with W
        <sub>9</sub>
         deviates from this trend, exhibiting an unusually high loss. We believe this variation in performance is due to the relatively short time-scale of the image dataset and the lack of substantial morphological changes over that period. Despite variations in the loss function, a qualitative assessment of performance points to consistent improvement with increasing W
        <sub>i</sub>
        , suggesting that pixel-wise loss may not be a robust metric due to non-biological variations in the background of the predicted images. Nevertheless, the FNO seems to be learning increasingly more biological features and morphological characteristics of 
        <italic>in vitro</italic>
         tumors with increasing W
        <sub>i</sub>
        .
      </p>
      <p>
        Having assessed performance in two distinct tumor spheroid datasets, we evaluated the impact of the training window on a cancer cell death time-lapse dataset with a finer temporal resolution. This system features a distinct biological event (cell death) that is characterized by a rapid condensing of fluoresced materials. Quantitatively, the test loss (Fig. 1f, g), exhibits non-monotonicity similar to the tumor spheroid dataset. Despite fluctuations in model performance, the FNO appears to improve as the training horizon increases. The primary inconsistencies in the loss curve qualitatively align with poor prediction of the image background, rather than in the prediction of biologically relevant dynamics. Qualitatively, results indicate that prediction accuracy for biologically relevant dynamics improves with increasing W
        <sub>i</sub>
        , as the FNO captures more accurate cell morphologies. This trend demonstrates that learning temporal dynamics is highly dependent on biological variance and technical noise, and that qualitative validation should be considered in concert with quantitative metrics.
      </p>
      <p>In this paper, we introduce a variable training horizon pipeline using FNOs to investigate the complexity of temporal trends of several biological systems using imaging data. Our results reveal a contrast between simulated and real-world biological dynamics. The simulated tumor growth model, governed by smooth dynamics, showed a clear monotonic improvement in prediction accuracy as more temporal information was provided. However, in both the tumor spheroid and tumor cell death dynamics datasets, increasing the temporal information shows variant, non-monotonic performance landscapes. Factors that could limit the performance of data-driven models include ineffective sampling of critical transitions, biological “switches”, and timescales where dynamics can be obscured by biological stochasticity and/or technical noise. These findings align with recent work suggesting that the loss landscape for auto-regressive prediction in dynamic systems can be surprisingly uneven (Aceituno et al., 2025), which we find to be exacerbated by the nonlinear dynamics inherent in biological processes. However, models trained on denoised microscopy images obtained by image processing methods (e.g., cell segmentation) produce smoother performance landscapes despite occasional outliers. Our results highlight both a challenge and an opportunity: while data-driven methods struggle with the unpredictability of biological systems, the model failure points can be used to identify key biological transitions.</p>
    </sec>
    <sec>
      <title>Methods</title>
      <p>
        <bold>
          <italic>In silico</italic>
           tumor microenvironment simulations.
        </bold>
         Data was generated using ARCADE, an agent-based model (ABM) characterizing tumor growth in a heterogeneous and dynamic vascular microenvironment. We simulated 500 tumors under varying microenvironmental conditions over 15 days, capturing a snapshot of the 
        <italic>in silico</italic>
         2D-slice of tissue every 24 hours. The simulations used for this study are similar to those described in (Yu and Bagheri, 2020). 
      </p>
      <p>
        <bold>
          <italic>In vitro</italic>
           cancer spheroid microscopy images.
        </bold>
         Time-lapse images of tumor spheroids monitor the effects of interface stiffness on the invasiveness of tumor development (Thi Kim Ngan Ngo, 2022). This dataset contains time-lapse microscopy of 133 tumor spheroids cultured under varying extracellular matrix stiffness and surface topographies. We trained models on snapshots of these tumors taken every 2 hours over a period of 24 hours.
      </p>
      <p>
        <bold>
          <italic>In vitro</italic>
           cell death microscopy images
        </bold>
        . An LNCaP cell line was treated with doxorubicin, a cell death–inducing compound, and time-lapse images monitored the impact of this treatment (Vicar et al., 2020). The dataset we used spans 24 hours with a frame rate of 1 frame per 3 minutes, and a spatial resolution of 1.59 px/μm. We employed StarDist (Schmidt et al., 2018), a deep learning-based cell segmentation model, to identify individual cell locations. We tracked individual cell’s location across time frames through a Kalman tracker. The timing of the cell death event was determined from the fluorescence intensity over time frames where a sharp increase indicates DNA condensation during cell death. We used 1 to 15 time frames prior to the cell death event for the window horizon input and predicted the system morphology 5 frames after the cell death.
      </p>
      <p>
        <bold>Spatiotemporal FNO model architecture</bold>
        . The FNO model was developed with a 3D architecture—2 spatial dimensions and 1 temporal dimension—to effectively capture the spatiotemporal dynamics of tumor emergence. We implemented the FNO using the neuraloperator library (Kovachki et al., 2021; Kossaifi et al., 2024), which uses the PyTorch library (Paszke et al., 2019). Hyperparameters were defined as: modes=(24, 24, 8), layers=8, width=32. Datasets were partitioned into model training (80%), validation (10%), and test (10%) sets. All images were center-cropped and down-sampled to a fixed spatial resolution of 128 × 128. Model training minimized the H1 loss using the Adam optimizer (Kingma and Ba, 2017).
      </p>
      <p>
        <bold>Data and code availability.</bold>
         All datasets and code are open and accessible. The deep learning model source code is available on Zenodo at 10.5281/zenodo.17478675. The ARCADE ABM v2.4 source code (used to generate the synthetic tumor images) is available on Zenodo at 10.5281/zenodo.10622155. The tumor spheroid and cell death datasets were previously published (Thi Kim Ngan Ngo, 2022; Vicar et al., 2020).
      </p>
    </sec>
  </body>
  <back>
    <ack>
      <sec>
        <p>The authors would like to thank Bagheri Lab members for their invaluable feedback.</p>
      </sec>
    </ack>
    <ref-list>
      <ref id="R1">
        <element-citation publication-type="Preprint">
          <person-group person-group-type="author">
            <name>
              <surname>Aceituno</surname>
              <given-names>Pau Vilimelis</given-names>
            </name>
            <name>
              <surname>Miller</surname>
              <given-names>Jack William</given-names>
            </name>
            <name>
              <surname>Marti</surname>
              <given-names>Noah</given-names>
            </name>
            <name>
              <surname>Farag</surname>
              <given-names>Youssef</given-names>
            </name>
            <name>
              <surname>Boussange</surname>
              <given-names>Victor</given-names>
            </name>
          </person-group>
          <year>2025</year>
          <article-title>Temporal horizons in forecasting: a performance-learnability trade-off</article-title>
          <pub-id pub-id-type="doi">10.48550/arxiv.2506.03889</pub-id>
        </element-citation>
      </ref>
      <ref id="R2">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Angione</surname>
              <given-names>Claudio</given-names>
            </name>
            <name>
              <surname>Silverman</surname>
              <given-names>Eric</given-names>
            </name>
            <name>
              <surname>Yaneske</surname>
              <given-names>Elisabeth</given-names>
            </name>
          </person-group>
          <year>2022</year>
          <month>2</month>
          <day>10</day>
          <article-title>Using machine learning as a surrogate model for agent-based simulations</article-title>
          <source>PLOS ONE</source>
          <volume>17</volume>
          <issue>2</issue>
          <issn>1932-6203</issn>
          <fpage>e0263150</fpage>
          <lpage>e0263150</lpage>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0263150</pub-id>
        </element-citation>
      </ref>
      <ref id="R3">
        <mixed-citation>Bagheri N, Carpenter AE, Lundberg E, Plant AL, Horwitz R. 2022. The new era of quantitative cell imaging—challenges and opportunities. Molecular Cell. 82: 241.</mixed-citation>
      </ref>
      <ref id="R4">
        <element-citation publication-type="posted-content">
          <person-group person-group-type="author">
            <name>
              <surname>Bao</surname>
              <given-names>Huihan</given-names>
            </name>
            <name>
              <surname>Zhang</surname>
              <given-names>Shihe</given-names>
            </name>
            <name>
              <surname>Yu</surname>
              <given-names>Zhiyang</given-names>
            </name>
            <name>
              <surname>Xu</surname>
              <given-names>Heng</given-names>
            </name>
          </person-group>
          <year>2025</year>
          <month>3</month>
          <day>17</day>
          <article-title>Deep Learning-Based High-Resolution Time Inference for Deciphering Dynamic Gene Regulation from Fixed Embryos</article-title>
          <pub-id pub-id-type="doi">10.1101/2025.03.17.643618</pub-id>
        </element-citation>
      </ref>
      <ref id="R5">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Cain</surname>
              <given-names>Jason Y</given-names>
            </name>
            <name>
              <surname>Evarts</surname>
              <given-names>Jacob I</given-names>
            </name>
            <name>
              <surname>Yu</surname>
              <given-names>Jessica S</given-names>
            </name>
            <name>
              <surname>Bagheri</surname>
              <given-names>Neda</given-names>
            </name>
          </person-group>
          <year>2024</year>
          <month>3</month>
          <day>1</day>
          <article-title>Incorporating temporal information during feature engineering bolsters emulation of spatio-temporal emergence</article-title>
          <source>Bioinformatics</source>
          <volume>40</volume>
          <issue>3</issue>
          <issn>1367-4811</issn>
          <pub-id pub-id-type="doi">10.1093/bioinformatics/btae131</pub-id>
        </element-citation>
      </ref>
      <ref id="R6">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Fadikar</surname>
              <given-names>Arindam</given-names>
            </name>
            <name>
              <surname>Higdon</surname>
              <given-names>Dave</given-names>
            </name>
            <name>
              <surname>Chen</surname>
              <given-names>Jiangzhuo</given-names>
            </name>
            <name>
              <surname>Lewis</surname>
              <given-names>Bryan</given-names>
            </name>
            <name>
              <surname>Venkatramanan</surname>
              <given-names>Srinivasan</given-names>
            </name>
            <name>
              <surname>Marathe</surname>
              <given-names>Madhav</given-names>
            </name>
          </person-group>
          <year>2018</year>
          <month>1</month>
          <day>1</day>
          <article-title>Calibrating a Stochastic, Agent-Based Model Using Quantile-Based Emulation</article-title>
          <source>SIAM/ASA Journal on Uncertainty Quantification</source>
          <volume>6</volume>
          <issue>4</issue>
          <issn>2166-2525</issn>
          <fpage>1685</fpage>
          <lpage>1706</lpage>
          <pub-id pub-id-type="doi">10.1137/17m1161233</pub-id>
        </element-citation>
      </ref>
      <ref id="R7">
        <element-citation publication-type="preprint">
          <person-group person-group-type="author">
            <name>
              <surname>Kingma</surname>
              <given-names>Diederik P.</given-names>
            </name>
            <name>
              <surname>Ba</surname>
              <given-names>Jimmy</given-names>
            </name>
          </person-group>
          <year>2014</year>
          <article-title>Adam: A Method for Stochastic Optimization</article-title>
          <pub-id pub-id-type="doi">10.48550/arxiv.1412.6980</pub-id>
        </element-citation>
      </ref>
      <ref id="R8">
        <element-citation publication-type="preprint">
          <person-group person-group-type="author">
            <name>
              <surname>Kossaifi</surname>
              <given-names>Jean</given-names>
            </name>
            <name>
              <surname>Kovachki</surname>
              <given-names>Nikola</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>Zongyi</given-names>
            </name>
            <name>
              <surname>Pitt</surname>
              <given-names>David</given-names>
            </name>
            <name>
              <surname>Liu-Schiaffini</surname>
              <given-names>Miguel</given-names>
            </name>
            <name>
              <surname>George</surname>
              <given-names>Robert Joseph</given-names>
            </name>
            <name>
              <surname>Bonev</surname>
              <given-names>Boris</given-names>
            </name>
            <name>
              <surname>Azizzadenesheli</surname>
              <given-names>Kamyar</given-names>
            </name>
            <name>
              <surname>Berner</surname>
              <given-names>Julius</given-names>
            </name>
            <name>
              <surname>Duruisseaux</surname>
              <given-names>Valentin</given-names>
            </name>
            <name>
              <surname>Anandkumar</surname>
              <given-names>Anima</given-names>
            </name>
          </person-group>
          <year>2024</year>
          <article-title>A Library for Learning Neural Operators</article-title>
          <pub-id pub-id-type="doi">10.48550/arxiv.2412.10354</pub-id>
        </element-citation>
      </ref>
      <ref id="R9">
        <element-citation publication-type="preprint">
          <person-group person-group-type="author">
            <name>
              <surname>Kovachki</surname>
              <given-names>Nikola</given-names>
            </name>
            <name>
              <surname>Li</surname>
              <given-names>Zongyi</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>Burigede</given-names>
            </name>
            <name>
              <surname>Azizzadenesheli</surname>
              <given-names>Kamyar</given-names>
            </name>
            <name>
              <surname>Bhattacharya</surname>
              <given-names>Kaushik</given-names>
            </name>
            <name>
              <surname>Stuart</surname>
              <given-names>Andrew</given-names>
            </name>
            <name>
              <surname>Anandkumar</surname>
              <given-names>Anima</given-names>
            </name>
          </person-group>
          <year>2021</year>
          <article-title>Neural Operator: Learning Maps Between Function Spaces</article-title>
          <pub-id pub-id-type="doi">10.48550/arxiv.2108.08481</pub-id>
        </element-citation>
      </ref>
      <ref id="R10">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Kuhn Cuellar</surname>
              <given-names>Luis</given-names>
            </name>
            <name>
              <surname>Friedrich</surname>
              <given-names>Andreas</given-names>
            </name>
            <name>
              <surname>Gabernet</surname>
              <given-names>Gisela</given-names>
            </name>
            <name>
              <surname>de la Garza</surname>
              <given-names>Luis</given-names>
            </name>
            <name>
              <surname>Fillinger</surname>
              <given-names>Sven</given-names>
            </name>
            <name>
              <surname>Seyboldt</surname>
              <given-names>Adrian</given-names>
            </name>
            <name>
              <surname>Koch</surname>
              <given-names>Tobias</given-names>
            </name>
            <name>
              <surname>zur Oven-Krockhaus</surname>
              <given-names>Sven</given-names>
            </name>
            <name>
              <surname>Wanke</surname>
              <given-names>Friederike</given-names>
            </name>
            <name>
              <surname>Richter</surname>
              <given-names>Sandra</given-names>
            </name>
            <name>
              <surname>Thaiss</surname>
              <given-names>Wolfgang M.</given-names>
            </name>
            <name>
              <surname>Horger</surname>
              <given-names>Marius</given-names>
            </name>
            <name>
              <surname>Malek</surname>
              <given-names>Nisar</given-names>
            </name>
            <name>
              <surname>Harter</surname>
              <given-names>Klaus</given-names>
            </name>
            <name>
              <surname>Bitzer</surname>
              <given-names>Michael</given-names>
            </name>
            <name>
              <surname>Nahnsen</surname>
              <given-names>Sven</given-names>
            </name>
          </person-group>
          <year>2022</year>
          <month>2</month>
          <day>7</day>
          <article-title>A data management infrastructure for the integration of imaging and omics data in life sciences</article-title>
          <source>BMC Bioinformatics</source>
          <volume>23</volume>
          <issue>1</issue>
          <issn>1471-2105</issn>
          <pub-id pub-id-type="doi">10.1186/s12859-022-04584-3</pub-id>
        </element-citation>
      </ref>
      <ref id="R11">
        <element-citation publication-type="proceedings-article">
          <person-group person-group-type="author">
            <name>
              <surname>Kurth</surname>
              <given-names>Thorsten</given-names>
            </name>
            <name>
              <surname>Subramanian</surname>
              <given-names>Shashank</given-names>
            </name>
            <name>
              <surname>Harrington</surname>
              <given-names>Peter</given-names>
            </name>
            <name>
              <surname>Pathak</surname>
              <given-names>Jaideep</given-names>
            </name>
            <name>
              <surname>Mardani</surname>
              <given-names>Morteza</given-names>
            </name>
            <name>
              <surname>Hall</surname>
              <given-names>David</given-names>
            </name>
            <name>
              <surname>Miele</surname>
              <given-names>Andrea</given-names>
            </name>
            <name>
              <surname>Kashinath</surname>
              <given-names>Karthik</given-names>
            </name>
            <name>
              <surname>Anandkumar</surname>
              <given-names>Anima</given-names>
            </name>
          </person-group>
          <year>2023</year>
          <month>6</month>
          <day>26</day>
          <article-title>FourCastNet: Accelerating Global High-Resolution Weather Forecasting Using Adaptive Fourier Neural Operators</article-title>
          <source>Proceedings of the Platform for Advanced Scientific Computing Conference</source>
          <fpage>1</fpage>
          <lpage>11</lpage>
          <pub-id pub-id-type="doi">10.1145/3592979.3593412</pub-id>
        </element-citation>
      </ref>
      <ref id="R12">
        <element-citation publication-type="preprint">
          <person-group person-group-type="author">
            <name>
              <surname>Lai</surname>
              <given-names>Guokun</given-names>
            </name>
            <name>
              <surname>Chang</surname>
              <given-names>Wei-Cheng</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>Yiming</given-names>
            </name>
            <name>
              <surname>Liu</surname>
              <given-names>Hanxiao</given-names>
            </name>
          </person-group>
          <year>2017</year>
          <article-title>Modeling Long- and Short-Term Temporal Patterns with Deep Neural Networks</article-title>
          <pub-id pub-id-type="doi">10.48550/arxiv.1703.07015</pub-id>
        </element-citation>
      </ref>
      <ref id="R13">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Lan</surname>
              <given-names>Yunduo</given-names>
            </name>
            <name>
              <surname>Shin</surname>
              <given-names>Sung-Young</given-names>
            </name>
            <name>
              <surname>Nguyen</surname>
              <given-names>Lan K.</given-names>
            </name>
          </person-group>
          <year>2025</year>
          <month>3</month>
          <day>1</day>
          <article-title>From shallow to deep: The evolution of machine learning and mechanistic model integration in cancer research</article-title>
          <source>Current Opinion in Systems Biology</source>
          <volume>40</volume>
          <issn>2452-3100</issn>
          <fpage>100541</fpage>
          <lpage>100541</lpage>
          <pub-id pub-id-type="doi">10.1016/j.coisb.2025.100541</pub-id>
        </element-citation>
      </ref>
      <ref id="R14">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Li</surname>
              <given-names>Bian</given-names>
            </name>
            <name>
              <surname>Wang</surname>
              <given-names>Hanchen</given-names>
            </name>
            <name>
              <surname>Feng</surname>
              <given-names>Shihang</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>Xiu</given-names>
            </name>
            <name>
              <surname>Lin</surname>
              <given-names>Youzuo</given-names>
            </name>
          </person-group>
          <year>2023</year>
          <article-title>Solving Seismic Wave Equations on Variable Velocity Models With Fourier Neural Operator</article-title>
          <source>IEEE Transactions on Geoscience and Remote Sensing</source>
          <volume>61</volume>
          <issn>0196-2892</issn>
          <fpage>1</fpage>
          <lpage>18</lpage>
          <pub-id pub-id-type="doi">10.1109/tgrs.2023.3333663</pub-id>
        </element-citation>
      </ref>
      <ref id="R15">
        <element-citation publication-type="proceedings-article">
          <person-group person-group-type="author">
            <name>
              <surname>Long</surname>
              <given-names>Ruiqi</given-names>
            </name>
            <name>
              <surname>Xie</surname>
              <given-names>Hongyi</given-names>
            </name>
            <name>
              <surname>Lian</surname>
              <given-names>Defu</given-names>
            </name>
          </person-group>
          <year>2024</year>
          <month>7</month>
          <day>12</day>
          <article-title>Time Series Forecasting with Multi-scale Decomposition and Fourier Neural Operators</article-title>
          <source>2024 7th International Conference on Computer Information Science and Application Technology (CISAT)</source>
          <fpage>952</fpage>
          <lpage>957</lpage>
          <pub-id pub-id-type="doi">10.1109/cisat62382.2024.10695364</pub-id>
        </element-citation>
      </ref>
      <ref id="R16">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Mao</surname>
              <given-names>Bin-Hsu</given-names>
            </name>
            <name>
              <surname>Nguyen Thi</surname>
              <given-names>Kim Mai</given-names>
            </name>
            <name>
              <surname>Tang</surname>
              <given-names>Ming-Jer</given-names>
            </name>
            <name>
              <surname>Kamm</surname>
              <given-names>Roger D</given-names>
            </name>
            <name>
              <surname>Tu</surname>
              <given-names>Ting-Yuan</given-names>
            </name>
          </person-group>
          <year>2023</year>
          <month>1</month>
          <day>1</day>
          <article-title>The interface stiffness and topographic feature dictate interfacial invasiveness of cancer spheroids</article-title>
          <source>Biofabrication</source>
          <volume>15</volume>
          <issue>1</issue>
          <issn>1758-5082</issn>
          <fpage>015023</fpage>
          <lpage>015023</lpage>
          <pub-id pub-id-type="doi">10.1088/1758-5090/acaa00</pub-id>
        </element-citation>
      </ref>
      <ref id="R17">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Murray</surname>
              <given-names>Eleanor J</given-names>
            </name>
            <name>
              <surname>Marshall</surname>
              <given-names>Brandon D L</given-names>
            </name>
            <name>
              <surname>Buchanan</surname>
              <given-names>Ashley L</given-names>
            </name>
          </person-group>
          <year>2021</year>
          <month>2</month>
          <day>17</day>
          <article-title>Emulating Target Trials to Improve Causal Inference From Agent-Based Models</article-title>
          <source>American Journal of Epidemiology</source>
          <volume>190</volume>
          <issue>8</issue>
          <issn>0002-9262</issn>
          <fpage>1652</fpage>
          <lpage>1658</lpage>
          <pub-id pub-id-type="doi">10.1093/aje/kwab040</pub-id>
        </element-citation>
      </ref>
      <ref id="R18">
        <element-citation publication-type="preprint">
          <person-group person-group-type="author">
            <name>
              <surname>Paszke</surname>
              <given-names>Adam</given-names>
            </name>
            <name>
              <surname>Gross</surname>
              <given-names>Sam</given-names>
            </name>
            <name>
              <surname>Massa</surname>
              <given-names>Francisco</given-names>
            </name>
            <name>
              <surname>Lerer</surname>
              <given-names>Adam</given-names>
            </name>
            <name>
              <surname>Bradbury</surname>
              <given-names>James</given-names>
            </name>
            <name>
              <surname>Chanan</surname>
              <given-names>Gregory</given-names>
            </name>
            <name>
              <surname>Killeen</surname>
              <given-names>Trevor</given-names>
            </name>
            <name>
              <surname>Lin</surname>
              <given-names>Zeming</given-names>
            </name>
            <name>
              <surname>Gimelshein</surname>
              <given-names>Natalia</given-names>
            </name>
            <name>
              <surname>Antiga</surname>
              <given-names>Luca</given-names>
            </name>
            <name>
              <surname>Desmaison</surname>
              <given-names>Alban</given-names>
            </name>
            <name>
              <surname>Köpf</surname>
              <given-names>Andreas</given-names>
            </name>
            <name>
              <surname>Yang</surname>
              <given-names>Edward</given-names>
            </name>
            <name>
              <surname>DeVito</surname>
              <given-names>Zach</given-names>
            </name>
            <name>
              <surname>Raison</surname>
              <given-names>Martin</given-names>
            </name>
            <name>
              <surname>Tejani</surname>
              <given-names>Alykhan</given-names>
            </name>
            <name>
              <surname>Chilamkurthy</surname>
              <given-names>Sasank</given-names>
            </name>
            <name>
              <surname>Steiner</surname>
              <given-names>Benoit</given-names>
            </name>
            <name>
              <surname>Fang</surname>
              <given-names>Lu</given-names>
            </name>
            <name>
              <surname>Bai</surname>
              <given-names>Junjie</given-names>
            </name>
            <name>
              <surname>Chintala</surname>
              <given-names>Soumith</given-names>
            </name>
          </person-group>
          <year>2019</year>
          <article-title>PyTorch: An Imperative Style, High-Performance Deep Learning Library</article-title>
          <pub-id pub-id-type="doi">10.48550/arxiv.1912.01703</pub-id>
        </element-citation>
      </ref>
      <ref id="R19">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Rotem</surname>
              <given-names>Oded</given-names>
            </name>
            <name>
              <surname>Schwartz</surname>
              <given-names>Tamar</given-names>
            </name>
            <name>
              <surname>Maor</surname>
              <given-names>Ron</given-names>
            </name>
            <name>
              <surname>Tauber</surname>
              <given-names>Yishay</given-names>
            </name>
            <name>
              <surname>Shapiro</surname>
              <given-names>Maya Tsarfati</given-names>
            </name>
            <name>
              <surname>Meseguer</surname>
              <given-names>Marcos</given-names>
            </name>
            <name>
              <surname>Gilboa</surname>
              <given-names>Daniella</given-names>
            </name>
            <name>
              <surname>Seidman</surname>
              <given-names>Daniel S.</given-names>
            </name>
            <name>
              <surname>Zaritsky</surname>
              <given-names>Assaf</given-names>
            </name>
          </person-group>
          <year>2024</year>
          <month>8</month>
          <day>27</day>
          <article-title>Visual interpretability of image-based classification models by generative latent space disentanglement applied to in vitro fertilization</article-title>
          <source>Nature Communications</source>
          <volume>15</volume>
          <issue>1</issue>
          <issn>2041-1723</issn>
          <pub-id pub-id-type="doi">10.1038/s41467-024-51136-9</pub-id>
        </element-citation>
      </ref>
      <ref id="R20">
        <mixed-citation>Schmidt U, Weigert M, Broaddus C, Myers G. 2018. Cell Detection with Star-convex Polygons.</mixed-citation>
      </ref>
      <ref id="R21">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Soelistyo</surname>
              <given-names>Christopher J.</given-names>
            </name>
            <name>
              <surname>Vallardi</surname>
              <given-names>Giulia</given-names>
            </name>
            <name>
              <surname>Charras</surname>
              <given-names>Guillaume</given-names>
            </name>
            <name>
              <surname>Lowe</surname>
              <given-names>Alan R.</given-names>
            </name>
          </person-group>
          <year>2022</year>
          <month>6</month>
          <day>30</day>
          <article-title>Learning biophysical determinants of cell fate with deep neural networks</article-title>
          <source>Nature Machine Intelligence</source>
          <volume>4</volume>
          <issue>7</issue>
          <issn>2522-5839</issn>
          <fpage>636</fpage>
          <lpage>644</lpage>
          <pub-id pub-id-type="doi">10.1038/s42256-022-00503-6</pub-id>
        </element-citation>
      </ref>
      <ref id="R22">
        <element-citation publication-type="dataset">
          <person-group person-group-type="author">
            <name>
              <surname>Ngo</surname>
              <given-names>Thi Kim Ngan</given-names>
            </name>
          </person-group>
          <year>2022</year>
          <article-title>The interface stiffness and topographic feature dictate interfacial invasiveness of cancer spheroids</article-title>
          <pub-id pub-id-type="doi">10.17632/4v625rp3cj.1</pub-id>
        </element-citation>
      </ref>
      <ref id="R23">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Toulany</surname>
              <given-names>Nikan</given-names>
            </name>
            <name>
              <surname>Morales-Navarrete</surname>
              <given-names>Hernán</given-names>
            </name>
            <name>
              <surname>Čapek</surname>
              <given-names>Daniel</given-names>
            </name>
            <name>
              <surname>Grathwohl</surname>
              <given-names>Jannis</given-names>
            </name>
            <name>
              <surname>Ünalan</surname>
              <given-names>Murat</given-names>
            </name>
            <name>
              <surname>Müller</surname>
              <given-names>Patrick</given-names>
            </name>
          </person-group>
          <year>2023</year>
          <month>11</month>
          <day>23</day>
          <article-title>Uncovering developmental time and tempo using deep learning</article-title>
          <source>Nature Methods</source>
          <volume>20</volume>
          <issue>12</issue>
          <issn>1548-7091</issn>
          <fpage>2000</fpage>
          <lpage>2010</lpage>
          <pub-id pub-id-type="doi">10.1038/s41592-023-02083-8</pub-id>
        </element-citation>
      </ref>
      <ref id="R24">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Vicar</surname>
              <given-names>Tomas</given-names>
            </name>
            <name>
              <surname>Raudenska</surname>
              <given-names>Martina</given-names>
            </name>
            <name>
              <surname>Gumulec</surname>
              <given-names>Jaromir</given-names>
            </name>
            <name>
              <surname>Balvan</surname>
              <given-names>Jan</given-names>
            </name>
          </person-group>
          <year>2020</year>
          <month>1</month>
          <day>31</day>
          <article-title>The Quantitative-Phase Dynamics of Apoptosis and Lytic Cell Death</article-title>
          <source>Scientific Reports</source>
          <volume>10</volume>
          <issue>1</issue>
          <issn>2045-2322</issn>
          <pub-id pub-id-type="doi">10.1038/s41598-020-58474-w</pub-id>
        </element-citation>
      </ref>
      <ref id="R25">
        <element-citation publication-type="journal">
          <person-group person-group-type="author">
            <name>
              <surname>Yu</surname>
              <given-names>Jessica S.</given-names>
            </name>
            <name>
              <surname>Bagheri</surname>
              <given-names>Neda</given-names>
            </name>
          </person-group>
          <year>2020</year>
          <month>6</month>
          <day>11</day>
          <article-title>Agent-Based Models Predict Emergent Behavior of Heterogeneous Cell Populations in Dynamic Microenvironments</article-title>
          <source>Frontiers in Bioengineering and Biotechnology</source>
          <volume>8</volume>
          <issn>2296-4185</issn>
          <pub-id pub-id-type="doi">10.3389/fbioe.2020.00249</pub-id>
        </element-citation>
      </ref>
    </ref-list>
  </back>
</article>