generated from github/welcome-to-github
-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.html
82 lines (82 loc) · 36.9 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
<!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <title>Doanh C. Bui</title> <meta name="author" content="Doanh C. Bui"> <meta name="description" content="A simple, whitespace theme for academics. Based on [*folio](https://github.com/bogoli/-folio) design. "> <meta name="keywords" content="doanh c bui, bui cao doanh, doanhbc"> <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha256-DF7Zhf293AJxJNTmh5zhoYYIMs2oXitRfBjY+9L//AY=" crossorigin="anonymous"> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/css/mdb.min.css" integrity="sha256-jpjYvU3G3N6nrrBwXJoVEYI/0zw8htfFnhT9ljN3JJw=" crossorigin="anonymous"> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/[email protected]/css/all.min.css" integrity="sha256-mUZM63G8m73Mcidfrv5E+Y61y7a12O5mW4ezU3bxqW4=" crossorigin="anonymous"> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/css/academicons.min.css" integrity="sha256-i1+4qU2G2860dGGIOJscdC30s9beBXjFfzjWLjBRsBg=" crossorigin="anonymous"> <link rel="stylesheet" type="text/css" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700|Roboto+Slab:100,300,400,500,700|Material+Icons"> <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jwarby/jekyll-pygments-themes@master/github.css" media="" id="highlight_theme_light"> <link rel="shortcut icon" href="data:image/svg+xml,<svg%20xmlns=%22http://www.w3.org/2000/svg%22%20viewBox=%220%200%20100%20100%22><text%20y=%22.9em%22%20font-size=%2290%22>%E2%9A%9B%EF%B8%8F</text></svg>"> <link rel="stylesheet" href="/assets/css/main.css"> <link rel="canonical" href="https://caodoanh2001.github.io/"> <link rel="stylesheet" 
href="https://cdn.jsdelivr.net/gh/jwarby/jekyll-pygments-themes@master/native.css" media="none" id="highlight_theme_dark"> <script src="/assets/js/theme.js"></script> <script src="/assets/js/dark_mode.js"></script> </head> <body class="fixed-top-nav "> <header> <nav id="navbar" class="navbar navbar-light navbar-expand-sm fixed-top"> <div class="container"> <button class="navbar-toggler collapsed ml-auto" type="button" data-toggle="collapse" data-target="#navbarNav" aria-controls="navbarNav" aria-expanded="false" aria-label="Toggle navigation"> <span class="sr-only">Toggle navigation</span> <span class="icon-bar top-bar"></span> <span class="icon-bar middle-bar"></span> <span class="icon-bar bottom-bar"></span> </button> <div class="collapse navbar-collapse text-right" id="navbarNav"> <ul class="navbar-nav ml-auto flex-nowrap"> <li class="nav-item active"> <a class="nav-link" href="/">about<span class="sr-only">(current)</span></a> </li> <li class="nav-item "> <a class="nav-link" href="/blog/">blog</a> </li> <li class="nav-item "> <a class="nav-link" href="/publications/">publications</a> </li> <li class="nav-item "> <a class="nav-link" href="/cv/">cv</a> </li> <li class="toggle-container"> <button id="light-toggle" title="Change theme"> <i class="fas fa-moon"></i> <i class="fas fa-sun"></i> </button> </li> </ul> </div> </div> </nav> <progress id="progress" value="0"> <div class="progress-container"> <span class="progress-bar"></span> </div> </progress> </header> <div class="container mt-5"> <div class="post"> <header class="post-header"> <h1 class="post-title"> <span class="font-weight-bold">Doanh</span> C. Bui </h1> <p class="desc"><a href="http://www.kwaklab.net/" rel="external nofollow noopener" target="_blank">School of Electrical and Engineering, Korea University</a>. 
Seoul, Republic of Korea.</p> </header> <article> <div class="profile float-right"> <figure> <picture> <source class="responsive-img-srcset" media="(max-width: 480px)" srcset="/assets/img/doanh-480.webp"></source> <source class="responsive-img-srcset" media="(max-width: 800px)" srcset="/assets/img/doanh-800.webp"></source> <source class="responsive-img-srcset" media="(max-width: 1400px)" srcset="/assets/img/doanh-1400.webp"></source> <img src="/assets/img/doanh.png" class="img-fluid z-depth-1 rounded" width="auto" height="auto" alt="doanh.png" onerror="this.onerror=null; $('.responsive-img-srcset').remove();"> </picture> </figure> <div class="address"> <p>Seoul, Republic of Korea</p> </div> </div> <div class="clearfix"> <p>My name is Doanh C. Bui, and I am currently pursuing a master’s degree in Electrical Engineering at Korea University, where I am in my final semester under the supervision of Prof. Jin Tae Kwak at the <a href="http://www.kwaklab.net/" rel="external nofollow noopener" target="_blank">QuIIL Laboratory</a>. Prior to this, I obtained a Bachelor’s degree in Computer Science from the University of Information Technology (UIT), VNU-HCM in 2022, where I was supervised by Dr. Khang Nguyen at the <a href="https://uit-together.github.io/" rel="external nofollow noopener" target="_blank">UIT-Together Laboratory</a>. From November 2022 to February 2023, I also served as a teaching assistant at the Faculty of Software Engineering at UIT, where I was responsible for lecturing practice credits for the Introduction to Programming course during the Fall semester of 2022.</p> <p>Throughout my academic journey, I have focused on various aspects of Computer Vision, specifically in areas such as Object Detection, Document Image Understanding, Image Captioning, and Human-Object Interaction. 
Presently, my research revolves around leveraging image processing techniques for histopathology images.</p> </div> <h2><a href="/news/" style="color: inherit;">news</a></h2> <div class="news"> <div class="table-responsive" style="max-height: 30vw"> <table class="table table-sm table-borderless"> <tr> <th scope="row">Dec 10, 2024</th> <td> <a class="news-title" href="/news/announcement_aaai/">One paper has been accepted for presentation at AAAI2025 (A*-ranked)</a> </td> </tr> <tr> <th scope="row">Nov 29, 2024</th> <td> <a class="news-title" href="/news/announcement_master_thesis/">Defense master thesis about Computational Methods for Multi-Class Cancer Classification</a> </td> </tr> <tr> <th scope="row">Oct 7, 2024</th> <td> <a class="news-title" href="/news/announcement_leopard/">Placed 6th at MICCAI2024-LEOPARD challenge</a> </td> </tr> <tr> <th scope="row">Sep 20, 2024</th> <td> <a class="news-title" href="/news/announcement_accv2024/">One paper has been accepted for presentation at ACCV2024 (B-ranked)</a> </td> </tr> <tr> <th scope="row">Aug 6, 2024</th> <td> <a class="news-title" href="/news/announcement_mva/">One paper has been accepted by Machine Vision and Applications (SCIE Q2, IF = 2.4)</a> </td> </tr> <tr> <th scope="row">Jul 25, 2024</th> <td> <a class="news-title" href="/news/announcement_tmi/">One paper has been accepted by IEEE Transactions on Medical Imaging (SCIE Q1, IF = 8.9)</a> </td> </tr> <tr> <th scope="row">Jun 17, 2024</th> <td> <a class="news-title" href="/news/announcement_miccai2024/">One paper has been accepted for presentation at MICCAI2024 (A-ranked)</a> </td> </tr> <tr> <th scope="row">Mar 6, 2024</th> <td> <a class="news-title" href="/news/announcement_tcsvt/">One paper has been accepted by IEEE Transactions on Circuits and Systems for Video Technology (SCIE Q1, IF = 8.4)</a> </td> </tr> <tr> <th scope="row">Mar 2, 2024</th> <td> <a class="news-title" href="/news/announcement_cmpb/">One paper has been accepted by Computer Methods and 
Programs in Biomedicine (SCIE Q1, IF = 6.1)</a> </td> </tr> <tr> <th scope="row">Feb 5, 2024</th> <td> <a class="news-title" href="/news/announcement_thompson/">One paper has been accepted for presentation at MICCAI2023-Thompson (1st Place in Track 2 - Visual Question Answering)</a> </td> </tr> </table> </div> </div> <h2><a href="/publications/" style="color: inherit;">selected publications</a></h2> <div class="publications"> <ol class="bibliography"> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/higda.jpeg"></div> <div id="higda2024" class="col-sm-8"> <div class="title">HiGDA: Hierarchical Graph of Nodes to Learn Local-to-Global Topology for Semi-Supervised Domain Adaptation</div> <div class="author"> Ba Hung Ngo*, Doanh C. Bui*, Nhat-Tuong Do-Tran, and Tae Jong Choi</div> <div class="periodical"> <em>In 39th Annual AAAI Conference on Artificial Intelligence (AAAI) (Accepted)</em>, 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://arxiv.org/pdf/2412.11819" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-altmetric-id="" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-pmid="" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@inproceedings</span><span class="p">{</span><span class="nl">higda2024</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Ngo*, Ba Hung and Bui*, Doanh C. and Do-Tran, Nhat-Tuong and Choi, Tae Jong}</span><span class="p">,</span>
<span class="na">booktitle</span> <span class="p">=</span> <span class="s">{39th Annual AAAI Conference on Artificial Intelligence (AAAI) (Accepted)}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{HiGDA: Hierarchical Graph of Nodes to Learn Local-to-Global Topology for Semi-Supervised Domain Adaptation}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/MECFormer.png"></div> <div id="bui2024mecformer" class="col-sm-8"> <div class="title">MECFormer: Multi-task Whole Slide Image Classification with Expert Consultation Network</div> <div class="author"> Doanh C. Bui, and Jin Tae Kwak</div> <div class="periodical"> <em>In 17th Asian Conference on Computer Vision (ACCV) (Accepted)</em>, 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://openaccess.thecvf.com/content/ACCV2024/html/Bui_MECFormer_Multi-task_Whole_Slide_Image_Classification_with_Expert_Consultation_Network_ACCV_2024_paper.html" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-altmetric-id="" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-pmid="" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@inproceedings</span><span class="p">{</span><span class="nl">bui2024mecformer</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Bui, Doanh C. and Kwak, Jin Tae}</span><span class="p">,</span>
<span class="na">booktitle</span> <span class="p">=</span> <span class="s">{17th Asian Conference on Computer Vision (ACCV) (Accepted)}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{MECFormer: Multi-task Whole Slide Image Classification with Expert Consultation Network}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/TMD.png"></div> <div id="BuiTMDNet2024" class="col-sm-8"> <div class="title">Transformer with Multi-level Grid Features and Depth Pooling for Image Captioning</div> <div class="author"> Doanh C. Bui, Tam V. Nguyen, and Khang Nguyen</div> <div class="periodical"> <em>Machine Vision and Applications</em>, Aug 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://link.springer.com/article/10.1007/s00138-024-01599-z" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-altmetric-id="" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-pmid="" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@article</span><span class="p">{</span><span class="nl">BuiTMDNet2024</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Bui, Doanh C. and Nguyen, Tam V. and Nguyen, Khang}</span><span class="p">,</span>
<span class="na">journal</span> <span class="p">=</span> <span class="s">{Machine Vision and Applications}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{Transformer with Multi-level Grid Features and Depth Pooling for Image Captioning}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="na">month</span> <span class="p">=</span> <span class="nv">aug</span><span class="p">,</span>
<span class="na">day</span> <span class="p">=</span> <span class="s">{20}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{35}</span><span class="p">,</span>
<span class="na">number</span> <span class="p">=</span> <span class="s">{5}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{118}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/scubanet.png"></div> <div id="scubanet2024" class="col-sm-8"> <div class="title">Spatially-constrained and -unconstrained bi-graph interaction network for multi-organ pathology image classification</div> <div class="author"> Doanh C. Bui, Boram Song, Kyungeun Kim, and Jin Tae Kwak</div> <div class="periodical"> <em>IEEE Transactions on Medical Imaging</em>, Aug 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://ieeexplore.ieee.org/document/10616189" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-doi="10.1109/TMI.2024.3436080" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-doi="10.1109/TMI.2024.3436080" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@article</span><span class="p">{</span><span class="nl">scubanet2024</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Bui, Doanh C. and Song, Boram and Kim, Kyungeun and Kwak, Jin Tae}</span><span class="p">,</span>
<span class="na">journal</span> <span class="p">=</span> <span class="s">{IEEE Transactions on Medical Imaging}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{Spatially-constrained and -unconstrained bi-graph interaction network for multi-organ pathology image classification}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">number</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{1-1}</span><span class="p">,</span>
<span class="na">keywords</span> <span class="p">=</span> <span class="s">{Pathology;Transformers;Cancer;Image edge detection;Vectors;Computational modeling;Image classification;Computational pathology;cancer classification;graph neural network;Transformer;interaction}</span><span class="p">,</span>
<span class="na">doi</span> <span class="p">=</span> <span class="s">{10.1109/TMI.2024.3436080}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/thompson.jpg"></div> <div id="10.1007/978-3-031-71626-3_10" class="col-sm-8"> <div class="title">QuIIL at T3 Challenge: Towards Automation in Life-Saving Intervention Procedures from First-Person View</div> <div class="author"> Trinh T. L. Vuong, Doanh C. Bui, and Jin Tae Kwak</div> <div class="periodical"> <em>In AI for Brain Lesion Detection and Trauma Video Action Recognition</em>, Aug 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="abstract btn btn-sm z-depth-0" role="button">Abs</a> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://link.springer.com/chapter/10.1007/978-3-031-71626-3_10" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-altmetric-id="" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-pmid="" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="abstract hidden"> <p>In this paper, we present our solutions for a spectrum of automation tasks in life-saving intervention procedures within the Trauma THOMPSON (T3) Challenge, encompassing action recognition, action anticipation, and Visual Question Answering (VQA). For action recognition and anticipation, we propose a pre-processing strategy that samples and stitches multiple inputs into a single image and then incorporates momentum- and attention-based knowledge distillation to improve the performance of the two tasks. 
For training, we present an action dictionary-guided design, which consistently yields the most favorable results across our experiments. In the realm of VQA, we leverage object-level features and deploy co-attention networks to train both object and question features. Notably, we introduce a novel frame-question cross-attention mechanism at the network’s core for enhanced performance. Our solutions achieve the 2nd rank in action recognition and anticipation tasks and 1st rank in the VQA task. The source code is available at https://github.com/QuIIL/QuIIL_thompson_solution.</p> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@inproceedings</span><span class="p">{</span><span class="nl">10.1007/978-3-031-71626-3_10</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{T. L. Vuong, Trinh and C. Bui, Doanh and Kwak, Jin Tae}</span><span class="p">,</span>
<span class="na">editor</span> <span class="p">=</span> <span class="s">{Bao, Rina and Grant, Ellen and Kirkpatrick, Andrew and Wachs, Juan and Ou, Yangming}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{QuIIL at T3 Challenge: Towards Automation in Life-Saving Intervention Procedures from First-Person View}</span><span class="p">,</span>
<span class="na">booktitle</span> <span class="p">=</span> <span class="s">{AI for Brain Lesion Detection and Trauma Video Action Recognition}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="na">publisher</span> <span class="p">=</span> <span class="s">{Springer Nature Switzerland}</span><span class="p">,</span>
<span class="na">address</span> <span class="p">=</span> <span class="s">{Cham}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{82--93}</span><span class="p">,</span>
<span class="na">isbn</span> <span class="p">=</span> <span class="s">{978-3-031-71626-3}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/daxnet.png"></div> <div id="BUI2024108112" class="col-sm-8"> <div class="title">DAX-Net: A dual-branch dual-task adaptive cross-weight feature fusion network for robust multi-class cancer classification in pathology images</div> <div class="author"> Doanh C. Bui, Boram Song, Kyungeun Kim, and Jin Tae Kwak</div> <div class="periodical"> <em>Computer Methods and Programs in Biomedicine</em>, Aug 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://www.sciencedirect.com/science/article/pii/S0169260724001081" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-doi="https://doi.org/10.1016/j.cmpb.2024.108112" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-doi="https://doi.org/10.1016/j.cmpb.2024.108112" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@article</span><span class="p">{</span><span class="nl">BUI2024108112</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{DAX-Net: A dual-branch dual-task adaptive cross-weight feature fusion network for robust multi-class cancer classification in pathology images}</span><span class="p">,</span>
<span class="na">journal</span> <span class="p">=</span> <span class="s">{Computer Methods and Programs in Biomedicine}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{248}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{108112}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="na">issn</span> <span class="p">=</span> <span class="s">{0169-2607}</span><span class="p">,</span>
<span class="na">doi</span> <span class="p">=</span> <span class="s">{https://doi.org/10.1016/j.cmpb.2024.108112}</span><span class="p">,</span>
<span class="na">url</span> <span class="p">=</span> <span class="s">{https://www.sciencedirect.com/science/article/pii/S0169260724001081}</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Bui, Doanh C. and Song, Boram and Kim, Kyungeun and Kwak, Jin Tae}</span><span class="p">,</span>
<span class="na">keywords</span> <span class="p">=</span> <span class="s">{Cancer classification, Hybrid model, CNN, Transformer, Feature fusion, Multi-task learning}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/astt.png"></div> <div id="10466765" class="col-sm-8"> <div class="title">Transformer-based Spatio-Temporal Unsupervised Traffic Anomaly Detection in Aerial Videos</div> <div class="author"> Tung Minh Tran, Doanh C. Bui, Tam V. Nguyen, and Khang Nguyen</div> <div class="periodical"> <em>IEEE Transactions on Circuits and Systems for Video Technology</em>, Aug 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://ieeexplore.ieee.org/abstract/document/10466765" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-doi="10.1109/TCSVT.2024.3376399" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-doi="10.1109/TCSVT.2024.3376399" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@article</span><span class="p">{</span><span class="nl">10466765</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Tran, Tung Minh and Bui, Doanh C. and Nguyen, Tam V. and Nguyen, Khang}</span><span class="p">,</span>
<span class="na">journal</span> <span class="p">=</span> <span class="s">{IEEE Transactions on Circuits and Systems for Video Technology}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{Transformer-based Spatio-Temporal Unsupervised Traffic Anomaly Detection in Aerial Videos}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">number</span> <span class="p">=</span> <span class="s">{}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{1-1}</span><span class="p">,</span>
<span class="na">keywords</span> <span class="p">=</span> <span class="s">{Surveillance;Anomaly detection;Transformers;Traffic control;Computational modeling;Training;Task analysis;Anomaly Detection;Convolutional Neural Networks;Vision Transformers;Traffic Surveillance;Aerial Images}</span><span class="p">,</span>
<span class="na">doi</span> <span class="p">=</span> <span class="s">{10.1109/TCSVT.2024.3376399}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/c2tnet.png"></div> <div id="Bui_2024_WACV" class="col-sm-8"> <div class="title">C2T-Net: Channel-Aware Cross-Fused Transformer-Style Networks for Pedestrian Attribute Recognition</div> <div class="author"> Doanh C. Bui, Thinh V. Le, and Ba Hung Ngo</div> <div class="periodical"> <em>In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) Workshops</em>, Jan 2024 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://openaccess.thecvf.com/content/WACV2024W/RWS/html/Bui_C2T-Net_Channel-Aware_Cross-Fused_Transformer-Style_Networks_for_Pedestrian_Attribute_Recognition_WACVW_2024_paper.html" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-altmetric-id="" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-pmid="" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@inproceedings</span><span class="p">{</span><span class="nl">Bui_2024_WACV</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Bui, Doanh C. and Le, Thinh V. and Ngo, Ba Hung}</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{C2T-Net: Channel-Aware Cross-Fused Transformer-Style Networks for Pedestrian Attribute Recognition}</span><span class="p">,</span>
<span class="na">booktitle</span> <span class="p">=</span> <span class="s">{Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision (WACV) Workshops}</span><span class="p">,</span>
<span class="na">month</span> <span class="p">=</span> <span class="nv">jan</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2024}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{351-358}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> <!-- Publication entry: Nguyen et al., Pattern Recognition Letters 2023. NOTE: badge embeds require a bare DOI in data-doi (not the https://doi.org/ URL), otherwise the Altmetric/Dimensions lookups fail and the badges are hidden by data-hide-no-mentions / data-hide-zero-citations. --> <li> <div class="row"> <div class="col-sm-2 preview"><img class="preview z-depth-1 rounded" src="/assets/img/publication_preview/PRL.png" alt=""></div> <div id="nguyen2023improving" class="col-sm-8"> <div class="title">Improving human–object interaction with auxiliary semantic information and enhanced instance representation</div> <div class="author"> Khang Nguyen, Thinh V Le, Huyen Ngoc N Van, and Doanh C Bui</div> <div class="periodical"> <em>Pattern Recognition Letters</em>, Jan 2023 </div> <div class="periodical"> </div> <div class="links"> <a class="bibtex btn btn-sm z-depth-0" role="button">Bib</a> <a href="https://www.sciencedirect.com/science/article/pii/S0167865523002611" class="btn btn-sm z-depth-0" role="button" rel="external nofollow noopener" target="_blank">PDF</a> </div> <div class="badges"> <span class="altmetric-embed" data-doi="10.1016/j.patrec.2023.09.013" data-hide-no-mentions="true" data-hide-less-than="15" data-badge-type="2" data-badge-popover="right"></span> <span class="__dimensions_badge_embed__" data-doi="10.1016/j.patrec.2023.09.013" data-hide-zero-citations="true" data-style="small_rectangle" data-legend="hover-right" style="margin-bottom: 6px;"></span> </div> <div class="bibtex hidden"> <figure class="highlight"><pre><code class="language-bibtex" data-lang="bibtex"><span class="nc">@article</span><span class="p">{</span><span class="nl">nguyen2023improving</span><span class="p">,</span>
<span class="na">title</span> <span class="p">=</span> <span class="s">{Improving human--object interaction with auxiliary semantic information and enhanced instance representation}</span><span class="p">,</span>
<span class="na">author</span> <span class="p">=</span> <span class="s">{Nguyen, Khang and Le, Thinh V and Van, Huyen Ngoc N and Bui, Doanh C}</span><span class="p">,</span>
<span class="na">journal</span> <span class="p">=</span> <span class="s">{Pattern Recognition Letters}</span><span class="p">,</span>
<span class="na">volume</span> <span class="p">=</span> <span class="s">{175}</span><span class="p">,</span>
<span class="na">pages</span> <span class="p">=</span> <span class="s">{38--43}</span><span class="p">,</span>
<span class="na">year</span> <span class="p">=</span> <span class="s">{2023}</span><span class="p">,</span>
<span class="na">publisher</span> <span class="p">=</span> <span class="s">{Elsevier}</span><span class="p">,</span>
<span class="na">doi</span> <span class="p">=</span> <span class="s">{https://doi.org/10.1016/j.patrec.2023.09.013}</span><span class="p">,</span>
<span class="p">}</span></code></pre></figure> </div> </div> </div> </li> </ol> </div> <div class="social"> <div class="contact-icons"> <a href="mailto:%64%6F%61%6E%68%62%63@%6B%6F%72%65%61.%61%63.%6B%72" title="email"><i class="fas fa-envelope"></i></a> <a href="https://orcid.org/0000-0003-1310-5808" title="ORCID" rel="external nofollow noopener" target="_blank"><i class="ai ai-orcid"></i></a> <a href="https://scholar.google.com/citations?user=WHviN4AAAAAJ" title="Google Scholar" rel="external nofollow noopener" target="_blank"><i class="ai ai-google-scholar"></i></a> <a href="https://github.com/caodoanh2001" title="GitHub" rel="external nofollow noopener" target="_blank"><i class="fab fa-github"></i></a> <a href="https://www.linkedin.com/in/buicaodoanh" title="LinkedIn" rel="external nofollow noopener" target="_blank"><i class="fab fa-linkedin"></i></a> </div> <!-- Plain-text address restored from the percent-encoded mailto: above (a "[email protected]" scrape artifact stood here). --> <div class="contact-note"> Feel free to contact me via email at doanhbc@korea.ac.kr </div> </div> </article> </div> </div> <footer class="fixed-bottom"> <div class="container mt-0"> © Copyright 2024 Doanh C. Bui. Powered by <a href="https://jekyllrb.com/" target="_blank" rel="external nofollow noopener">Jekyll</a> with <a href="https://github.com/alshedivat/al-folio" rel="external nofollow noopener" target="_blank">al-folio</a> theme. Hosted by <a href="https://pages.github.com/" target="_blank" rel="external nofollow noopener">GitHub Pages</a>. Photos from <a href="https://unsplash.com" target="_blank" rel="external nofollow noopener">Unsplash</a>. 
</div> </footer> <!-- Third-party scripts: jQuery/Bootstrap/MDB/Masonry/medium-zoom pinned with SRI hashes; Altmetric and Dimensions badge loaders; MathJax. SECURITY: the original polyfill.io host was compromised in a June 2024 supply-chain attack and served malicious code; it is replaced below by Cloudflare's drop-in mirror at cdnjs.cloudflare.com/polyfill/. --> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/jquery.min.js" integrity="sha256-/xUj+3OJU5yExlq6GSYGSHk7tPXikynS7ogEvDej/m4=" crossorigin="anonymous"></script> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha256-fgLAgv7fyCGopR/gBNq2iW3ZKIdqIcyshnUULC4vex8=" crossorigin="anonymous"></script> <script src="https://cdn.jsdelivr.net/npm/[email protected]/js/mdb.min.js" integrity="sha256-NdbiivsvWt7VYCt6hYNT3h/th9vSTL4EDWeGs5SN3DA=" crossorigin="anonymous"></script> <script defer src="https://cdn.jsdelivr.net/npm/[email protected]/dist/masonry.pkgd.min.js" integrity="sha256-Nn1q/fx0H7SNLZMQ5Hw5JLaTRZp0yILA/FRexe19VdI=" crossorigin="anonymous"></script> <script defer src="https://cdn.jsdelivr.net/npm/imagesloaded@4/imagesloaded.pkgd.min.js"></script> <script defer src="/assets/js/masonry.js" type="text/javascript"></script> <script defer src="https://cdn.jsdelivr.net/npm/[email protected]/dist/medium-zoom.min.js" integrity="sha256-7PhEpEWEW0XXQ0k6kQrPKwuoIomz8R8IYyuU1Qew4P8=" crossorigin="anonymous"></script> <script defer src="/assets/js/zoom.js"></script> <script defer src="/assets/js/common.js"></script> <script async src="https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js"></script> <script async src="https://badge.dimensions.ai/badge.js"></script> <script type="text/javascript">window.MathJax={tex:{tags:"ams"}};</script> <script defer type="text/javascript" id="MathJax-script" src="https://cdn.jsdelivr.net/npm/[email protected]/es5/tex-mml-chtml.js"></script> <script defer src="https://cdnjs.cloudflare.com/polyfill/v3/polyfill.min.js?features=es6"></script> <!-- Inline scroll-progress indicator: uses a native <progress max value> element when supported, otherwise falls back to resizing a width-percentage bar; left byte-identical (minified, order-sensitive). --> <script type="text/javascript">function progressBarSetup(){"max"in 
document.createElement("progress")?(initializeProgressElement(),$(document).on("scroll",function(){progressBar.attr({value:getCurrentScrollPosition()})}),$(window).on("resize",initializeProgressElement)):(resizeProgressBar(),$(document).on("scroll",resizeProgressBar),$(window).on("resize",resizeProgressBar))}function getCurrentScrollPosition(){return $(window).scrollTop()}function initializeProgressElement(){let e=$("#navbar").outerHeight(!0);$("body").css({"padding-top":e}),$("progress-container").css({"padding-top":e}),progressBar.css({top:e}),progressBar.attr({max:getDistanceToScroll(),value:getCurrentScrollPosition()})}function getDistanceToScroll(){return $(document).height()-$(window).height()}function resizeProgressBar(){progressBar.css({width:getWidthPercentage()+"%"})}function getWidthPercentage(){return getCurrentScrollPosition()/getDistanceToScroll()*100}const progressBar=$("#progress");window.onload=function(){setTimeout(progressBarSetup,50)};</script> </body> </html>