From e848907e546aca874b1796878311d43f94bf268b Mon Sep 17 00:00:00 2001
From: Joshua Lochner
Date: Tue, 26 Nov 2024 18:06:33 +0200
Subject: [PATCH] Add new models (Janus, Qwen2-VL, JinaCLIP, LLaVA-OneVision, ViTPose, MGP-STR) & refactor processors. (#1001)

* Extract processor classes into separate folders
* Fix typo
* Define which classes use `processor_config.json`
* [WIP] Add support for `deepseek-ai/Janus-1.3B`
* Fix unit tests
* Remove redundant `extends` JSDoc
* Fix JSDoc
* Update Janus JSDoc
* Improve `VLChatProcessor` processor types
* Expose ImageFeatureExtractor as copy of ImageProcessor
* Add support for `LLaVA-OneVision`
* Add support for ViTPose
* Add ViTPose to README
* Bump dependencies
* Add support for `MGP-STR` models
* Documentation fixes
* Add support for `Qwen2VLImageProcessor`
* Format tests folder
* Use `AutoImageProcessor` for image processors
* Add support for `Qwen2VLProcessor`
* Fix `image_grid_thw` dtype
* Fix bigint product
* [WIP] Support for qwen2vl models
* Add support for JinaCLIP models
* Add listed support for Janus
* Fix qwen2vl processor unit test
* Update dependency versions
* Export logits processors
* Expose batch_decode for processor
* Qwen2VL - Implement `get_rope_index`
* Add `Qwen2VLForConditionalGeneration` unit tests
* Update dependencies
* Update `onnxslim==0.1.42`
* `tokenizer.default_chat_template` has been removed
* Add listed support for Qwen2-VL
* Fix `.from_pretrained` function type
---
 README.md                                        |    6 +
 docs/scripts/build_readme.py                     |    2 +
 docs/snippets/1_quick-tour.snippet               |    4 +-
 docs/snippets/5_supported-tasks.snippet          |    2 +-
 docs/snippets/6_supported-models.snippet         |    6 +
 package-lock.json                                |   49 +-
 package.json                                     |    6 +-
 scripts/convert.py                               |   11 +-
 scripts/requirements.txt                         |   10 +-
 src/base/feature_extraction_utils.js             |   54 +
 src/base/image_processors_utils.js               | 1089 +++++++
 src/base/processing_utils.js                     |  145 +
 src/configs.js                                   |    9 +-
 src/models.js                                    |  631 +++-
 ...xtraction_audio_spectrogram_transformer.js    |   90 +
 src/models/auto/feature_extraction_auto.js       |   41 +
 src/models/auto/image_processing_auto.js         |   29 +
 src/models/auto/processing_auto.js               |  100 +
 src/models/beit/image_processing_beit.js         |    5 +
 src/models/bit/image_processing_bit.js           |    5 +
 .../image_processing_chinese_clip.js             |    5 +
 src/models/clap/feature_extraction_clap.js       |  159 +
 src/models/clip/image_processing_clip.js         |    6 +
 .../convnext/image_processing_convnext.js        |   45 +
 src/models/deit/image_processing_deit.js         |    6 +
 src/models/detr/image_processing_detr.js         |   52 +
 src/models/donut/image_processing_donut.js       |   31 +
 src/models/dpt/image_processing_dpt.js           |    6 +
 .../image_processing_efficientnet.js             |   13 +
 src/models/feature_extractors.js                 |   12 +
 src/models/florence2/processing_florence2.js     |  128 +
 src/models/glpn/image_processing_glpn.js         |    5 +
 src/models/image_processors.js                   |   36 +
 src/models/janus/image_processing_janus.js       |   26 +
 src/models/janus/processing_janus.js             |  123 +
 .../jina_clip/image_processing_jina_clip.js      |    5 +
 .../image_processing_llava_onevision.js          |    5 +
 .../image_processing_mask2former.js              |    5 +
 .../maskformer/image_processing_maskformer.js    |   18 +
 src/models/mgp_str/processing_mgp_str.js         |  170 ++
 .../image_processing_mobilenet_v1.js             |    7 +
 .../image_processing_mobilenet_v2.js             |    7 +
 .../image_processing_mobilenet_v3.js             |    7 +
 .../image_processing_mobilenet_v4.js             |    7 +
 .../mobilevit/image_processing_mobilevit.js      |    6 +
 src/models/nougat/image_processing_nougat.js     |    5 +
 src/models/owlv2/image_processing_owlv2.js       |    5 +
 src/models/owlvit/image_processing_owlvit.js     |   12 +
src/models/owlvit/processing_owlvit.js | 7 + src/models/processors.js | 10 + src/models/pvt/image_processing_pvt.js | 5 + .../pyannote/feature_extraction_pyannote.js | 28 + src/models/pyannote/processing_pyannote.js | 71 + .../qwen2_vl/image_processing_qwen2_vl.js | 52 + src/models/qwen2_vl/processing_qwen2_vl.js | 52 + .../rt_detr/image_processing_rt_detr.js | 12 + src/models/sam/image_processing_sam.js | 242 ++ src/models/sam/processing_sam.js | 20 + .../sapiens/image_processing_sapiens.js | 13 + .../feature_extraction_seamless_m4t.js | 180 ++ .../segformer/image_processing_segformer.js | 13 + src/models/siglip/image_processing_siglip.js | 5 + .../speecht5/feature_extraction_speecht5.js | 4 + src/models/speecht5/processing_speecht5.js | 17 + .../swin2sr/image_processing_swin2sr.js | 24 + src/models/vit/image_processing_vit.js | 7 + .../vitmatte/image_processing_vitmatte.js | 50 + .../vitpose/image_processing_vitpose.js | 89 + .../wav2vec2/feature_extraction_wav2vec2.js | 44 + src/models/wav2vec2/processing_wav2vec2.js | 15 + .../wespeaker/feature_extraction_wespeaker.js | 100 + .../whisper/feature_extraction_whisper.js | 84 + src/models/whisper/processing_whisper.js | 21 + src/models/yolos/image_processing_yolos.js | 12 + src/pipelines.js | 16 +- src/processors.js | 2655 ----------------- src/tokenizers.js | 3 + src/transformers.js | 17 +- src/utils/constants.js | 9 +- tests/models/roberta/tokenization.js | 4 +- tests/processors.test.js | 106 +- tests/tiny_random.test.js | 118 +- 82 files changed, 4521 insertions(+), 2790 deletions(-) create mode 100644 src/base/feature_extraction_utils.js create mode 100644 src/base/image_processors_utils.js create mode 100644 src/base/processing_utils.js create mode 100644 src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js create mode 100644 src/models/auto/feature_extraction_auto.js create mode 100644 src/models/auto/image_processing_auto.js create mode 100644 src/models/auto/processing_auto.js create mode 100644 src/models/beit/image_processing_beit.js create mode 100644 src/models/bit/image_processing_bit.js create mode 100644 src/models/chinese_clip/image_processing_chinese_clip.js create mode 100644 src/models/clap/feature_extraction_clap.js create mode 100644 src/models/clip/image_processing_clip.js create mode 100644 src/models/convnext/image_processing_convnext.js create mode 100644 src/models/deit/image_processing_deit.js create mode 100644 src/models/detr/image_processing_detr.js create mode 100644 src/models/donut/image_processing_donut.js create mode 100644 src/models/dpt/image_processing_dpt.js create mode 100644 src/models/efficientnet/image_processing_efficientnet.js create mode 100644 src/models/feature_extractors.js create mode 100644 src/models/florence2/processing_florence2.js create mode 100644 src/models/glpn/image_processing_glpn.js create mode 100644 src/models/image_processors.js create mode 100644 src/models/janus/image_processing_janus.js create mode 100644 src/models/janus/processing_janus.js create mode 100644 src/models/jina_clip/image_processing_jina_clip.js create mode 100644 src/models/llava_onevision/image_processing_llava_onevision.js create mode 100644 src/models/mask2former/image_processing_mask2former.js create mode 100644 src/models/maskformer/image_processing_maskformer.js create mode 100644 src/models/mgp_str/processing_mgp_str.js create mode 100644 src/models/mobilenet_v1/image_processing_mobilenet_v1.js create mode 100644 
src/models/mobilenet_v2/image_processing_mobilenet_v2.js create mode 100644 src/models/mobilenet_v3/image_processing_mobilenet_v3.js create mode 100644 src/models/mobilenet_v4/image_processing_mobilenet_v4.js create mode 100644 src/models/mobilevit/image_processing_mobilevit.js create mode 100644 src/models/nougat/image_processing_nougat.js create mode 100644 src/models/owlv2/image_processing_owlv2.js create mode 100644 src/models/owlvit/image_processing_owlvit.js create mode 100644 src/models/owlvit/processing_owlvit.js create mode 100644 src/models/processors.js create mode 100644 src/models/pvt/image_processing_pvt.js create mode 100644 src/models/pyannote/feature_extraction_pyannote.js create mode 100644 src/models/pyannote/processing_pyannote.js create mode 100644 src/models/qwen2_vl/image_processing_qwen2_vl.js create mode 100644 src/models/qwen2_vl/processing_qwen2_vl.js create mode 100644 src/models/rt_detr/image_processing_rt_detr.js create mode 100644 src/models/sam/image_processing_sam.js create mode 100644 src/models/sam/processing_sam.js create mode 100644 src/models/sapiens/image_processing_sapiens.js create mode 100644 src/models/seamless_m4t/feature_extraction_seamless_m4t.js create mode 100644 src/models/segformer/image_processing_segformer.js create mode 100644 src/models/siglip/image_processing_siglip.js create mode 100644 src/models/speecht5/feature_extraction_speecht5.js create mode 100644 src/models/speecht5/processing_speecht5.js create mode 100644 src/models/swin2sr/image_processing_swin2sr.js create mode 100644 src/models/vit/image_processing_vit.js create mode 100644 src/models/vitmatte/image_processing_vitmatte.js create mode 100644 src/models/vitpose/image_processing_vitpose.js create mode 100644 src/models/wav2vec2/feature_extraction_wav2vec2.js create mode 100644 src/models/wav2vec2/processing_wav2vec2.js create mode 100644 src/models/wespeaker/feature_extraction_wespeaker.js create mode 100644 src/models/whisper/feature_extraction_whisper.js create mode 100644 src/models/whisper/processing_whisper.js create mode 100644 src/models/yolos/image_processing_yolos.js delete mode 100644 src/processors.js diff --git a/README.md b/README.md index a7cdf82ff..73520ea02 100644 --- a/README.md +++ b/README.md @@ -338,16 +338,20 @@ You can refine your search by selecting the task you're interested in (e.g., [te 1. **[Hiera](https://huggingface.co/docs/transformers/model_doc/hiera)** (from Meta) released with the paper [Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles](https://arxiv.org/pdf/2306.00989) by Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer. 1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed. 1. 
**JAIS** (from Core42) released with the paper [Jais and Jais-chat: Arabic-Centric Foundation and Instruction-Tuned Open Generative Large Language Models](https://arxiv.org/pdf/2308.16149) by Neha Sengupta, Sunil Kumar Sahu, Bokang Jia, Satheesh Katipomu, Haonan Li, Fajri Koto, William Marshall, Gurpreet Gosal, Cynthia Liu, Zhiming Chen, Osama Mohammed Afzal, Samta Kamboj, Onkar Pandit, Rahul Pal, Lalit Pradhan, Zain Muhammad Mujahid, Massa Baali, Xudong Han, Sondos Mahmoud Bsharat, Alham Fikri Aji, Zhiqiang Shen, Zhengzhong Liu, Natalia Vassilieva, Joel Hestness, Andy Hock, Andrew Feldman, Jonathan Lee, Andrew Jackson, Hector Xuguang Ren, Preslav Nakov, Timothy Baldwin, Eric Xing. +1. **Janus** (from DeepSeek) released with the paper [Janus: Decoupling Visual Encoding for Unified Multimodal Understanding and Generation](https://arxiv.org/abs/2410.13848) Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, Ping Luo. +1. **JinaCLIP** (from Jina AI) released with the paper [Jina CLIP: Your CLIP Model Is Also Your Text Retriever](https://arxiv.org/abs/2405.20204) by Andreas Koukounas, Georgios Mastrapas, Michael Günther, Bo Wang, Scott Martens, Isabelle Mohr, Saba Sturua, Mohammad Kalim Akram, Joan Fontanals Martínez, Saahil Ognawala, Susana Guzman, Maximilian Werk, Nan Wang, Han Xiao. 1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang. 1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. 1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushka rMishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing EllenTan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom. 1. 
**[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (from Microsoft Research & University of Wisconsin-Madison) released with the paper [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee. +1. **[LLaVA-OneVision](https://huggingface.co/docs/transformers/model_doc/llava_onevision)** (from ByteDance & NTU & CUHK & HKUST) released with the paper [LLaVA-OneVision: Easy Visual Task Transfer](https://arxiv.org/abs/2408.03326) by Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, Chunyuan Li 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao. 1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 1. 
**[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli. 1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. @@ -377,6 +381,7 @@ You can refine your search by selecting the task you're interested in (e.g., [te 1. **[PVT](https://huggingface.co/docs/transformers/main/model_doc/pvt)** (from Nanjing University, The University of Hong Kong etc.) released with the paper [Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions](https://arxiv.org/pdf/2102.12122.pdf) by Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao. 1. **PyAnnote** released in the repository [pyannote/pyannote-audio](https://github.com/pyannote/pyannote-audio) by Hervé Bredin. 1. **[Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2)** (from the Qwen team, Alibaba Group) released with the paper [Qwen Technical Report](https://arxiv.org/abs/2309.16609) by Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou and Tianhang Zhu. +1. **[Qwen2-VL](https://huggingface.co/docs/transformers/model_doc/qwen2_vl)** (from the Qwen team, Alibaba Group) released with the paper [Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond](https://arxiv.org/abs/2308.12966) by Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, Jingren Zhou. 1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. 1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. @@ -401,6 +406,7 @@ You can refine your search by selecting the task you're interested in (e.g., [te 1. 
**[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. 1. **[ViTMatte](https://huggingface.co/docs/transformers/model_doc/vitmatte)** (from HUST-VL) released with the paper [ViTMatte: Boosting Image Matting with Pretrained Plain Vision Transformers](https://arxiv.org/abs/2305.15272) by Jingfeng Yao, Xinggang Wang, Shusheng Yang, Baoyuan Wang. 1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. +1. **[ViTPose](https://huggingface.co/docs/transformers/model_doc/vitpose)** (from The University of Sydney) released with the paper [ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation](https://arxiv.org/abs/2204.12484) by Yufei Xu, Jing Zhang, Qiming Zhang, Dacheng Tao. 1. **[VITS](https://huggingface.co/docs/transformers/model_doc/vits)** (from Kakao Enterprise) released with the paper [Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech](https://arxiv.org/abs/2106.06103) by Jaehyeon Kim, Jungil Kong, Juhee Son. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-BERT](https://huggingface.co/docs/transformers/main/model_doc/wav2vec2-bert)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. diff --git a/docs/scripts/build_readme.py b/docs/scripts/build_readme.py index 84bb30cf0..2b88f1421 100644 --- a/docs/scripts/build_readme.py +++ b/docs/scripts/build_readme.py @@ -69,6 +69,8 @@ CUSTOM_LINK_MAP = { '/custom_usage#convert-your-models-to-onnx': '#convert-your-models-to-onnx', './api/env': DOCS_BASE_URL + '/api/env', + './guides/webgpu': DOCS_BASE_URL + '/guides/webgpu', + './guides/dtypes': DOCS_BASE_URL + '/guides/dtypes', } diff --git a/docs/snippets/1_quick-tour.snippet b/docs/snippets/1_quick-tour.snippet index 3d6233729..27fdd2214 100644 --- a/docs/snippets/1_quick-tour.snippet +++ b/docs/snippets/1_quick-tour.snippet @@ -52,7 +52,7 @@ const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncase }); ``` -For more information, check out the [WebGPU guide](/guides/webgpu). +For more information, check out the [WebGPU guide](./guides/webgpu). > [!WARNING] > The WebGPU API is still experimental in many browsers, so if you run into any issues, @@ -62,7 +62,7 @@ In resource-constrained environments, such as web browsers, it is advisable to u the model to lower bandwidth and optimize performance. This can be achieved by adjusting the `dtype` option, which allows you to select the appropriate data type for your model. 
While the available options may vary depending on the specific model, typical choices include `"fp32"` (default for WebGPU), `"fp16"`, `"q8"` -(default for WASM), and `"q4"`. For more information, check out the [quantization guide](../guides/dtypes). +(default for WASM), and `"q4"`. For more information, check out the [quantization guide](./guides/dtypes). ```javascript // Run the model at 4-bit quantization const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', { diff --git a/docs/snippets/5_supported-tasks.snippet b/docs/snippets/5_supported-tasks.snippet index 0d1929de1..f481fbf96 100644 --- a/docs/snippets/5_supported-tasks.snippet +++ b/docs/snippets/5_supported-tasks.snippet @@ -7,7 +7,7 @@ |--------------------------|----|-------------|------------| | [Fill-Mask](https://huggingface.co/tasks/fill-mask) | `fill-mask` | Masking some of the words in a sentence and predicting which words should replace those masks. | ✅ [(docs)](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.FillMaskPipeline)
[(models)](https://huggingface.co/models?pipeline_tag=fill-mask&library=transformers.js) | | [Question Answering](https://huggingface.co/tasks/question-answering) | `question-answering` | Retrieve the answer to a question from a given text. | ✅ [(docs)](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.QuestionAnsweringPipeline)
[(models)](https://huggingface.co/models?pipeline_tag=question-answering&library=transformers.js) | -| [Sentence Similarity](https://huggingface.co/tasks/sentence-similarity) | `sentence-similarity` | Determining how similar two texts are. | ✅ [(docs)](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.FeatureExtractionPipeline)
[(models)](https://huggingface.co/models?pipeline_tag=feature-extraction&library=transformers.js) | +| [Sentence Similarity](https://huggingface.co/tasks/sentence-similarity) | `sentence-similarity` | Determining how similar two texts are. | ✅ [(docs)](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.FeatureExtractionPipeline)
[(models)](https://huggingface.co/models?pipeline_tag=sentence-similarity&library=transformers.js) | | [Summarization](https://huggingface.co/tasks/summarization) | `summarization` | Producing a shorter version of a document while preserving its important information. | ✅ [(docs)](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.SummarizationPipeline)
[(models)](https://huggingface.co/models?pipeline_tag=summarization&library=transformers.js) | | [Table Question Answering](https://huggingface.co/tasks/table-question-answering) | `table-question-answering` | Answering a question about information from a given table. | ❌ | | [Text Classification](https://huggingface.co/tasks/text-classification) | `text-classification` or `sentiment-analysis` | Assigning a label or class to a given text. | ✅ [(docs)](https://huggingface.co/docs/transformers.js/api/pipelines#module_pipelines.TextClassificationPipeline)
[(models)](https://huggingface.co/models?pipeline_tag=text-classification&library=transformers.js) | diff --git a/docs/snippets/6_supported-models.snippet b/docs/snippets/6_supported-models.snippet index 85005c705..8dee3ac42 100644 --- a/docs/snippets/6_supported-models.snippet +++ b/docs/snippets/6_supported-models.snippet @@ -53,16 +53,20 @@ 1. **[Hiera](https://huggingface.co/docs/transformers/model_doc/hiera)** (from Meta) released with the paper [Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles](https://arxiv.org/pdf/2306.00989) by Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer. 1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed. 1. **JAIS** (from Core42) released with the paper [Jais and Jais-chat: Arabic-Centric Foundation and Instruction-Tuned Open Generative Large Language Models](https://arxiv.org/pdf/2308.16149) by Neha Sengupta, Sunil Kumar Sahu, Bokang Jia, Satheesh Katipomu, Haonan Li, Fajri Koto, William Marshall, Gurpreet Gosal, Cynthia Liu, Zhiming Chen, Osama Mohammed Afzal, Samta Kamboj, Onkar Pandit, Rahul Pal, Lalit Pradhan, Zain Muhammad Mujahid, Massa Baali, Xudong Han, Sondos Mahmoud Bsharat, Alham Fikri Aji, Zhiqiang Shen, Zhengzhong Liu, Natalia Vassilieva, Joel Hestness, Andy Hock, Andrew Feldman, Jonathan Lee, Andrew Jackson, Hector Xuguang Ren, Preslav Nakov, Timothy Baldwin, Eric Xing. +1. **Janus** (from DeepSeek) released with the paper [Janus: Decoupling Visual Encoding for Unified Multimodal Understanding and Generation](https://arxiv.org/abs/2410.13848) Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, Ping Luo. +1. **JinaCLIP** (from Jina AI) released with the paper [Jina CLIP: Your CLIP Model Is Also Your Text Retriever](https://arxiv.org/abs/2405.20204) by Andreas Koukounas, Georgios Mastrapas, Michael Günther, Bo Wang, Scott Martens, Isabelle Mohr, Saba Sturua, Mohammad Kalim Akram, Joan Fontanals Martínez, Saahil Ognawala, Susana Guzman, Maximilian Werk, Nan Wang, Han Xiao. 1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang. 1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. 1. 
**[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushka rMishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing EllenTan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom. 1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (from Microsoft Research & University of Wisconsin-Madison) released with the paper [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee. +1. **[LLaVA-OneVision](https://huggingface.co/docs/transformers/model_doc/llava_onevision)** (from ByteDance & NTU & CUHK & HKUST) released with the paper [LLaVA-OneVision: Easy Visual Task Transfer](https://arxiv.org/abs/2408.03326) by Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, Chunyuan Li 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. 
**[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. 1. **[MusicGen](https://huggingface.co/docs/transformers/model_doc/musicgen)** (from Meta) released with the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. +1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao. 1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli. 1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. @@ -92,6 +96,7 @@ 1. **[PVT](https://huggingface.co/docs/transformers/main/model_doc/pvt)** (from Nanjing University, The University of Hong Kong etc.) released with the paper [Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions](https://arxiv.org/pdf/2102.12122.pdf) by Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao. 1. **PyAnnote** released in the repository [pyannote/pyannote-audio](https://github.com/pyannote/pyannote-audio) by Hervé Bredin. 1. **[Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2)** (from the Qwen team, Alibaba Group) released with the paper [Qwen Technical Report](https://arxiv.org/abs/2309.16609) by Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou and Tianhang Zhu. +1. 
**[Qwen2-VL](https://huggingface.co/docs/transformers/model_doc/qwen2_vl)** (from the Qwen team, Alibaba Group) released with the paper [Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond](https://arxiv.org/abs/2308.12966) by Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, Jingren Zhou. 1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. 1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. @@ -116,6 +121,7 @@ 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. 1. **[ViTMatte](https://huggingface.co/docs/transformers/model_doc/vitmatte)** (from HUST-VL) released with the paper [ViTMatte: Boosting Image Matting with Pretrained Plain Vision Transformers](https://arxiv.org/abs/2305.15272) by Jingfeng Yao, Xinggang Wang, Shusheng Yang, Baoyuan Wang. 1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. +1. **[ViTPose](https://huggingface.co/docs/transformers/model_doc/vitpose)** (from The University of Sydney) released with the paper [ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation](https://arxiv.org/abs/2204.12484) by Yufei Xu, Jing Zhang, Qiming Zhang, Dacheng Tao. 1. **[VITS](https://huggingface.co/docs/transformers/model_doc/vits)** (from Kakao Enterprise) released with the paper [Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech](https://arxiv.org/abs/2106.06103) by Jaehyeon Kim, Jungil Kong, Juhee Son. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-BERT](https://huggingface.co/docs/transformers/main/model_doc/wav2vec2-bert)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team. 
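For reference, the Qwen2-VL support added by this patch (`Qwen2VLImageProcessor`, `Qwen2VLProcessor`, `Qwen2VLForConditionalGeneration`, and the processor-level `batch_decode`) can be exercised roughly as shown below. This is a minimal sketch, not part of the diff: the `onnx-community/Qwen2-VL-2B-Instruct` checkpoint id and the image URL are illustrative assumptions, and the chat-template call is used as exposed by the refactored processor API.

```javascript
import { AutoProcessor, Qwen2VLForConditionalGeneration, RawImage } from "@huggingface/transformers";

// Load the processor and model (checkpoint id is an assumed example)
const model_id = "onnx-community/Qwen2-VL-2B-Instruct";
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await Qwen2VLForConditionalGeneration.from_pretrained(model_id);

// Prepare an image and a chat-style prompt
const image = await RawImage.read("https://example.com/demo.jpg"); // placeholder URL
const conversation = [
  { role: "user", content: [{ type: "image" }, { type: "text", text: "Describe this image." }] },
];
const text = processor.apply_chat_template(conversation, { add_generation_prompt: true });
const inputs = await processor(text, image);

// Generate, then decode only the newly produced tokens
const outputs = await model.generate({ ...inputs, max_new_tokens: 128 });
const decoded = processor.batch_decode(
  outputs.slice(null, [inputs.input_ids.dims.at(-1), null]),
  { skip_special_tokens: true },
);
console.log(decoded[0]);
```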
diff --git a/package-lock.json b/package-lock.json index 410ae35a7..a01f01da9 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,9 +9,9 @@ "version": "3.0.2", "license": "Apache-2.0", "dependencies": { - "@huggingface/jinja": "^0.3.0", - "onnxruntime-node": "1.19.2", - "onnxruntime-web": "1.21.0-dev.20241024-d9ca84ef96", + "@huggingface/jinja": "^0.3.2", + "onnxruntime-node": "1.20.1", + "onnxruntime-web": "1.20.1", "sharp": "^0.33.5" }, "devDependencies": { @@ -618,9 +618,10 @@ } }, "node_modules/@huggingface/jinja": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.3.0.tgz", - "integrity": "sha512-GLJzso0M07ZncFkrJMIXVU4os6GFbPocD4g8fMQPMGJubf48FtGOsUORH2rtFdXPIPelz8SLBMn8ZRmOTwXm9Q==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/@huggingface/jinja/-/jinja-0.3.2.tgz", + "integrity": "sha512-F2FvuIc+w1blGsaqJI/OErRbWH6bVJDCBI8Rm5D86yZ2wlwrGERsfIaru7XUv9eYC3DMP3ixDRRtF0h6d8AZcQ==", + "license": "MIT", "engines": { "node": ">=18" } @@ -4118,9 +4119,10 @@ "dev": true }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -8779,44 +8781,41 @@ } }, "node_modules/onnxruntime-common": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.19.2.tgz", - "integrity": "sha512-a4R7wYEVFbZBlp0BfhpbFWqe4opCor3KM+5Wm22Az3NGDcQMiU2hfG/0MfnBs+1ZrlSGmlgWeMcXQkDk1UFb8Q==" + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.20.1.tgz", + "integrity": "sha512-YiU0s0IzYYC+gWvqD1HzLc46Du1sXpSiwzKb63PACIJr6LfL27VsXSXQvt68EzD3V0D5Bc0vyJTjmMxp0ylQiw==", + "license": "MIT" }, "node_modules/onnxruntime-node": { - "version": "1.19.2", - "resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.19.2.tgz", - "integrity": "sha512-9eHMP/HKbbeUcqte1JYzaaRC8JPn7ojWeCeoyShO86TOR97OCyIyAIOGX3V95ErjslVhJRXY8Em/caIUc0hm1Q==", + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/onnxruntime-node/-/onnxruntime-node-1.20.1.tgz", + "integrity": "sha512-di/I4HDXRw+FLgq+TyHmQEDd3cEp9iFFZm0r4uJ1Wd7b/WE1VXtKWo8yemex347c6GNF/3Pv86ZfPhIWxORr0w==", "hasInstallScript": true, + "license": "MIT", "os": [ "win32", "darwin", "linux" ], "dependencies": { - "onnxruntime-common": "1.19.2", + "onnxruntime-common": "1.20.1", "tar": "^7.0.1" } }, "node_modules/onnxruntime-web": { - "version": "1.21.0-dev.20241024-d9ca84ef96", - "resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.21.0-dev.20241024-d9ca84ef96.tgz", - "integrity": "sha512-ANSQfMALvCviN3Y4tvTViKofKToV1WUb2r2VjZVCi3uUBPaK15oNJyIxhsNyEckBr/Num3JmSXlkHOD8HfVzSQ==", + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/onnxruntime-web/-/onnxruntime-web-1.20.1.tgz", + "integrity": "sha512-TePF6XVpLL1rWVMIl5Y9ACBQcyCNFThZON/jgElNd9Txb73CIEGlklhYR3UEr1cp5r0rbGI6nDwwrs79g7WjoA==", "license": "MIT", "dependencies": { "flatbuffers": "^1.12.0", "guid-typescript": "^1.0.9", "long": "^5.2.3", - "onnxruntime-common": "1.20.0-dev.20241016-2b8fc5529b", + 
"onnxruntime-common": "1.20.1", "platform": "^1.3.6", "protobufjs": "^7.2.4" } }, - "node_modules/onnxruntime-web/node_modules/onnxruntime-common": { - "version": "1.20.0-dev.20241016-2b8fc5529b", - "resolved": "https://registry.npmjs.org/onnxruntime-common/-/onnxruntime-common-1.20.0-dev.20241016-2b8fc5529b.tgz", - "integrity": "sha512-KZK8b6zCYGZFjd4ANze0pqBnqnFTS3GIVeclQpa2qseDpXrCQJfkWBixRcrZShNhm3LpFOZ8qJYFC5/qsJK9WQ==" - }, "node_modules/open": { "version": "8.4.2", "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz", diff --git a/package.json b/package.json index 94e59b866..5a82076cb 100644 --- a/package.json +++ b/package.json @@ -61,9 +61,9 @@ }, "homepage": "https://github.com/huggingface/transformers.js#readme", "dependencies": { - "@huggingface/jinja": "^0.3.0", - "onnxruntime-node": "1.19.2", - "onnxruntime-web": "1.21.0-dev.20241024-d9ca84ef96", + "@huggingface/jinja": "^0.3.2", + "onnxruntime-node": "1.20.1", + "onnxruntime-web": "1.20.1", "sharp": "^0.33.5" }, "devDependencies": { diff --git a/scripts/convert.py b/scripts/convert.py index ee4f413dd..002bdc34f 100644 --- a/scripts/convert.py +++ b/scripts/convert.py @@ -232,16 +232,7 @@ def main(): tokenizer = None try: # Load tokenizer - tokenizer = AutoTokenizer.from_pretrained( - tokenizer_id, **from_pretrained_kwargs) - - # To avoid inserting all chat templates into tokenizers.js, we save the chat template - # to the tokenizer_config.json file, and load it when the tokenizer is loaded. - if getattr(tokenizer, 'chat_template', None) is None and \ - getattr(tokenizer, 'use_default_system_prompt', False): - # No chat template specified, and we use the default - setattr(tokenizer, 'chat_template', - tokenizer.default_chat_template) + tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, **from_pretrained_kwargs) except KeyError: pass # No Tokenizer diff --git a/scripts/requirements.txt b/scripts/requirements.txt index 2c98b42fa..a1615f9a9 100644 --- a/scripts/requirements.txt +++ b/scripts/requirements.txt @@ -1,9 +1,9 @@ -transformers[torch]==4.46.1 -onnxruntime==1.19.2 +transformers[torch]==4.46.3 +onnxruntime==1.20.1 optimum==1.23.3 onnx==1.16.2 onnxconverter-common==1.14.0 -tqdm==4.66.5 -onnxslim==0.1.36 +tqdm==4.66.6 +onnxslim==0.1.42 --extra-index-url https://pypi.ngc.nvidia.com -onnx_graphsurgeon==0.3.27 +onnx_graphsurgeon==0.5.2 diff --git a/src/base/feature_extraction_utils.js b/src/base/feature_extraction_utils.js new file mode 100644 index 000000000..53a5e4941 --- /dev/null +++ b/src/base/feature_extraction_utils.js @@ -0,0 +1,54 @@ +import { FEATURE_EXTRACTOR_NAME } from "../utils/constants.js"; +import { Callable } from "../utils/generic.js"; +import { getModelJSON } from "../utils/hub.js"; + +/** + * Base class for feature extractors. + */ +export class FeatureExtractor extends Callable { + /** + * Constructs a new FeatureExtractor instance. + * + * @param {Object} config The configuration for the feature extractor. + */ + constructor(config) { + super(); + this.config = config + } + + /** + * Instantiate one of the processor classes of the library from a pretrained model. + * + * The processor class to instantiate is selected based on the `image_processor_type` (or `feature_extractor_type`; legacy) + * property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) + * + * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. 
Can be either: + * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. + * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + * user or organization name, like `dbmdz/bert-base-german-cased`. + * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. + * @param {import('../utils/hub.js').PretrainedOptions} options Additional options for loading the processor. + * + * @returns {Promise} A new instance of the Processor class. + */ + static async from_pretrained(pretrained_model_name_or_path, options) { + const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, true, options); + return new this(preprocessorConfig); + } +} + + +/** + * Helper function to validate audio inputs. + * @param {any} audio The audio data. + * @param {string} feature_extractor The name of the feature extractor. + * @private + */ +export function validate_audio_inputs(audio, feature_extractor) { + if (!(audio instanceof Float32Array || audio instanceof Float64Array)) { + throw new Error( + `${feature_extractor} expects input to be a Float32Array or a Float64Array, but got ${audio?.constructor?.name ?? typeof audio} instead. ` + + `If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.` + ) + } +} diff --git a/src/base/image_processors_utils.js b/src/base/image_processors_utils.js new file mode 100644 index 000000000..6788258f6 --- /dev/null +++ b/src/base/image_processors_utils.js @@ -0,0 +1,1089 @@ +import { Callable } from "../utils/generic.js"; +import { Tensor, interpolate, stack } from "../utils/tensor.js"; +import { bankers_round, max, min, softmax } from "../utils/maths.js"; +import { RawImage } from "../utils/image.js"; +import { calculateReflectOffset } from "../utils/core.js"; +import { getModelJSON } from "../utils/hub.js"; +import { IMAGE_PROCESSOR_NAME } from '../utils/constants.js'; + +/** + * Named tuple to indicate the order we are using is (height x width), + * even though the Graphics' industry standard is (width x height). + * @typedef {[height: number, width: number]} HeightWidth + */ + + +/** + * @typedef {object} ImageProcessorResult + * @property {Tensor} pixel_values The pixel values of the batched preprocessed images. + * @property {HeightWidth[]} original_sizes Array of two-dimensional tuples like [[480, 640]]. + * @property {HeightWidth[]} reshaped_input_sizes Array of two-dimensional tuples like [[1000, 1330]]. + */ + + + +/** + * Helper function to constrain a value to be a multiple of a number. + * @param {number} val The value to constrain. + * @param {number} multiple The number to constrain to. + * @param {number} [minVal=0] The minimum value to constrain to. + * @param {number} [maxVal=null] The maximum value to constrain to. + * @returns {number} The constrained value. + * @private + */ +function constraint_to_multiple_of(val, multiple, minVal = 0, maxVal = null) { + const a = val / multiple; + let x = bankers_round(a) * multiple; + + if (maxVal !== null && x > maxVal) { + x = Math.floor(a) * multiple; + } + + if (x < minVal) { + x = Math.ceil(a) * multiple; + } + + return x; +} + +/** + * Rounds the height and width down to the closest multiple of size_divisibility + * @param {[number, number]} size The size of the image + * @param {number} divisor The divisor to use. + * @returns {[number, number]} The rounded size. 
+ */ +function enforce_size_divisibility([width, height], divisor) { + return [ + Math.max(Math.floor(width / divisor), 1) * divisor, + Math.max(Math.floor(height / divisor), 1) * divisor + ]; +} + + +// Helper functions + +/** + * Converts bounding boxes from center format to corners format. + * + * @param {number[]} arr The coordinate for the center of the box and its width, height dimensions (center_x, center_y, width, height) + * @returns {number[]} The coodinates for the top-left and bottom-right corners of the box (top_left_x, top_left_y, bottom_right_x, bottom_right_y) + */ +function center_to_corners_format([centerX, centerY, width, height]) { + return [ + centerX - width / 2, + centerY - height / 2, + centerX + width / 2, + centerY + height / 2 + ]; +} + +/** + * Post-processes the outputs of the model (for object detection). + * @param {Object} outputs The outputs of the model that must be post-processed + * @param {Tensor} outputs.logits The logits + * @param {Tensor} outputs.pred_boxes The predicted boxes. + * @param {number} [threshold=0.5] The threshold to use for the scores. + * @param {[number, number][]} [target_sizes=null] The sizes of the original images. + * @param {boolean} [is_zero_shot=false] Whether zero-shot object detection was performed. + * @return {Object[]} An array of objects containing the post-processed outputs. + */ +export function post_process_object_detection(outputs, threshold = 0.5, target_sizes = null, is_zero_shot = false) { + const out_logits = outputs.logits; + const out_bbox = outputs.pred_boxes; + const [batch_size, num_boxes, num_classes] = out_logits.dims; + + if (target_sizes !== null && target_sizes.length !== batch_size) { + throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") + } + let toReturn = []; + for (let i = 0; i < batch_size; ++i) { + let target_size = target_sizes !== null ? target_sizes[i] : null; + let info = { + boxes: [], + classes: [], + scores: [] + } + let logits = out_logits[i]; + let bbox = out_bbox[i]; + + for (let j = 0; j < num_boxes; ++j) { + let logit = logits[j]; + + let indices = []; + let probs; + if (is_zero_shot) { + // Get indices of classes with high enough probability + probs = logit.sigmoid().data; + for (let k = 0; k < probs.length; ++k) { + if (probs[k] > threshold) { + indices.push(k); + } + } + + } else { + // Get most probable class + let maxIndex = max(logit.data)[1]; + + if (maxIndex === num_classes - 1) { + // This is the background class, skip it + continue; + } + // Compute softmax over classes + probs = softmax(logit.data); + + if (probs[maxIndex] < threshold) { + continue; + } + indices.push(maxIndex); + } + + for (const index of indices) { + + // Some class has a high enough probability + /** @type {number[]} */ + let box = bbox[j].data; + + // convert to [x0, y0, x1, y1] format + box = center_to_corners_format(box) + if (target_size !== null) { + box = box.map((x, i) => x * target_size[(i + 1) % 2]) + } + + info.boxes.push(box); + info.classes.push(index); + info.scores.push(probs[index]); + } + } + toReturn.push(info); + } + return toReturn; +} + + +/** + * Post-processes the outputs of the model (for semantic segmentation). + * @param {*} outputs Raw outputs of the model. + * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size + * (height, width) of each prediction. If unset, predictions will not be resized. 
+ * @returns {{segmentation: Tensor; labels: number[]}[]} The semantic segmentation maps. + */ +export function post_process_semantic_segmentation(outputs, target_sizes = null) { + + const logits = outputs.logits; + const batch_size = logits.dims[0]; + + if (target_sizes !== null && target_sizes.length !== batch_size) { + throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") + } + + const toReturn = []; + for (let i = 0; i < batch_size; ++i) { + const target_size = target_sizes !== null ? target_sizes[i] : null; + + let data = logits[i]; + + // 1. If target_size is not null, we need to resize the masks to the target size + if (target_size !== null) { + // resize the masks to the target size + data = interpolate(data, target_size, 'bilinear', false); + } + const [height, width] = target_size ?? data.dims.slice(-2); + + const segmentation = new Tensor( + 'int32', + new Int32Array(height * width), + [height, width] + ); + + // Buffer to store current largest value + const buffer = data[0].data; + const segmentation_data = segmentation.data; + for (let j = 1; j < data.dims[0]; ++j) { + const row = data[j].data; + for (let k = 0; k < row.length; ++k) { + if (row[k] > buffer[k]) { + buffer[k] = row[k]; + segmentation_data[k] = j; + } + } + } + + // Store which objects have labels + // This is much more efficient that creating a set of the final values + const hasLabel = new Array(data.dims[0]); + for (let j = 0; j < segmentation_data.length; ++j) { + const index = segmentation_data[j]; + hasLabel[index] = index; + } + /** @type {number[]} The unique list of labels that were detected */ + const labels = hasLabel.filter(x => x !== undefined); + + toReturn.push({ segmentation, labels }); + } + return toReturn; +} + + +/** + * Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. + * @param {Tensor} class_logits The class logits. + * @param {Tensor} mask_logits The mask logits. + * @param {number} object_mask_threshold A number between 0 and 1 used to binarize the masks. + * @param {number} num_labels The number of labels. + * @returns {[Tensor[], number[], number[]]} The binarized masks, the scores, and the labels. + * @private + */ +function remove_low_and_no_objects(class_logits, mask_logits, object_mask_threshold, num_labels) { + + const mask_probs_item = []; + const pred_scores_item = []; + const pred_labels_item = []; + + for (let j = 0; j < class_logits.dims[0]; ++j) { + const cls = class_logits[j]; + const mask = mask_logits[j]; + + const pred_label = max(cls.data)[1]; + if (pred_label === num_labels) { + // Is the background, so we ignore it + continue; + } + + const scores = softmax(cls.data); + const pred_score = scores[pred_label]; + if (pred_score > object_mask_threshold) { + mask_probs_item.push(mask); + pred_scores_item.push(pred_score); + pred_labels_item.push(pred_label); + } + } + + return [mask_probs_item, pred_scores_item, pred_labels_item]; +} + +/** + * Checks whether the segment is valid or not. + * @param {Int32Array} mask_labels Labels for each pixel in the mask. + * @param {Tensor[]} mask_probs Probabilities for each pixel in the masks. + * @param {number} k The class id of the segment. + * @param {number} mask_threshold The mask threshold. + * @param {number} overlap_mask_area_threshold The overlap mask area threshold. + * @returns {[boolean, number[]]} Whether the segment is valid or not, and the indices of the valid labels. 
+ * @private + */ +function check_segment_validity( + mask_labels, + mask_probs, + k, + mask_threshold = 0.5, + overlap_mask_area_threshold = 0.8 +) { + // mask_k is a 1D array of indices, indicating where the mask is equal to k + const mask_k = []; + let mask_k_area = 0; + let original_area = 0; + + const mask_probs_k_data = mask_probs[k].data; + + // Compute the area of all the stuff in query k + for (let i = 0; i < mask_labels.length; ++i) { + if (mask_labels[i] === k) { + mask_k.push(i); + ++mask_k_area; + } + + if (mask_probs_k_data[i] >= mask_threshold) { + ++original_area; + } + } + let mask_exists = mask_k_area > 0 && original_area > 0; + + // Eliminate disconnected tiny segments + if (mask_exists) { + // Perform additional check + let area_ratio = mask_k_area / original_area; + mask_exists = area_ratio > overlap_mask_area_threshold; + } + + return [mask_exists, mask_k] +} + +/** + * Computes the segments. + * @param {Tensor[]} mask_probs The mask probabilities. + * @param {number[]} pred_scores The predicted scores. + * @param {number[]} pred_labels The predicted labels. + * @param {number} mask_threshold The mask threshold. + * @param {number} overlap_mask_area_threshold The overlap mask area threshold. + * @param {Set} label_ids_to_fuse The label ids to fuse. + * @param {number[]} target_size The target size of the image. + * @returns {[Tensor, Array<{id: number, label_id: number, score: number}>]} The computed segments. + * @private + */ +function compute_segments( + mask_probs, + pred_scores, + pred_labels, + mask_threshold, + overlap_mask_area_threshold, + label_ids_to_fuse = null, + target_size = null, +) { + const [height, width] = target_size ?? mask_probs[0].dims; + + const segmentation = new Tensor( + 'int32', + new Int32Array(height * width), + [height, width] + ); + const segments = []; + + // 1. If target_size is not null, we need to resize the masks to the target size + if (target_size !== null) { + // resize the masks to the target size + for (let i = 0; i < mask_probs.length; ++i) { + mask_probs[i] = interpolate(mask_probs[i], target_size, 'bilinear', false); + } + } + + // 2. 
Weigh each mask by its prediction score + // NOTE: `mask_probs` is updated in-place + // + // Temporary storage for the best label/scores for each pixel ([height, width]): + const mask_labels = new Int32Array(mask_probs[0].data.length); + const bestScores = new Float32Array(mask_probs[0].data.length); + + for (let i = 0; i < mask_probs.length; ++i) { + let score = pred_scores[i]; + + const mask_probs_i_data = mask_probs[i].data; + + for (let j = 0; j < mask_probs_i_data.length; ++j) { + mask_probs_i_data[j] *= score + if (mask_probs_i_data[j] > bestScores[j]) { + mask_labels[j] = i; + bestScores[j] = mask_probs_i_data[j]; + } + } + } + + let current_segment_id = 0; + + // let stuff_memory_list = {} + const segmentation_data = segmentation.data; + for (let k = 0; k < pred_labels.length; ++k) { + const pred_class = pred_labels[k]; + + // TODO add `should_fuse` + // let should_fuse = pred_class in label_ids_to_fuse + + // Check if mask exists and large enough to be a segment + const [mask_exists, mask_k] = check_segment_validity( + mask_labels, + mask_probs, + k, + mask_threshold, + overlap_mask_area_threshold + ) + + if (!mask_exists) { + // Nothing to see here + continue; + } + + // TODO + // if (pred_class in stuff_memory_list) { + // current_segment_id = stuff_memory_list[pred_class] + // } else { + // current_segment_id += 1; + // } + ++current_segment_id; + + + // Add current object segment to final segmentation map + for (const index of mask_k) { + segmentation_data[index] = current_segment_id; + } + + segments.push({ + id: current_segment_id, + label_id: pred_class, + // was_fused: should_fuse, TODO + score: pred_scores[k], + }) + + // TODO + // if(should_fuse){ + // stuff_memory_list[pred_class] = current_segment_id + // } + } + + return [segmentation, segments]; +} + +/** + * Rescales the image so that the following conditions are met: + * + * 1. Both dimensions (height and width) are divisible by 'factor'. + * 2. The total number of pixels is within the range ['min_pixels', 'max_pixels']. + * 3. The aspect ratio of the image is maintained as closely as possible. + * + * @param {number} height The height of the image. + * @param {number} width The width of the image. + * @param {number} [factor=28] The factor to use for resizing. + * @param {number} [min_pixels=56*56] The minimum number of pixels. + * @param {number} [max_pixels=14*14*4*1280] The maximum number of pixels. + * @returns {[number, number]} The new height and width of the image. + * @throws {Error} If the height or width is smaller than the factor. 
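+ *
+ * @example
+ * // Illustrative values only: a 1000x600 input with the default factor of 28 already falls
+ * // inside [min_pixels, max_pixels], so each side is simply snapped to the nearest multiple of 28.
+ * smart_resize(1000, 600); // [1008, 588]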
+ */ +function smart_resize(height, width, factor = 28, min_pixels = 56 * 56, max_pixels = 14 * 14 * 4 * 1280) { + + if (height < factor || width < factor) { + throw new Error(`height:${height} or width:${width} must be larger than factor:${factor}`); + } else if (Math.max(height, width) / Math.min(height, width) > 200) { + throw new Error( + `absolute aspect ratio must be smaller than 200, got ${Math.max(height, width) / Math.min(height, width)}` + ); + } + + let h_bar = Math.round(height / factor) * factor; + let w_bar = Math.round(width / factor) * factor; + + if (h_bar * w_bar > max_pixels) { + const beta = Math.sqrt((height * width) / max_pixels); + h_bar = Math.floor((height / beta) / factor) * factor; + w_bar = Math.floor((width / beta) / factor) * factor; + } else if (h_bar * w_bar < min_pixels) { + const beta = Math.sqrt(min_pixels / (height * width)); + h_bar = Math.ceil((height * beta) / factor) * factor; + w_bar = Math.ceil((width * beta) / factor) * factor; + } + + return [h_bar, w_bar]; +} + + +/** + * Post-process the model output to generate the final panoptic segmentation. + * @param {*} outputs The model output to post process + * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks. + * @param {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values. + * @param {number} [overlap_mask_area_threshold=0.8] The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. + * @param {Set} [label_ids_to_fuse=null] The labels in this state will have all their instances be fused together. + * @param {[number, number][]} [target_sizes=null] The target sizes to resize the masks to. + * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>} + */ +export function post_process_panoptic_segmentation( + outputs, + threshold = 0.5, + mask_threshold = 0.5, + overlap_mask_area_threshold = 0.8, + label_ids_to_fuse = null, + target_sizes = null, +) { + if (label_ids_to_fuse === null) { + console.warn("`label_ids_to_fuse` unset. No instance will be fused.") + label_ids_to_fuse = new Set(); + } + + const class_queries_logits = outputs.class_queries_logits ?? outputs.logits; // [batch_size, num_queries, num_classes+1] + const masks_queries_logits = outputs.masks_queries_logits ?? outputs.pred_masks; // [batch_size, num_queries, height, width] + + const mask_probs = masks_queries_logits.sigmoid() // [batch_size, num_queries, height, width] + + let [batch_size, num_queries, num_labels] = class_queries_logits.dims; + num_labels -= 1; // Remove last class (background) + + if (target_sizes !== null && target_sizes.length !== batch_size) { + throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") + } + + let toReturn = []; + for (let i = 0; i < batch_size; ++i) { + let target_size = target_sizes !== null ? target_sizes[i] : null; + + let class_logits = class_queries_logits[i]; + let mask_logits = mask_probs[i]; + + let [mask_probs_item, pred_scores_item, pred_labels_item] = remove_low_and_no_objects(class_logits, mask_logits, threshold, num_labels); + + if (pred_labels_item.length === 0) { + // No mask found + let [height, width] = target_size ?? 
mask_logits.dims.slice(-2);
+
+            let segmentation = new Tensor(
+                'int32',
+                new Int32Array(height * width).fill(-1),
+                [height, width]
+            )
+            toReturn.push({
+                segmentation: segmentation,
+                segments_info: []
+            });
+            continue;
+        }
+
+
+        // Get segmentation map and segment information of batch item
+        let [segmentation, segments] = compute_segments(
+            mask_probs_item,
+            pred_scores_item,
+            pred_labels_item,
+            mask_threshold,
+            overlap_mask_area_threshold,
+            label_ids_to_fuse,
+            target_size,
+        )
+
+        toReturn.push({
+            segmentation: segmentation,
+            segments_info: segments
+        })
+    }
+
+    return toReturn;
+}
+
+
+/**
+ * Post-processes the outputs of the model (for instance segmentation).
+ * @param {*} outputs Raw outputs of the model.
+ * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks.
+ * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size
+ * (height, width) of each prediction. If unset, predictions will not be resized.
+ * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>}
+ */
+export function post_process_instance_segmentation(outputs, threshold = 0.5, target_sizes = null) {
+    throw new Error('`post_process_instance_segmentation` is not yet implemented.');
+}
+
+
+/**
+ * @typedef {Object} ImageProcessorConfig A configuration object used to create an image processor.
+ * @property {function} [progress_callback=null] If specified, this function will be called during model construction, to provide the user with progress updates.
+ * @property {number[]} [image_mean] The mean values for image normalization.
+ * @property {number[]} [image_std] The standard deviation values for image normalization.
+ * @property {boolean} [do_rescale] Whether to rescale the image pixel values to the [0,1] range.
+ * @property {number} [rescale_factor] The factor to use for rescaling the image pixel values.
+ * @property {boolean} [do_normalize] Whether to normalize the image pixel values.
+ * @property {boolean} [do_resize] Whether to resize the image.
+ * @property {number} [resample] What method to use for resampling.
+ * @property {number|Object} [size] The size to resize the image to.
+ * @property {number|Object} [image_size] The size to resize the image to (same as `size`).
+ * @property {boolean} [do_flip_channel_order=false] Whether to flip the color channels from RGB to BGR.
+ * Can be overridden by the `do_flip_channel_order` parameter in the `preprocess` method.
+ * @property {boolean} [do_center_crop] Whether to center crop the image to the specified `crop_size`.
+ * Can be overridden by `do_center_crop` in the `preprocess` method.
+ * @property {boolean} [do_thumbnail] Whether to resize the image using the thumbnail method.
+ * @property {boolean} [keep_aspect_ratio] If `true`, the image is resized to the largest possible size such that the aspect ratio is preserved.
+ * Can be overridden by `keep_aspect_ratio` in `preprocess`.
+ * @property {number} [ensure_multiple_of] If `do_resize` is `true`, the image is resized to a size that is a multiple of this value.
+ * Can be overridden by `ensure_multiple_of` in `preprocess`.
+ *
+ * @property {number[]} [mean] The mean values for image normalization (same as `image_mean`).
+ * @property {number[]} [std] The standard deviation values for image normalization (same as `image_std`).
+ */
+
+export class ImageProcessor extends Callable {
+
+    /**
+     * Constructs a new `ImageProcessor`.
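+     *
+     * @example
+     * // Minimal sketch with illustrative config values (in practice the config is loaded
+     * // from a checkpoint's `preprocessor_config.json` rather than written by hand):
+     * const processor = new ImageProcessor({
+     *     do_resize: true,
+     *     size: { shortest_edge: 224, longest_edge: 512 },
+     *     do_normalize: true,
+     *     image_mean: [0.5, 0.5, 0.5],
+     *     image_std: [0.5, 0.5, 0.5],
+     * });
+     *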
+ * @param {ImageProcessorConfig} config The configuration object. + */ + constructor(config) { + super(); + + this.image_mean = config.image_mean ?? config.mean; + this.image_std = config.image_std ?? config.std; + + this.resample = config.resample ?? 2; // 2 => bilinear + this.do_rescale = config.do_rescale ?? true; + this.rescale_factor = config.rescale_factor ?? (1 / 255); + this.do_normalize = config.do_normalize; + + this.do_thumbnail = config.do_thumbnail; + this.size = config.size ?? config.image_size; + this.do_resize = config.do_resize ?? (this.size !== undefined); + this.size_divisibility = config.size_divisibility ?? config.size_divisor; + + this.do_center_crop = config.do_center_crop; + this.crop_size = config.crop_size; + this.do_convert_rgb = config.do_convert_rgb ?? true; + this.do_crop_margin = config.do_crop_margin; + + this.pad_size = config.pad_size; + this.do_pad = config.do_pad; + + if (this.do_pad && !this.pad_size && this.size && this.size.width !== undefined && this.size.height !== undefined) { + // Should pad, but no pad size specified + // We infer the pad size from the resize size + this.pad_size = this.size + } + + this.do_flip_channel_order = config.do_flip_channel_order ?? false; + + this.config = config; + } + + /** + * Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any + * corresponding dimension of the specified size. + * @param {RawImage} image The image to be resized. + * @param {{height:number, width:number}} size The size `{"height": h, "width": w}` to resize the image to. + * @param {string | 0 | 1 | 2 | 3 | 4 | 5} [resample=2] The resampling filter to use. + * @returns {Promise} The resized image. + */ + async thumbnail(image, size, resample = 2) { + const input_height = image.height; + const input_width = image.width; + + const output_height = size.height; + const output_width = size.width; + + // We always resize to the smallest of either the input or output size. + let height = Math.min(input_height, output_height) + let width = Math.min(input_width, output_width) + + if (height === input_height && width === input_width) { + return image; + } + if (input_height > input_width) { + width = Math.floor(input_width * height / input_height); + } else if (input_width > input_height) { + height = Math.floor(input_height * width / input_width); + } + return await image.resize(width, height, { resample }); + } + + + /** + * Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). + * @param {RawImage} image The image to be cropped. + * @param {number} gray_threshold Value below which pixels are considered to be gray. + * @returns {Promise} The cropped image. 
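+     *
+     * @example
+     * // Hypothetical usage (Nougat-style document images): trim the light margin around the
+     * // page content before any resizing; `image` is assumed to be a `RawImage` and
+     * // `image_processor` an instance of this class.
+     * image = await image_processor.crop_margin(image, 200);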
+ */ + async crop_margin(image, gray_threshold = 200) { + + const gray_image = image.clone().grayscale(); + + const minValue = min(gray_image.data)[0]; + const maxValue = max(gray_image.data)[0]; + const diff = maxValue - minValue; + + if (diff === 0) { + return image; + } + + const threshold = gray_threshold / 255; + + let x_min = gray_image.width, y_min = gray_image.height, x_max = 0, y_max = 0; + const gray_image_data = gray_image.data; + for (let j = 0; j < gray_image.height; ++j) { + const row = j * gray_image.width; + for (let i = 0; i < gray_image.width; ++i) { + if ((gray_image_data[row + i] - minValue) / diff < threshold) { + // We have a non-zero pixel, so we update the min/max values accordingly + x_min = Math.min(x_min, i); + y_min = Math.min(y_min, j); + x_max = Math.max(x_max, i); + y_max = Math.max(y_max, j); + } + } + } + + image = await image.crop([x_min, y_min, x_max, y_max]); + return image; + } + + /** + * Pad the image by a certain amount. + * @param {Float32Array} pixelData The pixel data to pad. + * @param {number[]} imgDims The dimensions of the image (height, width, channels). + * @param {{width:number; height:number}|number} padSize The dimensions of the padded image. + * @param {Object} options The options for padding. + * @param {'constant'|'symmetric'} [options.mode='constant'] The type of padding to add. + * @param {boolean} [options.center=false] Whether to center the image. + * @param {number|number[]} [options.constant_values=0] The constant value to use for padding. + * @returns {[Float32Array, number[]]} The padded pixel data and image dimensions. + */ + pad_image(pixelData, imgDims, padSize, { + mode = 'constant', + center = false, + constant_values = 0, + } = {}) { + const [imageHeight, imageWidth, imageChannels] = imgDims; + + let paddedImageWidth, paddedImageHeight; + if (typeof padSize === 'number') { + paddedImageWidth = padSize; + paddedImageHeight = padSize; + } else { + paddedImageWidth = padSize.width; + paddedImageHeight = padSize.height; + } + + // Only add padding if there is a difference in size + if (paddedImageWidth !== imageWidth || paddedImageHeight !== imageHeight) { + const paddedPixelData = new Float32Array(paddedImageWidth * paddedImageHeight * imageChannels); + if (Array.isArray(constant_values)) { + // Fill with constant values, cycling through the array + for (let i = 0; i < paddedPixelData.length; ++i) { + paddedPixelData[i] = constant_values[i % imageChannels]; + } + } else if (constant_values !== 0) { + paddedPixelData.fill(constant_values); + } + + const [left, top] = center + ? 
[Math.floor((paddedImageWidth - imageWidth) / 2), Math.floor((paddedImageHeight - imageHeight) / 2)] + : [0, 0]; + + // Copy the original image into the padded image + for (let i = 0; i < imageHeight; ++i) { + const a = (i + top) * paddedImageWidth; + const b = i * imageWidth; + for (let j = 0; j < imageWidth; ++j) { + const c = (a + j + left) * imageChannels; + const d = (b + j) * imageChannels; + for (let k = 0; k < imageChannels; ++k) { + paddedPixelData[c + k] = pixelData[d + k]; + } + } + } + + if (mode === 'symmetric') { + if (center) { + throw new Error('`center` padding is not supported when `mode` is set to `symmetric`.'); + // TODO: Implement this + } + const h1 = imageHeight - 1; + const w1 = imageWidth - 1; + for (let i = 0; i < paddedImageHeight; ++i) { + const a = i * paddedImageWidth; + const b = calculateReflectOffset(i, h1) * imageWidth; + + for (let j = 0; j < paddedImageWidth; ++j) { + if (i < imageHeight && j < imageWidth) continue; // Do not overwrite original image + const c = (a + j) * imageChannels; + const d = (b + calculateReflectOffset(j, w1)) * imageChannels; + + // Copy channel-wise + for (let k = 0; k < imageChannels; ++k) { + paddedPixelData[c + k] = pixelData[d + k]; + } + } + } + } + + + // Update pixel data and image dimensions + pixelData = paddedPixelData; + imgDims = [paddedImageHeight, paddedImageWidth, imageChannels] + } + return [pixelData, imgDims]; + } + + /** + * Rescale the image' pixel values by `this.rescale_factor`. + * @param {Float32Array} pixelData The pixel data to rescale. + * @returns {void} + */ + rescale(pixelData) { + for (let i = 0; i < pixelData.length; ++i) { + pixelData[i] = this.rescale_factor * pixelData[i]; + } + } + + /** + * Find the target (width, height) dimension of the output image after + * resizing given the input image and the desired size. + * @param {RawImage} image The image to resize. + * @param {any} size The size to use for resizing the image. + * @returns {[number, number]} The target (width, height) dimension of the output image after resizing. + */ + get_resize_output_image_size(image, size) { + // `size` comes in many forms, so we need to handle them all here: + // 1. `size` is an integer, in which case we resize the image to be a square + + const [srcWidth, srcHeight] = image.size; + + let shortest_edge; + let longest_edge; + + if (this.do_thumbnail) { + // NOTE: custom logic for `Donut` models + const { height, width } = size; + shortest_edge = Math.min(height, width) + } + // Support both formats for backwards compatibility + else if (Number.isInteger(size)) { + shortest_edge = size; + longest_edge = this.config.max_size ?? shortest_edge; + + } else if (size !== undefined) { + // Extract known properties from `size` + shortest_edge = size.shortest_edge; + longest_edge = size.longest_edge; + } + + // If `longest_edge` and `shortest_edge` are set, maintain aspect ratio and resize to `shortest_edge` + // while keeping the largest dimension <= `longest_edge` + if (shortest_edge !== undefined || longest_edge !== undefined) { + // http://opensourcehacker.com/2011/12/01/calculate-aspect-ratio-conserving-resize-for-images-in-javascript/ + // Try resize so that shortest edge is `shortest_edge` (target) + const shortResizeFactor = shortest_edge === undefined + ? 
1 // If `shortest_edge` is not set, don't upscale + : Math.max(shortest_edge / srcWidth, shortest_edge / srcHeight); + + const newWidth = srcWidth * shortResizeFactor; + const newHeight = srcHeight * shortResizeFactor; + + // The new width and height might be greater than `longest_edge`, so + // we downscale again to ensure the largest dimension is `longest_edge` + const longResizeFactor = longest_edge === undefined + ? 1 // If `longest_edge` is not set, don't downscale + : Math.min(longest_edge / newWidth, longest_edge / newHeight); + + // To avoid certain floating point precision issues, we round to 2 decimal places + let finalWidth = Math.floor(Number((newWidth * longResizeFactor).toFixed(2))); + let finalHeight = Math.floor(Number((newHeight * longResizeFactor).toFixed(2))); + + if (this.size_divisibility !== undefined) { + [finalWidth, finalHeight] = enforce_size_divisibility([finalWidth, finalHeight], this.size_divisibility) + } + return [finalWidth, finalHeight]; + + } else if (size !== undefined && size.width !== undefined && size.height !== undefined) { + // If `width` and `height` are set, resize to those dimensions + + let newWidth = size.width; + let newHeight = size.height; + + // Custom for DPT models + if (this.config.keep_aspect_ratio && this.config.ensure_multiple_of) { + + // determine new height and width + let scale_height = newHeight / srcHeight; + let scale_width = newWidth / srcWidth; + + // scale as little as possible + if (Math.abs(1 - scale_width) < Math.abs(1 - scale_height)) { + // fit width + scale_height = scale_width; + } else { + // fit height + scale_width = scale_height; + } + + newHeight = constraint_to_multiple_of(scale_height * srcHeight, this.config.ensure_multiple_of); + newWidth = constraint_to_multiple_of(scale_width * srcWidth, this.config.ensure_multiple_of); + } + + return [newWidth, newHeight]; + + } else if (this.size_divisibility !== undefined) { + return enforce_size_divisibility([srcWidth, srcHeight], this.size_divisibility); + } else if (size.min_pixels !== undefined && size.max_pixels !== undefined) { + // Custom resize logic for Qwen2-VL models + const { min_pixels, max_pixels } = size; + const factor = this.config.patch_size * this.config.merge_size; + return smart_resize(srcHeight, srcWidth, factor, min_pixels, max_pixels); + } else { + throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(size)}`); + } + } + + /** + * Resizes the image. + * @param {RawImage} image The image to resize. + * @returns {Promise} The resized image. + */ + async resize(image) { + const [newWidth, newHeight] = this.get_resize_output_image_size(image, this.size); + return await image.resize(newWidth, newHeight, { + resample: this.resample, + }); + } + + /** + * @typedef {object} PreprocessedImage + * @property {HeightWidth} original_size The original size of the image. + * @property {HeightWidth} reshaped_input_size The reshaped input size of the image. + * @property {Tensor} pixel_values The pixel values of the preprocessed image. + */ + + /** + * Preprocesses the given image. + * + * @param {RawImage} image The image to preprocess. + * @param {Object} overrides The overrides for the preprocessing options. + * @returns {Promise} The preprocessed image. + */ + async preprocess(image, { + do_normalize = null, + do_pad = null, + do_convert_rgb = null, + do_convert_grayscale = null, + do_flip_channel_order = null, + } = {}) { + if (this.do_crop_margin) { + // NOTE: Specific to nougat processors. 
This is done before resizing,
+            // and can be interpreted as a pre-preprocessing step.
+            image = await this.crop_margin(image);
+        }
+
+        const [srcWidth, srcHeight] = image.size; // original image size
+
+        // Convert image to RGB if specified in config.
+        if (do_convert_rgb ?? this.do_convert_rgb) {
+            image = image.rgb();
+        } else if (do_convert_grayscale) {
+            image = image.grayscale();
+        }
+
+        // TODO:
+        // For efficiency reasons, it might be best to merge the resize and center crop operations into one.
+
+        // Resize all images
+        if (this.do_resize) {
+            image = await this.resize(image);
+        }
+
+        // Resize the image using the thumbnail method.
+        if (this.do_thumbnail) {
+            image = await this.thumbnail(image, this.size, this.resample);
+        }
+
+        if (this.do_center_crop) {
+
+            let crop_width;
+            let crop_height;
+            if (Number.isInteger(this.crop_size)) {
+                crop_width = this.crop_size;
+                crop_height = this.crop_size;
+            } else {
+                crop_width = this.crop_size.width;
+                crop_height = this.crop_size.height;
+            }
+
+            image = await image.center_crop(crop_width, crop_height);
+        }
+
+        /** @type {HeightWidth} */
+        const reshaped_input_size = [image.height, image.width];
+
+        // NOTE: All pixel-level manipulation (i.e., modifying `pixelData`)
+        // occurs with data in the hwc format (height, width, channels),
+        // to emulate the behavior of the original Python code (w/ numpy).
+        let pixelData = Float32Array.from(image.data);
+        let imgDims = [image.height, image.width, image.channels];
+
+        if (this.do_rescale) {
+            this.rescale(pixelData);
+        }
+
+        if (do_normalize ?? this.do_normalize) {
+            let image_mean = this.image_mean;
+            if (!Array.isArray(this.image_mean)) {
+                image_mean = new Array(image.channels).fill(image_mean);
+            }
+
+            let image_std = this.image_std;
+            if (!Array.isArray(this.image_std)) {
+                image_std = new Array(image.channels).fill(image_std);
+            }
+
+            if (image_mean.length !== image.channels || image_std.length !== image.channels) {
+                throw new Error(`When set to arrays, the length of \`image_mean\` (${image_mean.length}) and \`image_std\` (${image_std.length}) must match the number of channels in the image (${image.channels}).`);
+            }
+
+            for (let i = 0; i < pixelData.length; i += image.channels) {
+                for (let j = 0; j < image.channels; ++j) {
+                    pixelData[i + j] = (pixelData[i + j] - image_mean[j]) / image_std[j];
+                }
+            }
+        }
+
+        // do padding after rescaling/normalizing
+        if (do_pad ?? this.do_pad) {
+            if (this.pad_size) {
+                const padded = this.pad_image(pixelData, [image.height, image.width, image.channels], this.pad_size);
+                [pixelData, imgDims] = padded; // Update pixel data and image dimensions
+            } else if (this.size_divisibility) {
+                const [paddedWidth, paddedHeight] = enforce_size_divisibility([imgDims[1], imgDims[0]], this.size_divisibility);
+                [pixelData, imgDims] = this.pad_image(pixelData, imgDims, { width: paddedWidth, height: paddedHeight });
+            }
+        }
+
+        if (do_flip_channel_order ??
this.do_flip_channel_order) { + if (imgDims[2] !== 3) { + throw new Error('Flipping channel order is only supported for RGB images.'); + } + // Convert RGB to BGR + for (let i = 0; i < pixelData.length; i += 3) { + const temp = pixelData[i]; + pixelData[i] = pixelData[i + 2]; + pixelData[i + 2] = temp; + } + } + + const pixel_values = new Tensor('float32', pixelData, imgDims) + .permute(2, 0, 1); // convert to channel dimension format (hwc -> chw) + + return { + original_size: [srcHeight, srcWidth], + reshaped_input_size: reshaped_input_size, + pixel_values, + } + } + + /** + * Calls the feature extraction process on an array of images, + * preprocesses each image, and concatenates the resulting + * features into a single Tensor. + * @param {RawImage[]} images The image(s) to extract features from. + * @param {...any} args Additional arguments. + * @returns {Promise} An object containing the concatenated pixel values (and other metadata) of the preprocessed images. + */ + async _call(images, ...args) { + if (!Array.isArray(images)) { + images = [images]; + } + /** @type {PreprocessedImage[]} */ + const imageData = await Promise.all(images.map(x => this.preprocess(x))); + + // Stack pixel values + const pixel_values = stack(imageData.map(x => x.pixel_values), 0); + + return { + pixel_values, + + // Original sizes of images + original_sizes: imageData.map(x => x.original_size), + + // Reshaped sizes of images, before padding or cropping + reshaped_input_sizes: imageData.map(x => x.reshaped_input_size), + } + } + + + /** + * Instantiate one of the processor classes of the library from a pretrained model. + * + * The processor class to instantiate is selected based on the `image_processor_type` (or `feature_extractor_type`; legacy) + * property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) + * + * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either: + * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. + * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + * user or organization name, like `dbmdz/bert-base-german-cased`. + * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. + * @param {import('../utils/hub.js').PretrainedOptions} options Additional options for loading the processor. + * + * @returns {Promise} A new instance of the Processor class. + */ + static async from_pretrained(pretrained_model_name_or_path, options) { + const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, true, options); + return new this(preprocessorConfig); + } +} diff --git a/src/base/processing_utils.js b/src/base/processing_utils.js new file mode 100644 index 000000000..2e457e20d --- /dev/null +++ b/src/base/processing_utils.js @@ -0,0 +1,145 @@ + +/** + * @file Processors are used to prepare inputs (e.g., text, image or audio) for a model. + * + * **Example:** Using a `WhisperProcessor` to prepare an audio input for a model. 
+ * ```javascript + * import { AutoProcessor, read_audio } from '@huggingface/transformers'; + * + * const processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en'); + * const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000); + * const { input_features } = await processor(audio); + * // Tensor { + * // data: Float32Array(240000) [0.4752984642982483, 0.5597258806228638, 0.56434166431427, ...], + * // dims: [1, 80, 3000], + * // type: 'float32', + * // size: 240000, + * // } + * ``` + * + * @module processors + */ +import { PROCESSOR_NAME } from '../utils/constants.js'; +import { + Callable, +} from '../utils/generic.js'; +import { getModelJSON } from '../utils/hub.js'; + +/** + * @typedef {Object} ProcessorProperties Additional processor-specific properties. + * @typedef {import('../utils/hub.js').PretrainedOptions & ProcessorProperties} PretrainedProcessorOptions + */ + + +/** + * Represents a Processor that extracts features from an input. + */ +export class Processor extends Callable { + static classes = [ + 'image_processor_class', + 'tokenizer_class', + 'feature_extractor_class', + ] + static uses_processor_config = false; + + /** + * Creates a new Processor with the given components + * @param {Object} config + * @param {Record} components + */ + constructor(config, components) { + super(); + this.config = config; + this.components = components; + } + + /** + * @returns {import('./image_processors_utils.js').ImageProcessor|undefined} The image processor of the processor, if it exists. + */ + get image_processor() { + return this.components.image_processor; + } + + /** + * @returns {import('../tokenizers.js').PreTrainedTokenizer|undefined} The tokenizer of the processor, if it exists. + */ + get tokenizer() { + return this.components.tokenizer; + } + + /** + * @returns {import('./feature_extraction_utils.js').FeatureExtractor|undefined} The feature extractor of the processor, if it exists. + */ + get feature_extractor() { + return this.components.feature_extractor; + } + + apply_chat_template(messages, options = {}) { + if (!this.tokenizer) { + throw new Error('Unable to apply chat template without a tokenizer.'); + } + return this.tokenizer.apply_chat_template(messages, { + tokenize: false, // default to false + ...options, + }); + } + + batch_decode(...args) { + if (!this.tokenizer) { + throw new Error('Unable to decode without a tokenizer.'); + } + return this.tokenizer.batch_decode(...args); + } + + + /** + * Calls the feature_extractor function with the given input. + * @param {any} input The input to extract features from. + * @param {...any} args Additional arguments. + * @returns {Promise} A Promise that resolves with the extracted features. + */ + async _call(input, ...args) { + for (const item of [this.image_processor, this.feature_extractor, this.tokenizer]) { + if (item) { + return item(input, ...args); + } + } + throw new Error('No image processor, feature extractor, or tokenizer found.'); + } + + + /** + * Instantiate one of the processor classes of the library from a pretrained model. + * + * The processor class to instantiate is selected based on the `feature_extractor_type` property of the config object + * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) + * + * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. 
Can be either: + * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. + * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + * user or organization name, like `dbmdz/bert-base-german-cased`. + * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. + * @param {PretrainedProcessorOptions} options Additional options for loading the processor. + * + * @returns {Promise} A new instance of the Processor class. + */ + static async from_pretrained(pretrained_model_name_or_path, options) { + + const [config, components] = await Promise.all([ + // TODO: + this.uses_processor_config + ? getModelJSON(pretrained_model_name_or_path, PROCESSOR_NAME, true, options) + : {}, + Promise.all( + this.classes + .filter((cls) => cls in this) + .map(async (cls) => { + const component = await this[cls].from_pretrained(pretrained_model_name_or_path, options); + return [cls.replace(/_class$/, ''), component]; + }) + ).then(Object.fromEntries) + ]); + + return new this(config, components); + } +} diff --git a/src/configs.js b/src/configs.js index 2a8666e30..2c277aeb1 100644 --- a/src/configs.js +++ b/src/configs.js @@ -68,6 +68,7 @@ function getNormalizedConfig(config) { case 'llava': case 'paligemma': case 'florence2': + case 'llava_onevision': init_normalized_config = getNormalizedConfig(config.text_config); break; case 'moondream1': @@ -76,6 +77,9 @@ function getNormalizedConfig(config) { case 'musicgen': init_normalized_config = getNormalizedConfig(config.decoder); break; + case 'multi_modality': + init_normalized_config = getNormalizedConfig(config.language_config); + break; // Decoder-only models case 'gpt2': @@ -105,6 +109,7 @@ function getNormalizedConfig(config) { case 'mistral': case 'starcoder2': case 'qwen2': + case 'qwen2_vl': mapping['num_heads'] = 'num_key_value_heads'; mapping['num_layers'] = 'num_hidden_layers'; mapping['hidden_size'] = 'hidden_size'; @@ -225,14 +230,12 @@ function getNormalizedConfig(config) { */ export function getKeyValueShapes(config, { prefix = 'past_key_values', + batch_size=1, } = {}) { /** @type {Record} */ const decoderFeeds = {}; const normalized_config = config.normalized_config; - // TODO support batches (i.e., batch_size > 1) - const batch_size = 1; - if (normalized_config.is_encoder_decoder && ( 'num_encoder_heads' in normalized_config && 'num_decoder_heads' in normalized_config )) { diff --git a/src/models.js b/src/models.js index f2ec1ed0f..177a87601 100644 --- a/src/models.js +++ b/src/models.js @@ -61,7 +61,6 @@ import { } from './utils/generic.js'; import { - isIntegralNumber, mergeArrays, pick, } from './utils/core.js'; @@ -99,17 +98,20 @@ import { import { cat, - full_like, mean, + zeros, + zeros_like, ones, ones_like, + full, + full_like, stack, std_mean, Tensor, - zeros_like, } from './utils/tensor.js'; +import { RawImage } from './utils/image.js'; -import { dynamic_time_warping, medianFilter } from './utils/maths.js'; +import { dynamic_time_warping, max, medianFilter } from './utils/maths.js'; import { EosTokenCriteria, MaxLengthCriteria, StoppingCriteriaList } from './generation/stopping_criteria.js'; import { LogitsSampler } from './generation/logits_sampler.js'; import { apis } from './env.js'; @@ -128,6 +130,7 @@ const MODEL_TYPES = { MaskGeneration: 5, ImageTextToText: 6, Musicgen: 7, + MultiModality: 8, } ////////////////////////////////////////////////// @@ -386,7 +389,7 @@ async function sessionRun(session, inputs) { } catch 
(e) { // This usually occurs when the inputs are of the wrong type. console.error(`An error occurred during model execution: "${e}".`); - console.error('Inputs given to model:', checkedInputs); + console.error('Inputs given to model:', checkedInputs) throw e; } } @@ -579,11 +582,11 @@ async function imageTextToTextForward(self, { if (!inputs_embeds) { // 1. Extract the input embeddings - inputs_embeds = await self.encode_text({ input_ids }); + inputs_embeds = await self.encode_text({ input_ids, ...kwargs }); // 2. Possibly, merge text and images if (pixel_values && input_ids.dims[1] !== 1) { - const image_features = await self.encode_image({ pixel_values }); + const image_features = await self.encode_image({ pixel_values, ...kwargs }); ({ inputs_embeds, attention_mask } = self._merge_input_ids_with_image_features({ image_features, @@ -604,6 +607,16 @@ async function imageTextToTextForward(self, { } } + if (!position_ids) { + + if (self.config.model_type === 'qwen2_vl') { + // Special case for qwen2_vl models + // @ts-ignore + const { image_grid_thw, video_grid_thw } = kwargs; + [position_ids] = self.get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) + } + } + const outputs = await decoderForward(self, { inputs_embeds, past_key_values, @@ -615,34 +628,54 @@ async function imageTextToTextForward(self, { return outputs; } -function createPositionIds(model_inputs, past_key_values = null) { - // If the model supports providing position_ids, we create position_ids on the fly for batch generation, - // by computing the cumulative sum of the attention mask along the sequence length dimension. - // - // Equivalent to: - // position_ids = attention_mask.long().cumsum(-1) - 1 - // position_ids.masked_fill_(attention_mask == 0, 1) - // if past_key_values: - // position_ids = position_ids[:, -input_ids.shape[1] :] - const { input_ids, inputs_embeds, attention_mask } = model_inputs; +/** + * Helper function to perform the following: + * ```python + * x = attention_mask.long().cumsum(-1) - 1 + * x.masked_fill_(attention_mask == 0, 1) + * ``` + * @param {Tensor} attention_mask + * @returns {{data: BigInt64Array, dims: number[]}} + */ +function cumsum_masked_fill(attention_mask) { const [bz, seq_len] = attention_mask.dims; + const attn_mask_data = attention_mask.data; - const data = new BigInt64Array(attention_mask.data.length); + const data = new BigInt64Array(attn_mask_data.length); for (let i = 0; i < bz; ++i) { const start = i * seq_len; let sum = BigInt(0); for (let j = 0; j < seq_len; ++j) { const index = start + j; - if (attention_mask.data[index] === 0n) { + if (attn_mask_data[index] === 0n) { data[index] = BigInt(1); } else { // === 1n data[index] = sum; - sum += attention_mask.data[index]; + sum += attn_mask_data[index]; } } } + return { data, dims: attention_mask.dims }; + +} + +/** + * If the model supports providing position_ids, we create position_ids on the fly for batch generation, + * by computing the cumulative sum of the attention mask along the sequence length dimension. 
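+ * For example (illustrative values), an attention mask row of `[1, 1, 1, 0, 0]` yields
+ * position ids `[0, 1, 2, 1, 1]`: the cumulative sum minus one for attended tokens, with
+ * padded positions filled with 1.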
+ * + * Equivalent to: + * ```python + * position_ids = attention_mask.long().cumsum(-1) - 1 + * position_ids.masked_fill_(attention_mask == 0, 1) + * if past_key_values: + * position_ids = position_ids[:, -input_ids.shape[1] :] + * ``` + */ +function createPositionIds(model_inputs, past_key_values = null) { + const { input_ids, inputs_embeds, attention_mask } = model_inputs; - let position_ids = new Tensor('int64', data, attention_mask.dims); + const { data, dims } = cumsum_masked_fill(attention_mask); + let position_ids = new Tensor('int64', data, dims); if (past_key_values) { const offset = -(input_ids ?? inputs_embeds).dims.at(1); position_ids = position_ids.slice(null, [offset, null]); @@ -716,6 +749,52 @@ function image_text_to_text_prepare_inputs_for_generation(self, ...args) { } } +function multimodality_prepare_inputs_for_generation(self, input_ids, model_inputs, generation_config) { + const has_past_key_values = !!model_inputs.past_key_values; + + if (generation_config.guidance_scale !== null && generation_config.guidance_scale > 1) { + if (has_past_key_values) { + model_inputs.input_ids = cat([ + model_inputs.input_ids, + model_inputs.input_ids, + ], 0) + // NOTE: attention_mask handled in generation + } else { + model_inputs.input_ids = cat([ + model_inputs.input_ids, + full_like(model_inputs.input_ids, BigInt(generation_config.pad_token_id)), + ], 0); + model_inputs.attention_mask = cat([ + model_inputs.attention_mask, + full_like(model_inputs.attention_mask, 0n), + ], 0); + } + } + + if (has_past_key_values || !model_inputs.pixel_values) { + model_inputs.pixel_values = full([0, 0, 3, 384, 384], 1.0); + } + + if (has_past_key_values) { + const num_img_tokens = 0; + const num_text_tokens = 1; + const has_image = num_img_tokens > 0 ? 1 : 0; + + const batch_size = 1; + model_inputs.images_seq_mask = new Tensor( + 'bool', + new Array(num_img_tokens + num_text_tokens).fill(true).fill(false, 0, num_text_tokens), + [batch_size, num_img_tokens + num_text_tokens], + ); + model_inputs.images_emb_mask = new Tensor( + 'bool', + new Array(num_img_tokens).fill(!!has_image), + [batch_size, 1, num_img_tokens], + ); + } + return model_inputs; +} + ////////////////////////////////////////////////// ////////////////////////////////////////////////// @@ -769,6 +848,11 @@ export class PreTrainedModel extends Callable { this._prepare_inputs_for_generation = image_text_to_text_prepare_inputs_for_generation; break; + case MODEL_TYPES.MultiModality: + this.can_generate = true; + this._prepare_inputs_for_generation = multimodality_prepare_inputs_for_generation; + break; + default: // should be MODEL_TYPES.EncoderOnly this._forward = encoderForward; @@ -912,6 +996,21 @@ export class PreTrainedModel extends Callable { }, options), ]); + } else if (modelType === MODEL_TYPES.MultiModality) { + info = await Promise.all([ + constructSessions(pretrained_model_name_or_path, { + prepare_inputs_embeds: 'prepare_inputs_embeds', + model: 'language_model', + lm_head: 'lm_head', + gen_head: 'gen_head', + gen_img_embeds: 'gen_img_embeds', + image_decode: 'image_decode', + }, options), + getOptionalConfigs(pretrained_model_name_or_path, { + generation_config: 'generation_config.json', + }, options), + ]); + } else { // should be MODEL_TYPES.EncoderOnly if (modelType !== MODEL_TYPES.EncoderOnly) { console.warn(`Model type for '${modelName ?? config?.model_type}' not found, assuming encoder-only architecture. 
Please report this at ${GITHUB_ISSUE_URL}.`) @@ -1658,7 +1757,8 @@ export class PreTrainedModel extends Callable { const dtype = session?.config?.kv_cache_dtype ?? 'float32'; const empty = (dtype === 'float16') ? new Uint16Array() : []; - const shapes = getKeyValueShapes(this.config); + const batch_size = (decoderFeeds[this.main_input_name] ?? decoderFeeds.attention_mask).dims?.[0] ?? 1; + const shapes = getKeyValueShapes(this.config, { batch_size }); for (const name in shapes) { decoderFeeds[name] = new Tensor(dtype, empty, shapes[name]); @@ -3277,6 +3377,7 @@ export class LlavaForConditionalGeneration extends LlavaPreTrainedModel { } ////////////////////////////////////////////////// +export class LlavaOnevisionForConditionalGeneration extends LlavaForConditionalGeneration { } // NOTE: extends LlavaForConditionalGeneration export class Moondream1ForConditionalGeneration extends LlavaForConditionalGeneration { } // NOTE: extends LlavaForConditionalGeneration export class Florence2PreTrainedModel extends PreTrainedModel { @@ -3437,7 +3538,7 @@ export class CLIPModel extends CLIPPreTrainedModel { } * The text model from CLIP without any head or projection on top. */ export class CLIPTextModel extends CLIPPreTrainedModel { - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'text_model'; @@ -3472,7 +3573,7 @@ export class CLIPTextModel extends CLIPPreTrainedModel { * ``` */ export class CLIPTextModelWithProjection extends CLIPPreTrainedModel { - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'text_model'; @@ -3484,7 +3585,7 @@ export class CLIPTextModelWithProjection extends CLIPPreTrainedModel { * The vision model from CLIP without any head or projection on top. 
*/ export class CLIPVisionModel extends CLIPPreTrainedModel { - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'vision_model'; @@ -3519,7 +3620,7 @@ export class CLIPVisionModel extends CLIPPreTrainedModel { * ``` */ export class CLIPVisionModelWithProjection extends CLIPPreTrainedModel { - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'vision_model'; @@ -3605,8 +3706,7 @@ export class SiglipModel extends SiglipPreTrainedModel { } * ``` */ export class SiglipTextModel extends SiglipPreTrainedModel { - - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'text_model'; @@ -3641,7 +3741,7 @@ export class SiglipTextModel extends SiglipPreTrainedModel { * ``` */ export class SiglipVisionModel extends CLIPPreTrainedModel { - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'vision_model'; @@ -3655,6 +3755,31 @@ export class ChineseCLIPPreTrainedModel extends PreTrainedModel { } export class ChineseCLIPModel extends ChineseCLIPPreTrainedModel { } ////////////////////////////////////////////////// +////////////////////////////////////////////////// +// JinaCLIP models +export class JinaCLIPPreTrainedModel extends PreTrainedModel { } + +export class JinaCLIPModel extends JinaCLIPPreTrainedModel { } + +export class JinaCLIPTextModel extends JinaCLIPPreTrainedModel { + /** @type {typeof PreTrainedModel.from_pretrained} */ + static async from_pretrained(pretrained_model_name_or_path, options = {}) { + // Update default model file name if not provided + options.model_file_name ??= 'text_model'; + return super.from_pretrained(pretrained_model_name_or_path, options); + } +} + +export class JinaCLIPVisionModel extends JinaCLIPPreTrainedModel { + /** @type {typeof PreTrainedModel.from_pretrained} */ + static async from_pretrained(pretrained_model_name_or_path, options = {}) { + // Update default model file name if not provided + options.model_file_name ??= 'vision_model'; + return super.from_pretrained(pretrained_model_name_or_path, options); + } +} +////////////////////////////////////////////////// + ////////////////////////////////////////////////// // CLIPSeg models @@ -3898,6 +4023,285 @@ export class Qwen2Model extends Qwen2PreTrainedModel { } export class Qwen2ForCausalLM extends Qwen2PreTrainedModel { } ////////////////////////////////////////////////// +export class Qwen2VLPreTrainedModel extends PreTrainedModel { + forward_params = [ + // Text inputs + 'input_ids', + 'attention_mask', + 'position_ids', + 'past_key_values', + + // Vision inputs + 'pixel_values', + 'image_grid_thw', + ]; +} +export class Qwen2VLForConditionalGeneration extends Qwen2VLPreTrainedModel { + + /** + * Calculate the 3D rope index based on image and video's temporal, height and width in LLM. 
+     *
+     * Explanation:
+     * Each embedding sequence contains vision and text embeddings, or just text embeddings.
+     *
+     * For a pure text embedding sequence, the rotary position embedding is no different from that of modern LLMs.
+     * Examples:
+     * input_ids: [T T T T T], here T is for text.
+     * temporal position_ids: [0, 1, 2, 3, 4]
+     * height position_ids: [0, 1, 2, 3, 4]
+     * width position_ids: [0, 1, 2, 3, 4]
+     *
+     * For a sequence with vision and text embeddings, we calculate 3D rotary position embeddings for the vision part
+     * and 1D rotary position embeddings for the text part.
+     * Examples:
+     * Assume we have a video input with 3 temporal patches, 2 height patches and 2 width patches.
+     * input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
+     * vision temporal position_ids: [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
+     * vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
+     * vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
+     * text temporal position_ids: [3, 4, 5, 6, 7]
+     * text height position_ids: [3, 4, 5, 6, 7]
+     * text width position_ids: [3, 4, 5, 6, 7]
+     * Here we calculate the text start position_ids as the max vision position_ids plus 1.
+     *
+     * @param {Tensor} input_ids Indices of input sequence tokens in the vocabulary. Tensor of shape `(batch_size, sequence_length)`.
+     * @param {Tensor} image_grid_thw (Optional) The temporal, height and width of feature shape of each image in LLM. Tensor of shape `(num_images, 3)`.
+     * @param {Tensor} video_grid_thw (Optional) The temporal, height and width of feature shape of each video in LLM. Tensor of shape `(num_videos, 3)`.
+     * @param {Tensor} attention_mask (Optional) Mask to avoid performing attention on padding token indices. Tensor of shape `(batch_size, sequence_length)`. Mask values selected in `[0, 1]`:
+     * - 1 for tokens that are **not masked**,
+     * - 0 for tokens that are **masked**.
+     * @returns {[Tensor, Tensor]} [position_ids, mrope_position_deltas] with:
+     * - position_ids: Tensor of shape `(3, batch_size, sequence_length)`.
+     * - mrope_position_deltas: Tensor of shape `(batch_size)`.
+     */
+    get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask) {
+        // @ts-ignore
+        const { vision_config, image_token_id, video_token_id, vision_start_token_id } = this.config;
+        const spatial_merge_size = vision_config.spatial_merge_size ?? 2;
+
+        const mrope_position_deltas = [];
+        if (image_grid_thw || video_grid_thw) {
+            let total_input_ids = input_ids.tolist();
+            if (!attention_mask) {
+                attention_mask = ones_like(input_ids);
+            }
+
+            const attention_mask_list = attention_mask.tolist();
+            const position_ids_list = Array.from({ length: 3 }, _ => Array.from({ length: input_ids.dims[0] }, _ => Array.from({ length: input_ids.dims[1] }, _ => 1)));
+
+            const image_grid_thw_list = image_grid_thw ? image_grid_thw.tolist() : [];
+            const video_grid_thw_list = video_grid_thw ?
video_grid_thw.tolist() : []; + + let image_index = 0; + let video_index = 0; + for (let i = 0; i < total_input_ids.length; ++i) { + const ids = total_input_ids[i].filter((_, j) => attention_mask_list[i][j] == 1); + + const vision_start_indices = ids.reduce((acc, x, idx) => { + if (x == vision_start_token_id) acc.push(idx); + return acc; + }, []); + + const vision_tokens = vision_start_indices.map(x => ids[x + 1]); + const image_nums = vision_tokens.filter(x => x == image_token_id).length; + const video_nums = vision_tokens.filter(x => x == video_token_id).length; + + let llm_pos_ids_list = []; + let st = 0; + let remain_images = image_nums; + let remain_videos = video_nums; + for (let j = 0; j < vision_tokens.length; ++j) { + const next_image_token = ids.findIndex((x, i) => i > st && x == image_token_id); + const next_video_token = ids.findIndex((x, i) => i > st && x == video_token_id); + + const ed_image = (remain_images > 0 && next_image_token !== -1) + ? next_image_token + : ids.length + 1; + + const ed_video = (remain_videos > 0 && next_video_token !== -1) + ? next_video_token + : ids.length + 1; + + let ed; + let t, h, w; + if (ed_image < ed_video) { + ([t, h, w] = image_grid_thw_list[image_index]); + ++image_index; + --remain_images; + ed = ed_image; + } else { + ([t, h, w] = video_grid_thw_list[video_index]); + ++video_index; + --remain_videos; + ed = ed_video; + } + + const [llm_grid_t, llm_grid_h, llm_grid_w] = [ + Number(t), + Math.floor(Number(h) / spatial_merge_size), + Math.floor(Number(w) / spatial_merge_size) + ] + const text_len = ed - st; + const st_idx = llm_pos_ids_list.length > 0 + ? max(llm_pos_ids_list.at(-1))[0] + 1 + : 0; + + llm_pos_ids_list.push( + Array.from({ length: 3 * text_len }, (_, i) => st_idx + (i % text_len)) + ) + + const offset = text_len + st_idx; + const grid_size = llm_grid_t * llm_grid_h * llm_grid_w; + const t_index = Array.from({ length: grid_size }, (_, i) => offset + Math.floor(i / (llm_grid_h * llm_grid_w))) + const h_index = Array.from({ length: grid_size }, (_, i) => offset + Math.floor(i / llm_grid_w) % llm_grid_h) + const w_index = Array.from({ length: grid_size }, (_, i) => offset + i % llm_grid_w) + + llm_pos_ids_list.push([t_index, h_index, w_index].flat()) + + st = ed + grid_size; + } + + if (st < ids.length) { + const st_idx = llm_pos_ids_list.length > 0 + ? 
max(llm_pos_ids_list.at(-1))[0] + 1 + : 0; + const text_len = ids.length - st; + + llm_pos_ids_list.push( + Array.from({ length: 3 * text_len }, (_, i) => (st_idx + (i % text_len))) + ) + } + + // NOTE: Each item in llm_pos_ids_list is an array of shape (3, text_len), + // meaning to perform concatenation along dim=1, we can do the following: + const num_items = llm_pos_ids_list.reduce((acc, x) => acc + x.length, 0); + const llm_positions = new Array(num_items); + let index = 0; + for (let x = 0; x < 3; ++x) { + for (let y = 0; y < llm_pos_ids_list.length; ++y) { + const val = llm_pos_ids_list[y]; + const text_len = val.length / 3; + for (let z = x * text_len; z < (x + 1) * text_len; ++z) { + llm_positions[index++] = val[z]; + } + } + } + + let count = 0; + const attn_mask = attention_mask_list[i]; + for (let y = 0; y < attn_mask.length; ++y) { + if (attn_mask[y] == 1) { + for (let x = 0; x < 3; ++x) { + position_ids_list[x][i][y] = llm_positions[x * num_items / 3 + count]; + } + ++count; + } + } + + const max_llm_positions = max(llm_positions)[0]; + mrope_position_deltas.push(max_llm_positions + 1 - total_input_ids[i].length); + } + + return [ + new Tensor('int64', position_ids_list.flat(Infinity), [3, input_ids.dims[0], input_ids.dims[1]]), + new Tensor('int64', mrope_position_deltas, [mrope_position_deltas.length, 1]), + ]; + + } else { // Text-only + if (attention_mask) { + const { data, dims } = cumsum_masked_fill(attention_mask); + + const position_ids = BigInt64Array.from( + { length: 3 * data.length }, + (_, i) => data[i % data.length] + ); + const mrope_position_deltas = Array.from( + { length: dims[0] }, + (_, i) => max(data.subarray(dims[1] * i, dims[1] * (i + 1)))[0] + 1 + dims[1] + ); + + return [ + new Tensor('int64', position_ids, [3, ...dims]), + new Tensor('int64', mrope_position_deltas, [mrope_position_deltas.length, 1]), + ] + } else { + const [batch_size, seq_length] = input_ids.dims; + const position_ids = BigInt64Array.from( + { length: 3 * batch_size * seq_length }, + (_, i) => BigInt(Math.floor(i % seq_length / batch_size)), + ); + + return [ + new Tensor('int64', position_ids, [3, ...input_ids.dims]), + zeros([batch_size, 1]), + ] + } + } + } + + async encode_image({ pixel_values, image_grid_thw }) { + const features = (await sessionRun(this.sessions['vision_encoder'], { pixel_values, grid_thw: image_grid_thw })).image_features; + return features; + } + + _merge_input_ids_with_image_features({ + inputs_embeds, + image_features, + input_ids, + attention_mask, + }) { + // @ts-ignore + const { image_token_id } = this.config; + const image_tokens = input_ids.tolist().map(ids => + ids.reduce((acc, x, idx) => { + if (x == image_token_id) acc.push(idx); + return acc; + }, []) + ); + const n_image_tokens = image_tokens.reduce((acc, x) => acc + x.length, 0); + const n_image_features = image_features.dims[0]; + if (n_image_tokens !== n_image_features) { + throw new Error(`Image features and image tokens do not match: tokens: ${n_image_tokens}, features ${n_image_features}`); + } + + // Equivalent to performing a masked_scatter + let img = 0; + for (let i = 0; i < image_tokens.length; ++i) { + const tokens = image_tokens[i]; + const embeds = inputs_embeds[i]; + for (let j = 0; j < tokens.length; ++j) { + embeds[tokens[j]].data.set(image_features[img++].data) + } + } + return { inputs_embeds, attention_mask } + } + + prepare_inputs_for_generation(input_ids, model_inputs, generation_config) { + // Overwritten -- in specific circumstances we don't want to forward image inputs to 
the model + if (model_inputs.attention_mask && !model_inputs.position_ids) { + // Calculate position_ids and rope_deltas + if (!model_inputs.past_key_values) { + ([model_inputs.position_ids, model_inputs.rope_deltas] = this.get_rope_index( + model_inputs.input_ids, + model_inputs.image_grid_thw, + model_inputs.video_grid_thw, + model_inputs.attention_mask, + )); + + } else { + model_inputs.pixel_values = null; + // model_inputs.pixel_values_videos = null; + + const delta = BigInt(Object.values(model_inputs.past_key_values)[0].dims.at(-2)); + const rope_deltas_list = model_inputs.rope_deltas.map(x => delta + x); + model_inputs.position_ids = stack([rope_deltas_list, rope_deltas_list, rope_deltas_list], 0) + } + } + + return model_inputs; + } +} + ////////////////////////////////////////////////// // Phi models @@ -3985,6 +4389,17 @@ export class ViTForImageClassification extends ViTPreTrainedModel { } ////////////////////////////////////////////////// + +////////////////////////////////////////////////// +export class VitPosePreTrainedModel extends PreTrainedModel { } + +/** + * The VitPose model with a pose estimation head on top. + */ +export class VitPoseForPoseEstimation extends VitPosePreTrainedModel { } +////////////////////////////////////////////////// + + ////////////////////////////////////////////////// export class PvtPreTrainedModel extends PreTrainedModel { } export class PvtModel extends PvtPreTrainedModel { } @@ -5583,8 +5998,7 @@ export class ClapModel extends ClapPreTrainedModel { } * ``` */ export class ClapTextModelWithProjection extends ClapPreTrainedModel { - - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'text_model'; @@ -5619,7 +6033,7 @@ export class ClapTextModelWithProjection extends ClapPreTrainedModel { * ``` */ export class ClapAudioModelWithProjection extends ClapPreTrainedModel { - /** @type {PreTrainedModel.from_pretrained} */ + /** @type {typeof PreTrainedModel.from_pretrained} */ static async from_pretrained(pretrained_model_name_or_path, options = {}) { // Update default model file name if not provided options.model_file_name ??= 'audio_model'; @@ -5970,6 +6384,138 @@ export class DecisionTransformerModel extends DecisionTransformerPreTrainedModel ////////////////////////////////////////////////// +export class MultiModalityPreTrainedModel extends PreTrainedModel { } +export class MultiModalityCausalLM extends MultiModalityPreTrainedModel { + forward_params = [ + // prepare_inputs_embeds + 'input_ids', + 'pixel_values', + 'images_seq_mask', + 'images_emb_mask', + + // language_model + 'attention_mask', + 'position_ids', + 'past_key_values', + ]; + + constructor(...args) { + super(...args); + + // State-based approach to switch out which heads to use during generation + this._generation_mode = 'text'; + } + + async forward(model_inputs) { + const mode = this._generation_mode ?? 
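+            // `generate()` sets the mode to 'text' and `generate_images()` sets it to 'image';
+            // if `forward` is called directly, fall back to text generation.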
'text'; + + // TODO support re-using PKVs for input_ids.dims[1] !== 1 + // if (model_inputs.past_key_values) { + // // && model_inputs.input_ids.dims[1] === 1 + // } + + let output_1; + if (mode === 'text' || !model_inputs.past_key_values) { + const session = this.sessions['prepare_inputs_embeds']; + const prep_inputs = pick(model_inputs, session.inputNames); + output_1 = await sessionRun(session, prep_inputs); + } else { + const session = this.sessions['gen_img_embeds']; + const prep_inputs = pick({ + image_ids: model_inputs.input_ids, + }, session.inputNames); + output_1 = await sessionRun(session, prep_inputs); + } + + const input_2 = { ...model_inputs, ...output_1 } + const output_2 = await decoderForward(this, input_2); + + const head = this.sessions[ + mode === 'text' + ? 'lm_head' + : 'gen_head' + ]; + if (!head) { + throw new Error(`Unable to find "${head}" generation head`); + } + + const output_3 = await sessionRun(head, pick(output_2, head.inputNames)) + + return { + ...output_1, + ...output_2, + ...output_3, + }; + } + + /** + * @param {import('./generation/parameters.js').GenerationFunctionParameters} options + */ + async generate(options) { + this._generation_mode = 'text'; + return super.generate(options); + } + + /** + * @param {import('./generation/parameters.js').GenerationFunctionParameters} options + */ + async generate_images(options) { + this._generation_mode = 'image'; + + const start_num_tokens = (options.inputs ?? options[this.main_input_name]).dims[1]; + const all_tokens = await super.generate(options); + + const generated_tokens = (/** @type {Tensor} */(all_tokens)).slice(null, [start_num_tokens, null]) + + const image_decode = this.sessions['image_decode']; + const { decoded_image } = await sessionRun(image_decode, { + generated_tokens, + }); + + // Equivalent to `np.clip((dec + 1) / 2 * 255, 0, 255)` + const clamped = decoded_image + .add_(1) + .mul_(255 / 2) + .clamp_(0, 255) + .to('uint8'); + + // Return as a list of images + const images = []; + for (const tensor of clamped) { + const img = RawImage.fromTensor(tensor); + images.push(img); + } + return images; + } +} + +export class MgpstrModelOutput extends ModelOutput { + constructor({ char_logits, bpe_logits, wp_logits }) { + super(); + this.char_logits = char_logits; + this.bpe_logits = bpe_logits; + this.wp_logits = wp_logits; + } + + get logits() { + return [this.char_logits, this.bpe_logits, this.wp_logits]; + } +} + +export class MgpstrPreTrainedModel extends PreTrainedModel { } + +/** + * MGP-STR Model transformer with three classification heads on top + * (three A^3 modules and three linear layer on top of the transformer encoder output) for scene text recognition (STR). 
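+ *
+ * **Example:** Scene text recognition with `MgpstrForSceneTextRecognition` (illustrative sketch:
+ * the model id and image URL are placeholders, and the import path assumes the public package entry point).
+ * ```javascript
+ * import { MgpstrProcessor, MgpstrForSceneTextRecognition, RawImage } from '@huggingface/transformers';
+ *
+ * const model_id = 'onnx-community/mgp-str-base'; // placeholder model id
+ * const processor = await MgpstrProcessor.from_pretrained(model_id);
+ * const model = await MgpstrForSceneTextRecognition.from_pretrained(model_id);
+ *
+ * const image = await RawImage.read('https://example.com/scene-text.png'); // placeholder URL
+ * const inputs = await processor(image);
+ * const outputs = await model(inputs);
+ *
+ * // Fuse the char, bpe and wp predictions into the final text
+ * const { generated_text } = processor.batch_decode(outputs.logits);
+ * ```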
+ */ +export class MgpstrForSceneTextRecognition extends MgpstrPreTrainedModel { + /** + * @param {any} model_inputs + */ + async _call(model_inputs) { + return new MgpstrModelOutput(await super._call(model_inputs)); + } +} ////////////////////////////////////////////////// // PatchTST Transformer models @@ -6096,6 +6642,7 @@ const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([ ['clipseg', ['CLIPSegModel', CLIPSegModel]], ['chinese_clip', ['ChineseCLIPModel', ChineseCLIPModel]], ['siglip', ['SiglipModel', SiglipModel]], + ['jina_clip', ['JinaCLIPModel', JinaCLIPModel]], ['mobilebert', ['MobileBertModel', MobileBertModel]], ['squeezebert', ['SqueezeBertModel', SqueezeBertModel]], ['wav2vec2', ['Wav2Vec2Model', Wav2Vec2Model]], @@ -6149,6 +6696,7 @@ const MODEL_MAPPING_NAMES_ENCODER_ONLY = new Map([ ['mobilenet_v4', ['MobileNetV4Model', MobileNetV4Model]], ['maskformer', ['MaskFormerModel', MaskFormerModel]], + ['mgp-str', ['MgpstrForSceneTextRecognition', MgpstrForSceneTextRecognition]], ]); const MODEL_MAPPING_NAMES_ENCODER_DECODER = new Map([ @@ -6286,6 +6834,11 @@ const MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = new Map([ ['stablelm', ['StableLmForCausalLM', StableLmForCausalLM]], ]); +const MODEL_FOR_MULTIMODALITY_MAPPING_NAMES = new Map([ + ['multi_modality', ['MultiModalityCausalLM', MultiModalityCausalLM]], +]); + + const MODEL_FOR_MASKED_LM_MAPPING_NAMES = new Map([ ['bert', ['BertForMaskedLM', BertForMaskedLM]], ['roformer', ['RoFormerForMaskedLM', RoFormerForMaskedLM]], @@ -6329,8 +6882,10 @@ const MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = new Map([ const MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES = new Map([ ['llava', ['LlavaForConditionalGeneration', LlavaForConditionalGeneration]], + ['llava_onevision', ['LlavaOnevisionForConditionalGeneration', LlavaOnevisionForConditionalGeneration]], ['moondream1', ['Moondream1ForConditionalGeneration', Moondream1ForConditionalGeneration]], ['florence2', ['Florence2ForConditionalGeneration', Florence2ForConditionalGeneration]], + ['qwen2-vl', ['Qwen2VLForConditionalGeneration', Qwen2VLForConditionalGeneration]], ]); const MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = new Map([ @@ -6447,11 +7002,16 @@ const MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES = new Map([ ['sapiens', ['SapiensForNormalEstimation', SapiensForNormalEstimation]], ]) +const MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES = new Map([ + ['vitpose', ['VitPoseForPoseEstimation', VitPoseForPoseEstimation]], +]) + // NOTE: This is custom to Transformers.js, and is necessary because certain models // (e.g., CLIP) are split into vision and text components const MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES = new Map([ ['clip', ['CLIPVisionModelWithProjection', CLIPVisionModelWithProjection]], ['siglip', ['SiglipVisionModel', SiglipVisionModel]], + ['jina_clip', ['JinaCLIPVisionModel', JinaCLIPVisionModel]], ]) const MODEL_CLASS_TYPE_MAPPING = [ @@ -6463,6 +7023,7 @@ const MODEL_CLASS_TYPE_MAPPING = [ [MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Seq2Seq], [MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_TYPES.DecoderOnly], + [MODEL_FOR_MULTIMODALITY_MAPPING_NAMES, MODEL_TYPES.MultiModality], [MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_TYPES.Vision2Seq], @@ -6476,6 +7037,7 @@ const MODEL_CLASS_TYPE_MAPPING = [ [MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], 
[MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], + [MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES, MODEL_TYPES.EncoderOnly], [MODEL_FOR_MASK_GENERATION_MAPPING_NAMES, MODEL_TYPES.MaskGeneration], @@ -6506,6 +7068,7 @@ const CUSTOM_MAPPING = [ ['CLIPTextModelWithProjection', CLIPTextModelWithProjection, MODEL_TYPES.EncoderOnly], ['SiglipTextModel', SiglipTextModel, MODEL_TYPES.EncoderOnly], + ['JinaCLIPTextModel', JinaCLIPTextModel, MODEL_TYPES.EncoderOnly], ['ClapTextModelWithProjection', ClapTextModelWithProjection, MODEL_TYPES.EncoderOnly], ['ClapAudioModelWithProjection', ClapAudioModelWithProjection, MODEL_TYPES.EncoderOnly], ] @@ -6747,6 +7310,10 @@ export class AutoModelForNormalEstimation extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_NORMAL_ESTIMATION_MAPPING_NAMES]; } +export class AutoModelForPoseEstimation extends PretrainedMixin { + static MODEL_CLASS_MAPPINGS = [MODEL_FOR_POSE_ESTIMATION_MAPPING_NAMES]; +} + export class AutoModelForImageFeatureExtraction extends PretrainedMixin { static MODEL_CLASS_MAPPINGS = [MODEL_FOR_IMAGE_FEATURE_EXTRACTION_MAPPING_NAMES]; } diff --git a/src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js b/src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js new file mode 100644 index 000000000..9533f47b1 --- /dev/null +++ b/src/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js @@ -0,0 +1,90 @@ +import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; +import { Tensor } from '../../utils/tensor.js'; +import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js'; + + +export class ASTFeatureExtractor extends FeatureExtractor { + + constructor(config) { + super(config); + + const sampling_rate = this.config.sampling_rate; + const mel_filters = mel_filter_bank( + 256, // num_frequency_bins + this.config.num_mel_bins, // num_mel_filters + 20, // min_frequency + Math.floor(sampling_rate / 2), // max_frequency + sampling_rate, // sampling_rate + null, // norm + "kaldi", // mel_scale + true, // triangularize_in_mel_space + ); + + // Do padding: + for (let i = 0; i < mel_filters.length; ++i) { + mel_filters[i].push(0); + } + this.mel_filters = mel_filters; + + this.window = window_function(400, 'hann', { + periodic: false, + }) + + this.mean = this.config.mean; + this.std = this.config.std; + } + + /** + * Computes the log-Mel spectrogram of the provided audio waveform. + * @param {Float32Array|Float64Array} waveform The audio waveform to process. + * @param {number} max_length The maximum number of frames to return. + * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. 
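+ * Frames are computed Kaldi-style with a 400-sample window and a 160-sample hop,
+ * i.e. 25 ms and 10 ms respectively at the usual 16 kHz sampling rate.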
+ */ + async _extract_fbank_features(waveform, max_length) { + // NOTE: We don't pad/truncate since that is passed in as `max_num_frames` + return spectrogram( + waveform, + this.window, // window + 400, // frame_length + 160, // hop_length + { + fft_length: 512, + power: 2.0, + center: false, + preemphasis: 0.97, + mel_filters: this.mel_filters, + log_mel: 'log', + mel_floor: 1.192092955078125e-07, + remove_dc_offset: true, + + // Custom + max_num_frames: max_length, + transpose: true, + } + ) + } + + + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @returns {Promise<{ input_values: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. + */ + async _call(audio) { + validate_audio_inputs(audio, 'ASTFeatureExtractor'); + + const features = await this._extract_fbank_features(audio, this.config.max_length); + if (this.config.do_normalize) { + // Normalize the input audio spectrogram to have mean=0, std=0.5 + const denom = this.std * 2; + const features_data = features.data; + for (let i = 0; i < features_data.length; ++i) { + features_data[i] = (features_data[i] - this.mean) / denom; + } + } + + return { + input_values: features.unsqueeze_(0) + }; + } +} diff --git a/src/models/auto/feature_extraction_auto.js b/src/models/auto/feature_extraction_auto.js new file mode 100644 index 000000000..5a18eabb9 --- /dev/null +++ b/src/models/auto/feature_extraction_auto.js @@ -0,0 +1,41 @@ + +import { FEATURE_EXTRACTOR_NAME, GITHUB_ISSUE_URL } from '../../utils/constants.js'; +import { getModelJSON } from '../../utils/hub.js'; +import { FeatureExtractor } from '../../base/feature_extraction_utils.js'; +import * as AllFeatureExtractors from '../feature_extractors.js'; + +export class AutoFeatureExtractor { + + /** + * Instantiate one of the feature extractor classes of the library from a pretrained model. + * + * The processor class to instantiate is selected based on the `feature_extractor_type` property of + * the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) + * + * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either: + * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. + * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + * user or organization name, like `dbmdz/bert-base-german-cased`. + * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. + * @param {import('../../utils/hub.js').PretrainedOptions} options Additional options for loading the processor. + * + * @returns {Promise} A new instance of the Processor class. + */ + + /** @type {typeof FeatureExtractor.from_pretrained} */ + static async from_pretrained(pretrained_model_name_or_path, options={}) { + + const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, true, options); + + // Determine feature extractor class + const key = preprocessorConfig.feature_extractor_type; + const feature_extractor_class = AllFeatureExtractors[key]; + + if (!feature_extractor_class) { + throw new Error(`Unknown feature_extractor_type: '${key}'. 
Please report this at ${GITHUB_ISSUE_URL}.`); + } + + // Instantiate feature extractor + return new feature_extractor_class(preprocessorConfig); + } +} diff --git a/src/models/auto/image_processing_auto.js b/src/models/auto/image_processing_auto.js new file mode 100644 index 000000000..07f6c1a0d --- /dev/null +++ b/src/models/auto/image_processing_auto.js @@ -0,0 +1,29 @@ + +import { GITHUB_ISSUE_URL, IMAGE_PROCESSOR_NAME } from '../../utils/constants.js'; +import { getModelJSON } from '../../utils/hub.js'; +import { ImageProcessor } from '../../base/image_processors_utils.js'; +import * as AllImageProcessors from '../image_processors.js'; + +export class AutoImageProcessor { + + /** @type {typeof ImageProcessor.from_pretrained} */ + static async from_pretrained(pretrained_model_name_or_path, options={}) { + + const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, true, options); + + // Determine image processor class + const key = preprocessorConfig.image_processor_type ?? preprocessorConfig.feature_extractor_type; + let image_processor_class = AllImageProcessors[key]; + + if (!image_processor_class) { + if (key !== undefined) { + // Only log a warning if the class is not found and the key is set. + console.warn(`Image processor type '${key}' not found, assuming base ImageProcessor. Please report this at ${GITHUB_ISSUE_URL}.`) + } + image_processor_class = ImageProcessor; + } + + // Instantiate image processor + return new image_processor_class(preprocessorConfig); + } +} diff --git a/src/models/auto/processing_auto.js b/src/models/auto/processing_auto.js new file mode 100644 index 000000000..3b462b6e9 --- /dev/null +++ b/src/models/auto/processing_auto.js @@ -0,0 +1,100 @@ + + +import { IMAGE_PROCESSOR_NAME } from '../../utils/constants.js'; +import { getModelJSON } from '../../utils/hub.js'; +import { Processor } from '../../base/processing_utils.js'; + +import * as AllProcessors from '../processors.js'; +import * as AllImageProcessors from '../image_processors.js'; +import * as AllFeatureExtractors from '../feature_extractors.js'; + +/** + * Helper class which is used to instantiate pretrained processors with the `from_pretrained` function. + * The chosen processor class is determined by the type specified in the processor config. + * + * **Example:** Load a processor using `from_pretrained`. + * ```javascript + * let processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en'); + * ``` + * + * **Example:** Run an image through a processor. + * ```javascript + * let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16'); + * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg'); + * let image_inputs = await processor(image); + * // { + * // "pixel_values": { + * // "dims": [ 1, 3, 224, 224 ], + * // "type": "float32", + * // "data": Float32Array [ -1.558687686920166, -1.558687686920166, -1.5440893173217773, ... ], + * // "size": 150528 + * // }, + * // "original_sizes": [ + * // [ 533, 800 ] + * // ], + * // "reshaped_input_sizes": [ + * // [ 224, 224 ] + * // ] + * // } + * ``` + */ +export class AutoProcessor { + + /** + * Instantiate one of the processor classes of the library from a pretrained model. 
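+ * Note: if the preprocessor config defines a `processor_class` that the library implements,
+ * that class takes precedence over the component-based lookup described below.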
+ * + * The processor class to instantiate is selected based on the `image_processor_type` (or `feature_extractor_type`; legacy) + * property of the config object (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) + * + * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either: + * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. + * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a + * user or organization name, like `dbmdz/bert-base-german-cased`. + * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. + * @param {import('../../utils/hub.js').PretrainedOptions} options Additional options for loading the processor. + * + * @returns {Promise} A new instance of the Processor class. + */ + + /** @type {typeof Processor.from_pretrained} */ + static async from_pretrained(pretrained_model_name_or_path, options={}) { + + // TODO: first check for processor.json + const preprocessorConfig = await getModelJSON(pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, true, options); + + const { image_processor_type, feature_extractor_type, processor_class } = preprocessorConfig; + if (processor_class && AllProcessors[processor_class]) { + return AllProcessors[processor_class].from_pretrained(pretrained_model_name_or_path, options); + } + + if (!image_processor_type && !feature_extractor_type) { + throw new Error('No `image_processor_type` or `feature_extractor_type` found in the config.'); + } + + const components = {}; + if (image_processor_type) { + const image_processor_class = AllImageProcessors[image_processor_type]; + if (!image_processor_class) { + throw new Error(`Unknown image_processor_type: '${image_processor_type}'.`); + } + components.image_processor = new image_processor_class(preprocessorConfig); + } + + if (feature_extractor_type) { + const image_processor_class = AllImageProcessors[feature_extractor_type]; + if (image_processor_class) { + // Handle legacy case where image processors were specified as feature extractors + components.image_processor = new image_processor_class(preprocessorConfig); + } else { + const feature_extractor_class = AllFeatureExtractors[feature_extractor_type]; + if (!feature_extractor_class) { + throw new Error(`Unknown feature_extractor_type: '${feature_extractor_type}'.`); + } + components.feature_extractor = new feature_extractor_class(preprocessorConfig); + } + } + + const config = {}; + return new Processor(config, components); + } +} diff --git a/src/models/beit/image_processing_beit.js b/src/models/beit/image_processing_beit.js new file mode 100644 index 000000000..006399edf --- /dev/null +++ b/src/models/beit/image_processing_beit.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class BeitFeatureExtractor extends ImageProcessor { } diff --git a/src/models/bit/image_processing_bit.js b/src/models/bit/image_processing_bit.js new file mode 100644 index 000000000..66db82277 --- /dev/null +++ b/src/models/bit/image_processing_bit.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class BitImageProcessor extends ImageProcessor { } diff --git a/src/models/chinese_clip/image_processing_chinese_clip.js b/src/models/chinese_clip/image_processing_chinese_clip.js new file mode 100644 index 000000000..d720eb662 --- /dev/null +++ 
b/src/models/chinese_clip/image_processing_chinese_clip.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class ChineseCLIPFeatureExtractor extends ImageProcessor { } diff --git a/src/models/clap/feature_extraction_clap.js b/src/models/clap/feature_extraction_clap.js new file mode 100644 index 000000000..5261a10b5 --- /dev/null +++ b/src/models/clap/feature_extraction_clap.js @@ -0,0 +1,159 @@ +import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; +import { Tensor } from '../../utils/tensor.js'; +import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js'; + + +export class ClapFeatureExtractor extends FeatureExtractor { + + constructor(config) { + super(config); + + this.mel_filters = mel_filter_bank( + this.config.nb_frequency_bins, // num_frequency_bins + this.config.feature_size, // num_mel_filters + this.config.frequency_min, // min_frequency + this.config.frequency_max, // max_frequency + this.config.sampling_rate, // sampling_rate + null, // norm + "htk", // mel_scale + ); + + this.mel_filters_slaney = mel_filter_bank( + this.config.nb_frequency_bins, // num_frequency_bins + this.config.feature_size, // num_mel_filters + this.config.frequency_min, // min_frequency + this.config.frequency_max, // max_frequency + this.config.sampling_rate, // sampling_rate + "slaney", // norm + "slaney", // mel_scale + ); + + this.window = window_function(this.config.fft_window_size, 'hann') + + } + + + /** + * Extracts the mel spectrogram and prepares it for the mode based on the `truncation` and `padding` arguments. + * + * Four different path are possible: + * - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram + * will be computed on the entire audio. 3 random crops and a dowsampled version of the full mel spectrogram + * are then stacked together. They will later be used for `feature_fusion`. + * - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is + * padded based on `padding`. + * - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded + * based on `padding`, and is repeated `4` times. + * - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel + * spectrogram will be computed on a random crop of the waveform. + * + * @param {Float32Array|Float64Array} waveform The input waveform. + * @param {number} max_length The maximum length of the waveform. + * @param {string} truncation The truncation strategy to use. + * @param {string} padding The padding strategy to use. + * @returns {Promise} An object containing the mel spectrogram data as a Float32Array, its dimensions as an array of numbers, and a boolean indicating whether the waveform was longer than the max length. 
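+ * For example, with a typical CLAP configuration (`nb_max_samples = 480000`, i.e. 10 seconds of 48 kHz audio),
+ * a 12-second clip under `truncation="rand_trunc"` is randomly cropped to 10 seconds, while a 7-second clip
+ * is zero-padded up to 10 seconds (or tiled with copies of itself when `padding` is `"repeat"`/`"repeatpad"`).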
+ * @private + */ + async _get_input_mel(waveform, max_length, truncation, padding) { + + /** @type {Tensor} */ + let input_mel; + let longer = false; + const diff = waveform.length - max_length; + if (diff > 0) { + if (truncation === 'rand_trunc') { + longer = true; + const idx = Math.floor(Math.random() * (diff + 1)); + waveform = waveform.subarray(idx, idx + max_length); + + input_mel = await this._extract_fbank_features(waveform, this.mel_filters_slaney, this.config.nb_max_samples); + } else { + // TODO implement fusion strategy + throw new Error(`Truncation strategy "${truncation}" not implemented`) + } + } else { + if (diff < 0) { + let padded = new Float64Array(max_length); // already padded with zeros + padded.set(waveform); + + if (padding === 'repeat') { + for (let i = waveform.length; i < max_length; i += waveform.length) { + padded.set(waveform.subarray(0, Math.min(waveform.length, max_length - i)), i); + } + } else if (padding === 'repeatpad') { + for (let i = waveform.length; i < -diff; i += waveform.length) { + padded.set(waveform, i); + } + } + waveform = padded; + } + + if (truncation === 'fusion') { + throw new Error(`Truncation strategy "${truncation}" not implemented`) + } + + input_mel = await this._extract_fbank_features(waveform, this.mel_filters_slaney, this.config.nb_max_samples); + } + + return input_mel.unsqueeze_(0); + } + + /** + * Compute the log-mel spectrogram of the provided `waveform` using the Hann window. + * In CLAP, two different filter banks are used depending on the truncation pattern: + * - `self.mel_filters`: they correspond to the default parameters of `torchaudio` which can be obtained from + * calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation` + * is set to `"fusion"`. + * - `self.mel_filteres_slaney` : they correspond to the default parameters of `librosa` which used + * `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original + * implementation when the truncation mode is not `"fusion"`. + * + * @param {Float32Array|Float64Array} waveform The audio waveform to process. + * @param {number[][]} mel_filters The mel filters to use. + * @param {number} [max_length=null] The maximum number of frames to return. + * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. + */ + async _extract_fbank_features(waveform, mel_filters, max_length = null) { + // NOTE: We don't pad/truncate since that is passed in as `max_num_frames` + return spectrogram( + waveform, + this.window, // window + this.config.fft_window_size, // frame_length + this.config.hop_length, // hop_length + { + power: 2.0, + mel_filters, + log_mel: 'dB', + + // Custom + max_num_frames: max_length, + do_pad: false, + transpose: true, + } + ) + } + + + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. + */ + async _call(audio, { + max_length = null, + } = {}) { + validate_audio_inputs(audio, 'ClapFeatureExtractor'); + + // convert to mel spectrogram, truncate and pad if needed. + const padded_inputs = await this._get_input_mel( + audio, + max_length ?? 
this.config.nb_max_samples, + this.config.truncation, + this.config.padding, + ); + + return { + input_features: padded_inputs.unsqueeze_(0), + } + } +} diff --git a/src/models/clip/image_processing_clip.js b/src/models/clip/image_processing_clip.js new file mode 100644 index 000000000..3f2f9dcb0 --- /dev/null +++ b/src/models/clip/image_processing_clip.js @@ -0,0 +1,6 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class CLIPImageProcessor extends ImageProcessor { } +export class CLIPFeatureExtractor extends CLIPImageProcessor { } diff --git a/src/models/convnext/image_processing_convnext.js b/src/models/convnext/image_processing_convnext.js new file mode 100644 index 000000000..525e736cd --- /dev/null +++ b/src/models/convnext/image_processing_convnext.js @@ -0,0 +1,45 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class ConvNextImageProcessor extends ImageProcessor { + constructor(config) { + super(config); + + /** + * Percentage of the image to crop. Only has an effect if this.size < 384. + */ + this.crop_pct = this.config.crop_pct ?? (224 / 256); + } + + async resize(image) { + const shortest_edge = this.size?.shortest_edge; + if (shortest_edge === undefined) { + throw new Error(`Size dictionary must contain 'shortest_edge' key.`); + } + + if (shortest_edge < 384) { + // maintain same ratio, resizing shortest edge to shortest_edge/crop_pct + const resize_shortest_edge = Math.floor(shortest_edge / this.crop_pct); + + const [newWidth, newHeight] = this.get_resize_output_image_size(image, { + shortest_edge: resize_shortest_edge, + }); + + image = await image.resize(newWidth, newHeight, { + resample: this.resample, + }); + + // then crop to (shortest_edge, shortest_edge) + image = await image.center_crop(shortest_edge, shortest_edge); + } else { + // warping (no cropping) when evaluated at 384 or larger + image = await image.resize(shortest_edge, shortest_edge, { + resample: this.resample, + }); + } + + return image; + } +} +export class ConvNextFeatureExtractor extends ConvNextImageProcessor { } diff --git a/src/models/deit/image_processing_deit.js b/src/models/deit/image_processing_deit.js new file mode 100644 index 000000000..fd3857842 --- /dev/null +++ b/src/models/deit/image_processing_deit.js @@ -0,0 +1,6 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class DeiTImageProcessor extends ImageProcessor { } +export class DeiTFeatureExtractor extends DeiTImageProcessor { } \ No newline at end of file diff --git a/src/models/detr/image_processing_detr.js b/src/models/detr/image_processing_detr.js new file mode 100644 index 000000000..40ce1232f --- /dev/null +++ b/src/models/detr/image_processing_detr.js @@ -0,0 +1,52 @@ +import { + ImageProcessor, + post_process_object_detection, + post_process_panoptic_segmentation, + post_process_instance_segmentation, +} from "../../base/image_processors_utils.js"; + +import { full } from '../../utils/tensor.js'; + + +/** + * @typedef {object} DetrFeatureExtractorResultProps + * @property {import('../../utils/tensor.js').Tensor} pixel_mask + * @typedef {import('../../base/image_processors_utils.js').ImageProcessorResult & DetrFeatureExtractorResultProps} DetrFeatureExtractorResult + */ + +export class DetrImageProcessor extends ImageProcessor { + /** + * Calls the feature extraction process on an array of images, preprocesses + * each image, and concatenates the resulting features into a single Tensor. 
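+ * Also returns a `pixel_mask` filled with ones (currently a fixed 64x64 mask per image),
+ * since all input images are assumed to share the same size.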
+ * @param {import('../../utils/image.js').RawImage[]} images The image(s) to extract features from. + * @returns {Promise} An object containing the concatenated pixel values of the preprocessed images. + */ + async _call(images) { + const result = await super._call(images); + + // TODO support differently-sized images, for now assume all images are the same size. + // TODO support different mask sizes (not just 64x64) + // Currently, just fill pixel mask with 1s + const maskSize = [result.pixel_values.dims[0], 64, 64]; + const pixel_mask = full(maskSize, 1n); + + return { ...result, pixel_mask }; + } + + /** @type {typeof post_process_object_detection} */ + post_process_object_detection(...args) { + return post_process_object_detection(...args); + } + + /** @type {typeof post_process_panoptic_segmentation} */ + post_process_panoptic_segmentation(...args) { + return post_process_panoptic_segmentation(...args); + } + + /** @type {typeof post_process_instance_segmentation} */ + post_process_instance_segmentation(...args) { + return post_process_instance_segmentation(...args); + } +} + +export class DetrFeatureExtractor extends DetrImageProcessor { } // NOTE: extends DetrImageProcessor diff --git a/src/models/donut/image_processing_donut.js b/src/models/donut/image_processing_donut.js new file mode 100644 index 000000000..f848a9fa5 --- /dev/null +++ b/src/models/donut/image_processing_donut.js @@ -0,0 +1,31 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class DonutImageProcessor extends ImageProcessor { + pad_image(pixelData, imgDims, padSize, options = {}) { + const [imageHeight, imageWidth, imageChannels] = imgDims; + + let image_mean = this.image_mean; + if (!Array.isArray(this.image_mean)) { + image_mean = new Array(imageChannels).fill(image_mean); + } + + let image_std = this.image_std; + if (!Array.isArray(image_std)) { + image_std = new Array(imageChannels).fill(image_mean); + } + + const constant_values = image_mean.map((x, i) => - x / image_std[i]); + + return super.pad_image(pixelData, imgDims, padSize, { + center: true, + + // Since normalization is done after padding, we need to use certain constant values to ensure the same behaviour is observed. + // For more information, see https://github.com/huggingface/transformers/blob/main/src/transformers/models/donut/image_processing_donut.py#L433-L451 + constant_values, + ...options, + }); + } +} +export class DonutFeatureExtractor extends DonutImageProcessor { } diff --git a/src/models/dpt/image_processing_dpt.js b/src/models/dpt/image_processing_dpt.js new file mode 100644 index 000000000..0c19175e7 --- /dev/null +++ b/src/models/dpt/image_processing_dpt.js @@ -0,0 +1,6 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class DPTImageProcessor extends ImageProcessor { } +export class DPTFeatureExtractor extends DPTImageProcessor { } // NOTE: extends DPTImageProcessor diff --git a/src/models/efficientnet/image_processing_efficientnet.js b/src/models/efficientnet/image_processing_efficientnet.js new file mode 100644 index 000000000..9fde87156 --- /dev/null +++ b/src/models/efficientnet/image_processing_efficientnet.js @@ -0,0 +1,13 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class EfficientNetImageProcessor extends ImageProcessor { + constructor(config) { + super(config); + this.include_top = this.config.include_top ?? 
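+        // Default to `true` when unset. Squaring `image_std` below is presumably equivalent to the
+        // second normalization pass (zero mean, same std) that the original EfficientNet
+        // preprocessing performs when the classification top is included.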
true; + if (this.include_top) { + this.image_std = this.image_std.map(x => x * x); + } + } +} diff --git a/src/models/feature_extractors.js b/src/models/feature_extractors.js new file mode 100644 index 000000000..869c8191b --- /dev/null +++ b/src/models/feature_extractors.js @@ -0,0 +1,12 @@ + +export * from './audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.js'; +export * from './clap/feature_extraction_clap.js'; +export * from './pyannote/feature_extraction_pyannote.js'; +export * from './seamless_m4t/feature_extraction_seamless_m4t.js'; +export * from './speecht5/feature_extraction_speecht5.js'; +export * from './wav2vec2/feature_extraction_wav2vec2.js'; +export * from './wespeaker/feature_extraction_wespeaker.js'; +export * from './whisper/feature_extraction_whisper.js'; + +// For legacy support, ImageFeatureExtractor is an alias for ImageProcessor +export { ImageProcessor as ImageFeatureExtractor } from "../base/image_processors_utils.js"; diff --git a/src/models/florence2/processing_florence2.js b/src/models/florence2/processing_florence2.js new file mode 100644 index 000000000..ec644df25 --- /dev/null +++ b/src/models/florence2/processing_florence2.js @@ -0,0 +1,128 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoImageProcessor } from "../auto/image_processing_auto.js"; +import { AutoTokenizer } from "../../tokenizers.js"; + +export class Florence2Processor extends Processor { + static tokenizer_class = AutoTokenizer + static image_processor_class = AutoImageProcessor + + constructor(config, components) { + super(config, components); + + const { + tasks_answer_post_processing_type, + task_prompts_without_inputs, + task_prompts_with_input, + } = this.image_processor.config; + + /** @type {Map} */ + this.tasks_answer_post_processing_type = new Map(Object.entries(tasks_answer_post_processing_type ?? {})); + + /** @type {Map} */ + this.task_prompts_without_inputs = new Map(Object.entries(task_prompts_without_inputs ?? {})); + + /** @type {Map} */ + this.task_prompts_with_input = new Map(Object.entries(task_prompts_with_input ?? {})); + + this.regexes = { + quad_boxes: /(.+?)/gm, + bboxes: /([^<]+)?/gm, + } + this.size_per_bin = 1000; + } + + /** + * Helper function to construct prompts from input texts + * @param {string|string[]} text + * @returns {string[]} + */ + construct_prompts(text) { + if (typeof text === 'string') { + text = [text]; + } + + const prompts = []; + for (const t of text) { + // 1. fixed task prompts without additional inputs + if (this.task_prompts_without_inputs.has(t)) { + prompts.push(this.task_prompts_without_inputs.get(t)); + } + // 2. task prompts with additional inputs + else { + for (const [task, prompt] of this.task_prompts_with_input) { + if (t.includes(task)) { + prompts.push(prompt.replaceAll('{input}', t).replaceAll(task, '')); + break; + } + } + + // 3. default prompt + if (prompts.length !== text.length) { + prompts.push(t); + } + } + } + return prompts; + } + + /** + * Post-process the output of the model to each of the task outputs. + * @param {string} text The text to post-process. + * @param {string} task The task to post-process the text for. + * @param {[number, number]} image_size The size of the image. height x width. + */ + post_process_generation(text, task, image_size) { + const task_answer_post_processing_type = this.tasks_answer_post_processing_type.get(task) ?? 
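+            // Tasks without an explicit post-processing type fall back to returning the raw generated text.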
'pure_text'; + + // remove the special tokens + text = text.replaceAll('', '').replaceAll('', ''); + + let final_answer; + switch (task_answer_post_processing_type) { + case 'pure_text': + final_answer = text; + break; + + case 'description_with_bboxes': + case 'bboxes': + case 'phrase_grounding': + case 'ocr': + const key = task_answer_post_processing_type === 'ocr' ? 'quad_boxes' : 'bboxes'; + const matches = text.matchAll(this.regexes[key]); + const labels = []; + const items = []; + for (const [_, label, ...locations] of matches) { + // Push new label, or duplicate the last label + labels.push(label ? label.trim() : labels.at(-1) ?? ''); + items.push(locations.map((x, i) => + // NOTE: Add 0.5 to use the center position of the bin as the coordinate. + (Number(x) + 0.5) / this.size_per_bin * image_size[i % 2]) + ); + } + final_answer = { labels, [key]: items }; + break; + + default: + throw new Error(`Task "${task}" (of type "${task_answer_post_processing_type}") not yet implemented.`); + } + + return { [task]: final_answer } + } + + // NOTE: images and text are switched from the python version + // `images` is required, `text` is optional + async _call(images, text=null, kwargs = {}) { + + if (!images && !text){ + throw new Error('Either text or images must be provided'); + } + + const image_inputs = await this.image_processor(images, kwargs); + const text_inputs = text ? this.tokenizer(text, kwargs) : {}; + + return { + ...image_inputs, + ...text_inputs, + } + } +} diff --git a/src/models/glpn/image_processing_glpn.js b/src/models/glpn/image_processing_glpn.js new file mode 100644 index 000000000..609f1b996 --- /dev/null +++ b/src/models/glpn/image_processing_glpn.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class GLPNFeatureExtractor extends ImageProcessor { } diff --git a/src/models/image_processors.js b/src/models/image_processors.js new file mode 100644 index 000000000..64c529742 --- /dev/null +++ b/src/models/image_processors.js @@ -0,0 +1,36 @@ + +export * from './beit/image_processing_beit.js' +export * from './bit/image_processing_bit.js' +export * from './chinese_clip/image_processing_chinese_clip.js' +export * from './clip/image_processing_clip.js' +export * from './convnext/image_processing_convnext.js' +export * from './deit/image_processing_deit.js' +export * from './detr/image_processing_detr.js' +export * from './donut/image_processing_donut.js' +export * from './dpt/image_processing_dpt.js' +export * from './efficientnet/image_processing_efficientnet.js' +export * from './glpn/image_processing_glpn.js' +export * from './janus/image_processing_janus.js' +export * from './jina_clip/image_processing_jina_clip.js' +export * from './llava_onevision/image_processing_llava_onevision.js' +export * from './mask2former/image_processing_mask2former.js' +export * from './maskformer/image_processing_maskformer.js' +export * from './mobilenet_v1/image_processing_mobilenet_v1.js' +export * from './mobilenet_v2/image_processing_mobilenet_v2.js' +export * from './mobilenet_v3/image_processing_mobilenet_v3.js' +export * from './mobilenet_v4/image_processing_mobilenet_v4.js' +export * from './mobilevit/image_processing_mobilevit.js' +export * from './nougat/image_processing_nougat.js' +export * from './owlv2/image_processing_owlv2.js' +export * from './owlvit/image_processing_owlvit.js' +export * from './pvt/image_processing_pvt.js' +export * from './qwen2_vl/image_processing_qwen2_vl.js' +export * from 
'./rt_detr/image_processing_rt_detr.js' +export * from './sam/image_processing_sam.js' +export * from './segformer/image_processing_segformer.js' +export * from './siglip/image_processing_siglip.js' +export * from './swin2sr/image_processing_swin2sr.js' +export * from './vit/image_processing_vit.js' +export * from './vitmatte/image_processing_vitmatte.js' +export * from './vitpose/image_processing_vitpose.js' +export * from './yolos/image_processing_yolos.js' diff --git a/src/models/janus/image_processing_janus.js b/src/models/janus/image_processing_janus.js new file mode 100644 index 000000000..4dae64ff4 --- /dev/null +++ b/src/models/janus/image_processing_janus.js @@ -0,0 +1,26 @@ + +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class VLMImageProcessor extends ImageProcessor { + constructor(config) { + super({ + do_pad: true, + pad_size: { + width: config.image_size, + height: config.image_size, + }, + ...config, + }); + this.constant_values = this.config.background_color.map(x => x * this.rescale_factor) + } + + pad_image(pixelData, imgDims, padSize, options) { + return super.pad_image(pixelData, imgDims, padSize, { + constant_values: this.constant_values, + center: true, + ...options, + }); + } +} diff --git a/src/models/janus/processing_janus.js b/src/models/janus/processing_janus.js new file mode 100644 index 000000000..48b9bb4dd --- /dev/null +++ b/src/models/janus/processing_janus.js @@ -0,0 +1,123 @@ + +import { Processor } from "../../base/processing_utils.js"; +import { AutoImageProcessor } from "../auto/image_processing_auto.js"; +import { AutoTokenizer } from "../../tokenizers.js"; +import { mergeArrays } from "../../utils/core.js"; +import { Tensor } from "../../utils/tensor.js"; +import { RawImage } from "../../utils/image.js"; + +export class VLChatProcessor extends Processor { + static image_processor_class = AutoImageProcessor + static tokenizer_class = AutoTokenizer + static uses_processor_config = true; + + constructor(config, components) { + super(config, components); + + this.image_tag = this.config.image_tag; + this.image_start_tag = this.config.image_start_tag; + this.image_end_tag = this.config.image_end_tag; + this.num_image_tokens = this.config.num_image_tokens; + } + + /** + * @typedef {Object} MultimodalMessageProperties Additional properties for multimodal messages. + * @property {(RawImage | string | URL)[]} [images] The images in the message. + * @typedef {(import('../../tokenizers.js').Message & MultimodalMessageProperties)[]} MultimodalConversation The conversation possibly containing multimodal inputs. + */ + + /** + * @typedef {Object} VLCChatProcessorResult The processed input. + * @property {Tensor} input_ids The input IDs. + * @property {Tensor} attention_mask The attention mask. + * @property {Tensor} images_seq_mask The image sequence mask. + * @property {Tensor} images_emb_mask The image embedding mask. + */ + + /** + * @param {MultimodalConversation} conversation The chat messages to process. + * @param {Object} options Additional options for processing. + * @param {RawImage|RawImage[]} [options.images] The images to process, if not set in the conversation. + * @param {string} [options.chat_template="default"] The chat template to use. + * @returns {Promise} The processed input. 
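+ *
+ * **Example:** Prepare a single-image Janus conversation (illustrative sketch: the model id and image URL are
+ * placeholders, and the role/tag strings follow the Janus chat template, so they may differ for other checkpoints).
+ * ```javascript
+ * import { AutoProcessor } from '@huggingface/transformers';
+ *
+ * const processor = await AutoProcessor.from_pretrained('onnx-community/Janus-1.3B-ONNX'); // placeholder id
+ * const conversation = [{
+ *   role: '<|User|>',
+ *   content: '<image_placeholder>\nDescribe this image.',
+ *   images: ['https://example.com/cat.png'], // placeholder URL
+ * }];
+ * const inputs = await processor(conversation);
+ * // inputs: { input_ids, attention_mask, images_seq_mask, images_emb_mask, pixel_values }
+ * ```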
+ */ + async _call(conversation, { + images = null, + chat_template = "default", + }={}) { + if (!images) { + images = await Promise.all( + conversation + .filter((msg) => msg.images) + .flatMap((msg) => msg.images) + .map((img) => RawImage.read(img)) + ); + } else if (!Array.isArray(images)) { + images = [images]; + } + + const tokenizer = this.tokenizer; + const result = tokenizer.apply_chat_template(conversation, { + tokenize: false, + add_generation_prompt: true, + chat_template, + }); + + const encode = (text) => tokenizer.encode(text, { add_special_tokens: false }); + const parts = (/** @type {string} */(result)) + .split(this.image_tag); + const num_images = parts.length - 1; + if (images.length !== num_images) { + throw new Error(`Number of images provided (${images.length}) does not match number of "${this.image_tag}" image tags (${num_images})`); + } + + const [ + image_placeholder_tag_id, + image_start_tag_id, + image_end_tag_id, + ] = tokenizer.model.convert_tokens_to_ids([ + this.image_tag, + this.image_start_tag, + this.image_end_tag, + ]); + + let input_ids = encode(parts[0]); + let images_seq_mask = new Array(input_ids.length).fill(false); + for (let i = 1; i < parts.length; ++i) { + const placeholder_image_tokens = new Array(this.num_image_tokens).fill(image_placeholder_tag_id); + const tokens = encode(parts[i]); + input_ids = mergeArrays( + input_ids, + [image_start_tag_id], placeholder_image_tokens, [image_end_tag_id], + tokens, + ); + const image_mask = new Array(this.num_image_tokens).fill(true); + images_seq_mask = mergeArrays( + images_seq_mask, + [false], image_mask, [false], + new Array(tokens.length).fill(false), + ); + } + + const dims = [1, input_ids.length]; + const final = { + input_ids: new Tensor('int64', input_ids, dims), + attention_mask: new Tensor('int64', new Array(input_ids.length).fill(1), dims), + images_seq_mask: new Tensor('bool', images_seq_mask, dims), + images_emb_mask: new Tensor( + 'bool', + new Array(num_images * this.num_image_tokens).fill(true), + [1, num_images, this.num_image_tokens], + ), + } + + if (images && images.length > 0) { + const image_inputs = await this.image_processor(images); + // Set the batch_size dimension to 1 + image_inputs.pixel_values.unsqueeze_(0); + return { ...final, ...image_inputs }; + } + + return final; + } +} diff --git a/src/models/jina_clip/image_processing_jina_clip.js b/src/models/jina_clip/image_processing_jina_clip.js new file mode 100644 index 000000000..648e80d42 --- /dev/null +++ b/src/models/jina_clip/image_processing_jina_clip.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class JinaCLIPImageProcessor extends ImageProcessor {} diff --git a/src/models/llava_onevision/image_processing_llava_onevision.js b/src/models/llava_onevision/image_processing_llava_onevision.js new file mode 100644 index 000000000..c705589de --- /dev/null +++ b/src/models/llava_onevision/image_processing_llava_onevision.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class LlavaOnevisionImageProcessor extends ImageProcessor {} diff --git a/src/models/mask2former/image_processing_mask2former.js b/src/models/mask2former/image_processing_mask2former.js new file mode 100644 index 000000000..5e02b5c38 --- /dev/null +++ b/src/models/mask2former/image_processing_mask2former.js @@ -0,0 +1,5 @@ + +import { MaskFormerImageProcessor } from "../maskformer/image_processing_maskformer.js"; + +// NOTE: extends 
MaskFormerImageProcessor +export class Mask2FormerImageProcessor extends MaskFormerImageProcessor { } diff --git a/src/models/maskformer/image_processing_maskformer.js b/src/models/maskformer/image_processing_maskformer.js new file mode 100644 index 000000000..6b90b0451 --- /dev/null +++ b/src/models/maskformer/image_processing_maskformer.js @@ -0,0 +1,18 @@ +import { + ImageProcessor, + post_process_panoptic_segmentation, + post_process_instance_segmentation, +} from "../../base/image_processors_utils.js"; + +export class MaskFormerImageProcessor extends ImageProcessor { + + /** @type {typeof post_process_panoptic_segmentation} */ + post_process_panoptic_segmentation(...args) { + return post_process_panoptic_segmentation(...args); + } + /** @type {typeof post_process_instance_segmentation} */ + post_process_instance_segmentation(...args) { + return post_process_instance_segmentation(...args); + } +} +export class MaskFormerFeatureExtractor extends MaskFormerImageProcessor { } diff --git a/src/models/mgp_str/processing_mgp_str.js b/src/models/mgp_str/processing_mgp_str.js new file mode 100644 index 000000000..eb4dbdf6e --- /dev/null +++ b/src/models/mgp_str/processing_mgp_str.js @@ -0,0 +1,170 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoImageProcessor } from "../auto/image_processing_auto.js"; +import { AutoTokenizer } from "../../tokenizers.js"; +import { max, softmax } from "../../utils/maths.js"; + +const DECODE_TYPE_MAPPING = { + 'char': ['char_decode', 1], + 'bpe': ['bpe_decode', 2], + 'wp': ['wp_decode', 102], +} +export class MgpstrProcessor extends Processor { + static tokenizer_class = AutoTokenizer + static image_processor_class = AutoImageProcessor + + /** + * @returns {import('../../tokenizers.js').MgpstrTokenizer} The character tokenizer. + */ + get char_tokenizer() { + return this.components.char_tokenizer; + } + + /** + * @returns {import('../../tokenizers.js').GPT2Tokenizer} The BPE tokenizer. + */ + get bpe_tokenizer() { + return this.components.bpe_tokenizer; + } + + /** + * @returns {import('../../tokenizers.js').BertTokenizer} The WordPiece tokenizer. + */ + get wp_tokenizer() { + return this.components.wp_tokenizer; + } + + /** + * Helper function to decode the model prediction logits. + * @param {import('../../utils/tensor.js').Tensor} pred_logits Model prediction logits. + * @param {string} format Type of model prediction. Must be one of ['char', 'bpe', 'wp']. + * @returns {[string[], number[]]} The decoded sentences and their confidence scores. + */ + _decode_helper(pred_logits, format) { + if (!DECODE_TYPE_MAPPING.hasOwnProperty(format)) { + throw new Error(`Format ${format} is not supported.`); + } + + const [decoder_name, eos_token] = DECODE_TYPE_MAPPING[format]; + const decoder = this[decoder_name].bind(this); + + const [batch_size, batch_max_length] = pred_logits.dims; + const conf_scores = []; + const all_ids = []; + + /** @type {number[][][]} */ + const pred_logits_list = pred_logits.tolist(); + for (let i = 0; i < batch_size; ++i) { + const logits = pred_logits_list[i]; + const ids = []; + const scores = []; + + // Start and index=1 to skip the first token + for (let j = 1; j < batch_max_length; ++j) { + // NOTE: == to match bigint and number + const [max_prob, max_prob_index] = max(softmax(logits[j])); + scores.push(max_prob); + if (max_prob_index == eos_token) { + break; + } + ids.push(max_prob_index); + } + + const confidence_score = scores.length > 0 + ? 
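+                // Sequence confidence is the product of the per-token (softmax) max probabilities.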
scores.reduce((a, b) => a * b, 1) + : 0; + + all_ids.push(ids); + conf_scores.push(confidence_score); + } + + const decoded = decoder(all_ids); + return [decoded, conf_scores]; + } + + /** + * Convert a list of lists of char token ids into a list of strings by calling char tokenizer. + * @param {number[][]} sequences List of tokenized input ids. + * @returns {string[]} The list of char decoded sentences. + */ + char_decode(sequences) { + return this.char_tokenizer.batch_decode(sequences).map(str => str.replaceAll(' ', '')); + } + + /** + * Convert a list of lists of BPE token ids into a list of strings by calling BPE tokenizer. + * @param {number[][]} sequences List of tokenized input ids. + * @returns {string[]} The list of BPE decoded sentences. + */ + bpe_decode(sequences) { + return this.bpe_tokenizer.batch_decode(sequences) + } + + /** + * Convert a list of lists of word piece token ids into a list of strings by calling word piece tokenizer. + * @param {number[][]} sequences List of tokenized input ids. + * @returns {string[]} The list of wp decoded sentences. + */ + wp_decode(sequences) { + return this.wp_tokenizer.batch_decode(sequences).map(str => str.replaceAll(' ', '')); + } + + /** + * Convert a list of lists of token ids into a list of strings by calling decode. + * @param {import('../../utils/tensor.js').Tensor[]} sequences List of tokenized input ids. + * @returns {{generated_text: string[], scores: number[], char_preds: string[], bpe_preds: string[], wp_preds: string[]}} + * Dictionary of all the outputs of the decoded results. + * - generated_text: The final results after fusion of char, bpe, and wp. + * - scores: The final scores after fusion of char, bpe, and wp. + * - char_preds: The list of character decoded sentences. + * - bpe_preds: The list of BPE decoded sentences. + * - wp_preds: The list of wp decoded sentences. 
+ */ + batch_decode([char_logits, bpe_logits, wp_logits]) { + const [char_preds, char_scores] = this._decode_helper(char_logits, 'char'); + const [bpe_preds, bpe_scores] = this._decode_helper(bpe_logits, 'bpe'); + const [wp_preds, wp_scores] = this._decode_helper(wp_logits, 'wp'); + + const generated_text = []; + const scores = []; + for (let i = 0; i < char_preds.length; ++i) { + const [max_score, max_score_index] = max([char_scores[i], bpe_scores[i], wp_scores[i]]); + generated_text.push([char_preds[i], bpe_preds[i], wp_preds[i]][max_score_index]); + scores.push(max_score); + } + + return { + generated_text, + scores, + char_preds, + bpe_preds, + wp_preds, + } + } + /** @type {typeof Processor.from_pretrained} */ + static async from_pretrained(...args) { + const base = await super.from_pretrained(...args); + + // Load Transformers.js-compatible versions of the BPE and WordPiece tokenizers + const bpe_tokenizer = await AutoTokenizer.from_pretrained("Xenova/gpt2") // openai-community/gpt2 + const wp_tokenizer = await AutoTokenizer.from_pretrained("Xenova/bert-base-uncased") // google-bert/bert-base-uncased + + // Update components + base.components = { + image_processor: base.image_processor, + char_tokenizer: base.tokenizer, + bpe_tokenizer: bpe_tokenizer, + wp_tokenizer: wp_tokenizer, + } + return base; + } + + async _call(images, text = null) { + const result = await this.image_processor(images); + + if (text) { + result.labels = this.tokenizer(text).input_ids + } + + return result; + } +} diff --git a/src/models/mobilenet_v1/image_processing_mobilenet_v1.js b/src/models/mobilenet_v1/image_processing_mobilenet_v1.js new file mode 100644 index 000000000..61246131e --- /dev/null +++ b/src/models/mobilenet_v1/image_processing_mobilenet_v1.js @@ -0,0 +1,7 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + + +export class MobileNetV1ImageProcessor extends ImageProcessor { } +export class MobileNetV1FeatureExtractor extends MobileNetV1ImageProcessor { } diff --git a/src/models/mobilenet_v2/image_processing_mobilenet_v2.js b/src/models/mobilenet_v2/image_processing_mobilenet_v2.js new file mode 100644 index 000000000..1d80a67a3 --- /dev/null +++ b/src/models/mobilenet_v2/image_processing_mobilenet_v2.js @@ -0,0 +1,7 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + + +export class MobileNetV2ImageProcessor extends ImageProcessor { } +export class MobileNetV2FeatureExtractor extends MobileNetV2ImageProcessor { } diff --git a/src/models/mobilenet_v3/image_processing_mobilenet_v3.js b/src/models/mobilenet_v3/image_processing_mobilenet_v3.js new file mode 100644 index 000000000..3a935d30d --- /dev/null +++ b/src/models/mobilenet_v3/image_processing_mobilenet_v3.js @@ -0,0 +1,7 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + + +export class MobileNetV3ImageProcessor extends ImageProcessor { } +export class MobileNetV3FeatureExtractor extends MobileNetV3ImageProcessor { } diff --git a/src/models/mobilenet_v4/image_processing_mobilenet_v4.js b/src/models/mobilenet_v4/image_processing_mobilenet_v4.js new file mode 100644 index 000000000..fc6401f73 --- /dev/null +++ b/src/models/mobilenet_v4/image_processing_mobilenet_v4.js @@ -0,0 +1,7 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + + +export class MobileNetV4ImageProcessor extends ImageProcessor { } +export class MobileNetV4FeatureExtractor extends MobileNetV4ImageProcessor { } diff --git 
a/src/models/mobilevit/image_processing_mobilevit.js b/src/models/mobilevit/image_processing_mobilevit.js new file mode 100644 index 000000000..356570c68 --- /dev/null +++ b/src/models/mobilevit/image_processing_mobilevit.js @@ -0,0 +1,6 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class MobileViTImageProcessor extends ImageProcessor { } +export class MobileViTFeatureExtractor extends MobileViTImageProcessor { } diff --git a/src/models/nougat/image_processing_nougat.js b/src/models/nougat/image_processing_nougat.js new file mode 100644 index 000000000..c845fce3a --- /dev/null +++ b/src/models/nougat/image_processing_nougat.js @@ -0,0 +1,5 @@ + +import { DonutImageProcessor } from "../donut/image_processing_donut.js"; + +// NOTE: extends DonutImageProcessor +export class NougatImageProcessor extends DonutImageProcessor { } diff --git a/src/models/owlv2/image_processing_owlv2.js b/src/models/owlv2/image_processing_owlv2.js new file mode 100644 index 000000000..224f49cc1 --- /dev/null +++ b/src/models/owlv2/image_processing_owlv2.js @@ -0,0 +1,5 @@ + +import { OwlViTImageProcessor } from "../owlvit/image_processing_owlvit.js"; + +// NOTE: extends OwlViTImageProcessor +export class Owlv2ImageProcessor extends OwlViTImageProcessor { } diff --git a/src/models/owlvit/image_processing_owlvit.js b/src/models/owlvit/image_processing_owlvit.js new file mode 100644 index 000000000..e7c3c69cf --- /dev/null +++ b/src/models/owlvit/image_processing_owlvit.js @@ -0,0 +1,12 @@ +import { + ImageProcessor, + post_process_object_detection, +} from "../../base/image_processors_utils.js"; + +export class OwlViTImageProcessor extends ImageProcessor { + /** @type {typeof post_process_object_detection} */ + post_process_object_detection(...args) { + return post_process_object_detection(...args); + } +} +export class OwlViTFeatureExtractor extends OwlViTImageProcessor { } diff --git a/src/models/owlvit/processing_owlvit.js b/src/models/owlvit/processing_owlvit.js new file mode 100644 index 000000000..f596dbe19 --- /dev/null +++ b/src/models/owlvit/processing_owlvit.js @@ -0,0 +1,7 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoImageProcessor } from "../auto/image_processing_auto.js"; +import { AutoTokenizer } from "../../tokenizers.js"; +export class OwlViTProcessor extends Processor { + static tokenizer_class = AutoTokenizer + static image_processor_class = AutoImageProcessor +} diff --git a/src/models/processors.js b/src/models/processors.js new file mode 100644 index 000000000..735432812 --- /dev/null +++ b/src/models/processors.js @@ -0,0 +1,10 @@ +export * from './florence2/processing_florence2.js'; +export * from './mgp_str/processing_mgp_str.js'; +export * from './janus/processing_janus.js'; +export * from './owlvit/processing_owlvit.js'; +export * from './pyannote/processing_pyannote.js'; +export * from './qwen2_vl/processing_qwen2_vl.js'; +export * from './sam/processing_sam.js'; +export * from './speecht5/processing_speecht5.js'; +export * from './wav2vec2/processing_wav2vec2.js'; +export * from './whisper/processing_whisper.js'; diff --git a/src/models/pvt/image_processing_pvt.js b/src/models/pvt/image_processing_pvt.js new file mode 100644 index 000000000..2156dfe0d --- /dev/null +++ b/src/models/pvt/image_processing_pvt.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class PvtImageProcessor extends ImageProcessor { } diff --git 
a/src/models/pyannote/feature_extraction_pyannote.js b/src/models/pyannote/feature_extraction_pyannote.js new file mode 100644 index 000000000..74b40fec9 --- /dev/null +++ b/src/models/pyannote/feature_extraction_pyannote.js @@ -0,0 +1,28 @@ +import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; +import { Tensor } from '../../utils/tensor.js'; + + +export class PyAnnoteFeatureExtractor extends FeatureExtractor { + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @returns {Promise<{ input_values: Tensor; }>} The extracted input features. + */ + async _call(audio) { + validate_audio_inputs(audio, 'PyAnnoteFeatureExtractor'); + + if (audio instanceof Float64Array) { + audio = new Float32Array(audio); + } + + const shape = [ + 1, /* batch_size */ + 1, /* num_channels */ + audio.length, /* num_samples */ + ]; + return { + input_values: new Tensor('float32', audio, shape), + }; + } + +} diff --git a/src/models/pyannote/processing_pyannote.js b/src/models/pyannote/processing_pyannote.js new file mode 100644 index 000000000..cf66251a8 --- /dev/null +++ b/src/models/pyannote/processing_pyannote.js @@ -0,0 +1,71 @@ +import { Processor } from '../../base/processing_utils.js'; +import { AutoFeatureExtractor } from '../auto/feature_extraction_auto.js'; +import { max, softmax } from '../../utils/maths.js'; + +export class PyAnnoteProcessor extends Processor { + static feature_extractor_class = AutoFeatureExtractor + + /** + * Calls the feature_extractor function with the given audio input. + * @param {any} audio The audio input to extract features from. + * @returns {Promise} A Promise that resolves with the extracted features. + */ + async _call(audio) { + return await this.feature_extractor(audio) + } + + /** + * NOTE: Can return fractional values. `Math.ceil` will ensure correct value. + * @param {number} samples The number of samples in the audio. + * @returns {number} The number of frames in the audio. + */ + samples_to_frames(samples) { + return ((samples - this.config.offset) / this.config.step); + } + + /** + * Post-processes the speaker diarization logits output by the model. + * @param {import('../../utils/tensor.js').Tensor} logits The speaker diarization logits output by the model. + * @param {number} num_samples Number of samples in the input audio. + * @returns {Array<Array<{ id: number, start: number, end: number, confidence: number }>>} The post-processed speaker diarization results.
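+ *
+ * **Example** (minimal sketch; the checkpoint id, the 16 kHz sampling rate, and the use of `AutoModelForAudioFrameClassification` are assumptions, not part of this PR's tests):
+ * ```javascript
+ * import { AutoProcessor, AutoModelForAudioFrameClassification, read_audio } from '@huggingface/transformers';
+ *
+ * const model_id = 'onnx-community/pyannote-segmentation-3.0'; // assumed checkpoint
+ * const processor = await AutoProcessor.from_pretrained(model_id);
+ * const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id);
+ *
+ * const audio = await read_audio('audio.wav', 16000); // assumed 16 kHz input
+ * const inputs = await processor(audio);
+ * const { logits } = await model(inputs);
+ * const segments = processor.post_process_speaker_diarization(logits, audio.length);
+ * // [[{ id, start, end, confidence }, ...]]
+ * ```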
+ */ + post_process_speaker_diarization(logits, num_samples) { + const ratio = ( + num_samples / this.samples_to_frames(num_samples) + ) / this.config.sampling_rate; + + const results = []; + for (const scores of logits.tolist()) { + const accumulated_segments = []; + + let current_speaker = -1; + for (let i = 0; i < scores.length; ++i) { + const probabilities = softmax(scores[i]); + const [score, id] = max(probabilities); + const [start, end] = [i, i + 1]; + + if (id !== current_speaker) { + // Speaker has changed + current_speaker = id; + accumulated_segments.push({ id, start, end, score }); + } else { + // Continue the current segment + accumulated_segments.at(-1).end = end; + accumulated_segments.at(-1).score += score; + } + } + + results.push(accumulated_segments.map( + // Convert frame-space to time-space + // and compute the confidence + ({ id, start, end, score }) => ({ + id, + start: start * ratio, + end: end * ratio, + confidence: score / (end - start), + }) + )); + } + return results; + } +} diff --git a/src/models/qwen2_vl/image_processing_qwen2_vl.js b/src/models/qwen2_vl/image_processing_qwen2_vl.js new file mode 100644 index 000000000..cb5c914f8 --- /dev/null +++ b/src/models/qwen2_vl/image_processing_qwen2_vl.js @@ -0,0 +1,52 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; +import { cat, Tensor } from "../../utils/tensor.js"; + +export class Qwen2VLImageProcessor extends ImageProcessor { + async _call(images, ...args) { + const { pixel_values, original_sizes, reshaped_input_sizes } = await super._call(images, ...args); + + let patches = pixel_values; + + // @ts-ignore + const { temporal_patch_size, merge_size, patch_size } = this.config; + if (patches.dims[0] === 1) { + // Equivalent to np.tile(patches, (self.temporal_patch_size, 1, 1, 1)) + patches = cat(Array.from({ length: temporal_patch_size }, () => patches), 0); + } + + const grid_t = patches.dims[0] / temporal_patch_size; + const channel = patches.dims[1]; + const grid_h = Math.floor(patches.dims[2] / patch_size); + const grid_w = Math.floor(patches.dims[3] / patch_size); + + const flatten_patches = patches + .view( + grid_t, + temporal_patch_size, + channel, + Math.floor(grid_h / merge_size), + merge_size, + patch_size, + Math.floor(grid_w / merge_size), + merge_size, + patch_size, + ) + .permute(0, 3, 6, 4, 7, 2, 1, 5, 8) + .view( + grid_t * grid_h * grid_w, + channel * temporal_patch_size * patch_size * patch_size, + ) + + const image_grid_thw = new Tensor('int64', [grid_t, grid_h, grid_w], [1, 3]); + + return { + pixel_values: flatten_patches, + image_grid_thw, + original_sizes, + reshaped_input_sizes, + } + } +} + diff --git a/src/models/qwen2_vl/processing_qwen2_vl.js b/src/models/qwen2_vl/processing_qwen2_vl.js new file mode 100644 index 000000000..d5f05535b --- /dev/null +++ b/src/models/qwen2_vl/processing_qwen2_vl.js @@ -0,0 +1,52 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoImageProcessor } from "../auto/image_processing_auto.js"; +import { AutoTokenizer } from "../../tokenizers.js"; +import { RawImage } from "../../utils/image.js"; + +export class Qwen2VLProcessor extends Processor { + static image_processor_class = AutoImageProcessor + static tokenizer_class = AutoTokenizer + + /** + * + * @param {string|string[]} text + * @param {RawImage|RawImage[]} images + * @param {...any} args + * @returns {Promise} + */ + async _call(text, images = null, ...args) { + + if (!Array.isArray(text)) { + text = [text]; + } + + let image_inputs, 
image_grid_thw; + + if (images) { + image_inputs = await this.image_processor(images); + image_grid_thw = image_inputs.image_grid_thw; + } + + if (image_grid_thw) { + let merge_length = this.image_processor.config.merge_size ** 2; + let index = 0; + + const image_grid_thw_list = image_grid_thw.tolist(); + text = text.map(t => { + while (t.includes("<|image_pad|>")) { + const prod = Number(image_grid_thw_list[index++].reduce((a, b) => a * b, 1n)); + t = t.replace("<|image_pad|>", "<|placeholder|>".repeat(Math.floor(prod / merge_length))); + } + return t.replaceAll("<|placeholder|>", "<|image_pad|>"); + }); + } + + const text_inputs = this.tokenizer(text); + + return { + ...text_inputs, + ...image_inputs, + // TODO: ...videos_inputs, + } + } +} diff --git a/src/models/rt_detr/image_processing_rt_detr.js b/src/models/rt_detr/image_processing_rt_detr.js new file mode 100644 index 000000000..eef753352 --- /dev/null +++ b/src/models/rt_detr/image_processing_rt_detr.js @@ -0,0 +1,12 @@ +import { + ImageProcessor, + post_process_object_detection, +} from "../../base/image_processors_utils.js"; + + +export class RTDetrImageProcessor extends ImageProcessor { + /** @type {typeof post_process_object_detection} */ + post_process_object_detection(...args) { + return post_process_object_detection(...args); + } +} diff --git a/src/models/sam/image_processing_sam.js b/src/models/sam/image_processing_sam.js new file mode 100644 index 000000000..bd71e1f43 --- /dev/null +++ b/src/models/sam/image_processing_sam.js @@ -0,0 +1,242 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; +import { calculateDimensions } from "../../utils/core.js"; + +import { + interpolate_4d, + Tensor, +} from "../../utils/tensor.js"; + + +/** + * @typedef {object} SamImageProcessorResult + * @property {Tensor} pixel_values + * @property {import("../../base/image_processors_utils.js").HeightWidth[]} original_sizes + * @property {import("../../base/image_processors_utils.js").HeightWidth[]} reshaped_input_sizes + * @property {Tensor} [input_points] + * @property {Tensor} [input_labels] + * @property {Tensor} [input_boxes] + */ + +export class SamImageProcessor extends ImageProcessor { + + /** + * + * @param {any} input_points + * @param {import("../../base/image_processors_utils.js").HeightWidth[]} original_sizes + * @param {import("../../base/image_processors_utils.js").HeightWidth[]} reshaped_input_sizes + * @returns {Tensor} + */ + reshape_input_points(input_points, original_sizes, reshaped_input_sizes, is_bounding_box = false) { + + // Make deep copy to avoid altering user's input + input_points = structuredClone(input_points); + let shape = calculateDimensions(input_points); + + // TODO: add support for 2D input_points + if (shape.length === 3) { + // Correct user's input + if (!is_bounding_box) { + shape = [1, ...shape]; + } + input_points = [input_points]; + } else if (shape.length !== 4) { + throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.") + } + + // Reshape input points + for (let i = 0; i < input_points.length; ++i) { // batch_size + let originalImageSize = original_sizes[i]; + let reshapedImageSize = reshaped_input_sizes[i]; + + let resizeFactors = [ + reshapedImageSize[0] / originalImageSize[0], + reshapedImageSize[1] / originalImageSize[1] + ] + + for (let j = 0; j < input_points[i].length; ++j) { // point_batch_size + for (let k = 0; k < input_points[i][j].length; ++k) { // nb_points_per_image + for (let w = 0; w < 
input_points[i][j][k].length; ++w) { // 2 or 4 + input_points[i][j][k][w] *= resizeFactors[w % 2]; + } + } + } + } + + return new Tensor( + 'float32', + Float32Array.from(input_points.flat(Infinity)), + shape + ) + + } + + /** + * + * @param {any} input_labels + * @param {Tensor} input_points + * @returns {Tensor} + */ + add_input_labels(input_labels, input_points) { + let shape = calculateDimensions(input_labels); + if (shape.length === 2) { + // Correct user's input + shape = [1, ...shape]; + input_labels = [input_labels]; + } else if (shape.length !== 3) { + throw Error("The input_labels must be a 3D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`.") + } + + if (shape.some((x, i) => x !== input_points.dims[i])) { + throw Error(`The first ${shape.length} dimensions of 'input_points' and 'input_labels' must be the same.`) + } + return new Tensor( + 'int64', + input_labels.flat(Infinity).map(BigInt), + shape, + ) + } + /** + * @param {any[]} images The URL(s) of the image(s) to extract features from. + * @param {Object} [options] Additional options for the processor. + * @param {any} [options.input_points=null] A 3D or 4D array, representing the input points provided by the user. + * - 3D: `[point_batch_size, nb_points_per_image, 2]`. In this case, `batch_size` is assumed to be 1. + * - 4D: `[batch_size, point_batch_size, nb_points_per_image, 2]`. + * @param {any} [options.input_labels=null] A 2D or 3D array, representing the input labels for the points, used by the prompt encoder to encode the prompt. + * - 2D: `[point_batch_size, nb_points_per_image]`. In this case, `batch_size` is assumed to be 1. + * - 3D: `[batch_size, point_batch_size, nb_points_per_image]`. + * @param {number[][][]} [options.input_boxes=null] A 3D array of shape `(batch_size, num_boxes, 4)`, representing the input boxes provided by the user. + * This is used by the prompt encoder to encode the prompt. Generally yields much better generated masks. + * The processor will generate a tensor, with each dimension corresponding respectively to the image batch size, + * the number of boxes per image and the coordinates of the top left and bottom right point of the box. + * In the order (`x1`, `y1`, `x2`, `y2`): + * - `x1`: the x coordinate of the top left point of the input box + * - `y1`: the y coordinate of the top left point of the input box + * - `x2`: the x coordinate of the bottom right point of the input box + * - `y2`: the y coordinate of the bottom right point of the input box + * @returns {Promise<SamImageProcessorResult>} + */ + async _call(images, { + input_points = null, + input_labels = null, + input_boxes = null + } = {}) { + // TODO allow user to use preprocessed images + /** @type {SamImageProcessorResult} */ + const processed = await super._call(images); + + if (input_points) { + processed.input_points = this.reshape_input_points( + input_points, processed.original_sizes, processed.reshaped_input_sizes + ); + } + + if (input_labels) { + if (!processed.input_points) { + throw Error("`input_points` must be provided if `input_labels` are provided.") + } + processed.input_labels = this.add_input_labels(input_labels, processed.input_points); + } + + if (input_boxes) { + processed.input_boxes = this.reshape_input_points( + input_boxes, processed.original_sizes, processed.reshaped_input_sizes, true, + ); + } + + return processed; + } + + /** + * Remove padding and upscale masks to the original image size.
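+ *
+ * **Example** (abridged sketch; the checkpoint id is an assumption, and the call goes through `SamProcessor`, which forwards to this method):
+ * ```javascript
+ * import { SamModel, AutoProcessor, RawImage } from '@huggingface/transformers';
+ *
+ * const model = await SamModel.from_pretrained('Xenova/slimsam-77-uniform'); // assumed checkpoint
+ * const processor = await AutoProcessor.from_pretrained('Xenova/slimsam-77-uniform');
+ *
+ * const image = await RawImage.read('image.png');
+ * const input_points = [[[340, 250]]]; // [point_batch_size, nb_points_per_image, 2]
+ * const inputs = await processor(image, { input_points });
+ * const outputs = await model(inputs);
+ *
+ * const masks = await processor.post_process_masks(
+ *     outputs.pred_masks, inputs.original_sizes, inputs.reshaped_input_sizes
+ * );
+ * ```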
+ * @param {Tensor} masks Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. + * @param {[number, number][]} original_sizes The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. + * @param {[number, number][]} reshaped_input_sizes The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. + * @param {Object} options Optional parameters for post-processing. + * @param {number} [options.mask_threshold] The threshold to use for binarizing the masks. + * @param {boolean} [options.binarize] Whether to binarize the masks. + * @param {Object} [options.pad_size] The target size the images were padded to before being passed to the model. If `null`, the target size is assumed to be the processor's `pad_size`. + * @param {number} [options.pad_size.height] The height the images were padded to. + * @param {number} [options.pad_size.width] The width the images were padded to. + * @returns {Promise} Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. + */ + async post_process_masks(masks, original_sizes, reshaped_input_sizes, { + mask_threshold = 0.0, + binarize = true, + pad_size = null, + } = {}) { + // masks: [1, 1, 3, 256, 256] + + const output_masks = []; + + pad_size = pad_size ?? this.pad_size; + + /** @type {[number, number]} */ + const target_image_size = [pad_size.height, pad_size.width]; + + for (let i = 0; i < original_sizes.length; ++i) { + const original_size = original_sizes[i]; + const reshaped_input_size = reshaped_input_sizes[i]; + + // Upscale mask to padded size + let interpolated_mask = (await interpolate_4d( + masks[i], + { mode: 'bilinear', size: target_image_size } + )); + + // Crop mask + interpolated_mask = interpolated_mask.slice(null, null, [0, reshaped_input_size[0]], [0, reshaped_input_size[1]]); + + // Downscale mask + interpolated_mask = (await interpolate_4d( + interpolated_mask, + { mode: 'bilinear', size: original_size } + )); + + if (binarize) { + const data = interpolated_mask.data; + const binarizedMaskData = new Uint8Array(data.length); + for (let i = 0; i < data.length; ++i) { + if (data[i] > mask_threshold) { + binarizedMaskData[i] = 1; + } + } + interpolated_mask = new Tensor( + 'bool', + binarizedMaskData, + interpolated_mask.dims + ) + } + + output_masks.push(interpolated_mask); + } + + return output_masks; + } + + /** + * Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. + * @param {import("../../utils/image.js").RawImage} image Input original image + * @param {number} target_size Target size of the resized image + * @param {Object} options Options for generating crop boxes + * @param {number} [options.crop_n_layers] If >0, mask prediction will be run again on crops of the image. + * Sets the number of layers to run, where each layer has 2**i_layer number of image crops. + * @param {number} [options.overlap_ratio] Sets the degree to which crops overlap. In the first crop layer, + * crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. + * @param {number} [options.points_per_crop] Number of points to sample from each crop. + * @param {number} [options.crop_n_points_downscale_factor] The number of points-per-side sampled in layer n is + * scaled down by crop_n_points_downscale_factor**n. 
+ * @returns {Object} An object containing the crop boxes, number of points per crop, cropped images, and input labels. + */ + generate_crop_boxes(image, target_size, { + crop_n_layers = 0, + overlap_ratio = 512 / 1500, + points_per_crop = 32, + crop_n_points_downscale_factor = 1, + } = {}) { + // TODO: Implement + // return { crop_boxes, points_per_crop, cropped_images, input_labels } + } +} + diff --git a/src/models/sam/processing_sam.js b/src/models/sam/processing_sam.js new file mode 100644 index 000000000..4cc0f29e5 --- /dev/null +++ b/src/models/sam/processing_sam.js @@ -0,0 +1,20 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoImageProcessor } from "../auto/image_processing_auto.js"; + +export class SamProcessor extends Processor { + static image_processor_class = AutoImageProcessor + + async _call(...args) { + return await this.image_processor(...args); + } + + post_process_masks(...args) { + // @ts-ignore + return this.image_processor.post_process_masks(...args); + } + + reshape_input_points(...args) { + // @ts-ignore + return this.image_processor.reshape_input_points(...args); + } +} \ No newline at end of file diff --git a/src/models/sapiens/image_processing_sapiens.js b/src/models/sapiens/image_processing_sapiens.js new file mode 100644 index 000000000..df78763cf --- /dev/null +++ b/src/models/sapiens/image_processing_sapiens.js @@ -0,0 +1,13 @@ +import { + ImageProcessor, + post_process_semantic_segmentation, +} from "../../base/image_processors_utils.js"; + + +export class SapiensImageProcessor extends ImageProcessor { + /** @type {typeof post_process_semantic_segmentation} */ + post_process_semantic_segmentation(...args) { + return post_process_semantic_segmentation(...args); + } +} +export class SapiensFeatureExtractor extends SapiensImageProcessor { } diff --git a/src/models/seamless_m4t/feature_extraction_seamless_m4t.js b/src/models/seamless_m4t/feature_extraction_seamless_m4t.js new file mode 100644 index 000000000..8f02de062 --- /dev/null +++ b/src/models/seamless_m4t/feature_extraction_seamless_m4t.js @@ -0,0 +1,180 @@ +import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; +import { Tensor } from '../../utils/tensor.js'; +import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js'; + +export class SeamlessM4TFeatureExtractor extends FeatureExtractor { + + constructor(config) { + super(config); + + const sampling_rate = this.config.sampling_rate; + const mel_filters = mel_filter_bank( + 256, // num_frequency_bins + this.config.num_mel_bins, // num_mel_filters + 20, // min_frequency + Math.floor(sampling_rate / 2), // max_frequency + sampling_rate, // sampling_rate + null, // norm + "kaldi", // mel_scale + true, // triangularize_in_mel_space + ); + + // Do padding: + for (let i = 0; i < mel_filters.length; ++i) { + mel_filters[i].push(0); + } + this.mel_filters = mel_filters; + + this.window = window_function(400, 'povey', { + periodic: false, + }) + } + + /** + * Computes the log-Mel spectrogram of the provided audio waveform. + * @param {Float32Array|Float64Array} waveform The audio waveform to process. + * @param {number} max_length The maximum number of frames to return. + * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. 
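+ *
+ * Sketch of how this is consumed inside `_call` below:
+ * ```javascript
+ * // `max_length` is forwarded as `max_num_frames`, capping the number of returned frames;
+ * // the result has shape [num_frames, num_mel_bins].
+ * const features = await this._extract_fbank_features(audio, this.config.max_length);
+ * ```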
+ */ + async _extract_fbank_features(waveform, max_length) { + // NOTE: We don't pad/truncate since that is passed in as `max_num_frames` + + // Kaldi compliance: 16-bit signed integers + // 32768 == 2 ** 15 + waveform = waveform.map((/** @type {number} */ x) => x * 32768) + + return spectrogram( + waveform, + this.window, // window + 400, // frame_length + 160, // hop_length + { + fft_length: 512, + power: 2.0, + center: false, + preemphasis: 0.97, + mel_filters: this.mel_filters, + log_mel: 'log', + mel_floor: 1.192092955078125e-07, + remove_dc_offset: true, + + // Custom + max_num_frames: max_length, + transpose: true, + } + ) + } + + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @param {Object} options Optional parameters for feature extraction. + * @param {boolean} [options.padding=true] Whether to pad the sequence to a multiple of `pad_to_multiple_of`. + * @param {number} [options.pad_to_multiple_of=2] The number to pad the sequence to a multiple of. + * @param {boolean} [options.do_normalize_per_mel_bins=true] Whether or not to zero-mean unit-variance normalize the input per mel-channel. + * @param {boolean} [options.return_attention_mask=true] Whether to return the attention mask. + * @returns {Promise<{ input_features: Tensor, attention_mask?: Tensor }>} A Promise resolving to an object containing the extracted input features and attention masks as Tensors. + */ + async _call(audio, { + padding = true, + pad_to_multiple_of = 2, + do_normalize_per_mel_bins = true, + return_attention_mask = true, + } = {}) { + validate_audio_inputs(audio, 'SeamlessM4TFeatureExtractor'); + + let features = await this._extract_fbank_features(audio, this.config.max_length); + + if (do_normalize_per_mel_bins) { + const [num_features, feature_size] = features.dims; + const data = features.data; + for (let i = 0; i < feature_size; ++i) { + let sum = 0; + for (let j = 0; j < num_features; ++j) { + sum += data[j * feature_size + i]; + } + + const mean = sum / num_features; + + let variance = 0; + for (let j = 0; j < num_features; ++j) { + variance += (data[j * feature_size + i] - mean) ** 2; + } + variance /= num_features - 1; // NOTE: We use ddof=1 + + const std = Math.sqrt(variance + 1e-7); + for (let j = 0; j < num_features; ++j) { + const index = j * feature_size + i; + data[index] = (data[index] - mean) / std; + } + } + } + + let padded_attention_mask; + if (padding) { + const [num_frames, num_channels] = features.dims; + const data = /** @type {Float32Array} */(features.data); + + const pad_size = num_frames % pad_to_multiple_of; + if (pad_size > 0) { + const padded_data = new Float32Array(num_channels * (num_frames + pad_size)); + padded_data.set(data) + padded_data.fill(this.config.padding_value, data.length) + + const numPaddedFrames = num_frames + pad_size; + features = new Tensor( + features.type, + padded_data, + [numPaddedFrames, num_channels], + ) + + if (return_attention_mask) { + padded_attention_mask = new Tensor( + 'int64', + new BigInt64Array(numPaddedFrames), + [1, numPaddedFrames], + ) + padded_attention_mask.data.fill(1n, 0, num_frames); + } + } + } + + const [num_frames, num_channels] = features.dims; + + const stride = this.config.stride; + const remainder = num_frames % stride; + if (remainder !== 0) { + throw new Error(`The number of frames (${num_frames}) must be a multiple of the stride (${stride}).`) + } + + const input_features = 
features.view( + 1, + Math.floor(num_frames / stride), + num_channels * stride, + ); + + const result = { input_features } + + if (return_attention_mask) { + const reshapedNumFrames = input_features.dims[1]; + + const attention_mask_data = new BigInt64Array(reshapedNumFrames); + + if (padded_attention_mask) { + const padded_attention_mask_data = padded_attention_mask.data; + for (let i = 1, j = 0; i < num_frames; i += stride, ++j) { + attention_mask_data[j] = padded_attention_mask_data[i]; + } + } else { + attention_mask_data.fill(1n); + } + result.attention_mask = new Tensor( + 'int64', + attention_mask_data, + [1, reshapedNumFrames], + ); + } + + return result; + } +} diff --git a/src/models/segformer/image_processing_segformer.js b/src/models/segformer/image_processing_segformer.js new file mode 100644 index 000000000..fe129a05a --- /dev/null +++ b/src/models/segformer/image_processing_segformer.js @@ -0,0 +1,13 @@ +import { + ImageProcessor, + post_process_semantic_segmentation, +} from "../../base/image_processors_utils.js"; + + +export class SegformerImageProcessor extends ImageProcessor { + /** @type {typeof post_process_semantic_segmentation} */ + post_process_semantic_segmentation(...args) { + return post_process_semantic_segmentation(...args); + } +} +export class SegformerFeatureExtractor extends SegformerImageProcessor { } diff --git a/src/models/siglip/image_processing_siglip.js b/src/models/siglip/image_processing_siglip.js new file mode 100644 index 000000000..5e666562b --- /dev/null +++ b/src/models/siglip/image_processing_siglip.js @@ -0,0 +1,5 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class SiglipImageProcessor extends ImageProcessor { } diff --git a/src/models/speecht5/feature_extraction_speecht5.js b/src/models/speecht5/feature_extraction_speecht5.js new file mode 100644 index 000000000..0f3f2ab38 --- /dev/null +++ b/src/models/speecht5/feature_extraction_speecht5.js @@ -0,0 +1,4 @@ + +import { FeatureExtractor } from "../../base/feature_extraction_utils.js"; + +export class SpeechT5FeatureExtractor extends FeatureExtractor { } diff --git a/src/models/speecht5/processing_speecht5.js b/src/models/speecht5/processing_speecht5.js new file mode 100644 index 000000000..08af8ba1a --- /dev/null +++ b/src/models/speecht5/processing_speecht5.js @@ -0,0 +1,17 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoTokenizer } from "../../tokenizers.js"; +import { AutoFeatureExtractor } from "../auto/feature_extraction_auto.js"; + +export class SpeechT5Processor extends Processor { + static tokenizer_class = AutoTokenizer + static feature_extractor_class = AutoFeatureExtractor + + /** + * Calls the feature_extractor function with the given input. + * @param {any} input The input to extract features from. + * @returns {Promise} A Promise that resolves with the extracted features. + */ + async _call(input) { + return await this.feature_extractor(input) + } +} diff --git a/src/models/swin2sr/image_processing_swin2sr.js b/src/models/swin2sr/image_processing_swin2sr.js new file mode 100644 index 000000000..e53c5c4c1 --- /dev/null +++ b/src/models/swin2sr/image_processing_swin2sr.js @@ -0,0 +1,24 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class Swin2SRImageProcessor extends ImageProcessor { + pad_image(pixelData, imgDims, padSize, options = {}) { + // NOTE: In this case, `padSize` represents the size of the sliding window for the local attention. 
+ // In other words, the image is padded so that its width and height are multiples of `padSize`. + const [imageHeight, imageWidth, imageChannels] = imgDims; + + return super.pad_image(pixelData, imgDims, { + // NOTE: For Swin2SR models, the original python implementation adds padding even when the image's width/height is already + // a multiple of `pad_size`. However, this is most likely a bug (PR: https://github.com/mv-lab/swin2sr/pull/19). + // For this reason, we only add padding when the image's width/height is not a multiple of `pad_size`. + width: imageWidth + (padSize - imageWidth % padSize) % padSize, + height: imageHeight + (padSize - imageHeight % padSize) % padSize, + }, { + mode: 'symmetric', + center: false, + constant_values: -1, + ...options, + }) + } +} \ No newline at end of file diff --git a/src/models/vit/image_processing_vit.js b/src/models/vit/image_processing_vit.js new file mode 100644 index 000000000..ad07ca27e --- /dev/null +++ b/src/models/vit/image_processing_vit.js @@ -0,0 +1,7 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class ViTImageProcessor extends ImageProcessor { } +export class ViTFeatureExtractor extends ViTImageProcessor { } + diff --git a/src/models/vitmatte/image_processing_vitmatte.js b/src/models/vitmatte/image_processing_vitmatte.js new file mode 100644 index 000000000..274862344 --- /dev/null +++ b/src/models/vitmatte/image_processing_vitmatte.js @@ -0,0 +1,50 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +import { + stack, + cat, +} from "../../utils/tensor.js"; + +export class VitMatteImageProcessor extends ImageProcessor { + /** + * Calls the feature extraction process on an array of images, preprocesses + * each image, and concatenates the resulting features into a single Tensor. + * @param {import("../../utils/image.js").RawImage[]} images The image(s) to extract features from. + * @param {import("../../utils/image.js").RawImage[]} trimaps The trimaps(s) to extract features from. + * @returns {Promise} An object containing the concatenated pixel values of the preprocessed images. + */ + async _call(images, trimaps) { + if (!Array.isArray(images)) { + images = [images]; + } + if (!Array.isArray(trimaps)) { + trimaps = [trimaps]; + } + + const imageData = await Promise.all(images.map(x => this.preprocess(x))); + const trimapData = await Promise.all(trimaps.map(x => this.preprocess(x, { + do_normalize: false, + do_convert_rgb: false, + do_convert_grayscale: true, + }))); + + + // Stack pixel values + const pixel_values = stack(imageData.map( + // Concatenate images and trimaps + (x, i) => cat([x.pixel_values, trimapData[i].pixel_values], 0) + ), 0); + + return { + pixel_values, + + // Original sizes of images + original_sizes: imageData.map(x => x.original_size), + + // Reshaped sizes of images, before padding or cropping + reshaped_input_sizes: imageData.map(x => x.reshaped_input_size), + } + } +} diff --git a/src/models/vitpose/image_processing_vitpose.js b/src/models/vitpose/image_processing_vitpose.js new file mode 100644 index 000000000..daacbc4f1 --- /dev/null +++ b/src/models/vitpose/image_processing_vitpose.js @@ -0,0 +1,89 @@ +import { + ImageProcessor, +} from "../../base/image_processors_utils.js"; + +export class VitPoseImageProcessor extends ImageProcessor { + + /** + * Transform the heatmaps into keypoint predictions and transform them back to the image. 
+ * NOTE: This is a naive implementation and does not include advanced post-processing techniques, + * so the results may not be as accurate as the original implementation. + * @param {import('../../utils/tensor.js').Tensor} outputs The model outputs. + * @param {[number, number, number, number][][]} boxes List or array of bounding boxes for each image. + * Each box should be a list of 4 floats representing the bounding box coordinates in COCO format (top_left_x, top_left_y, width, height). + * @returns {{ + * bbox: [number, number, number, number], + * scores: number[], + * labels: number[], + * keypoints: [number, number][] + * }[][]} List of keypoints predictions for each image. + */ + post_process_pose_estimation(outputs, boxes, { + threshold = null, + // TODO: + // kernel_size = 11, + // target_sizes = null, + } = {}) { + // NOTE: boxes are 3D (batch_size, num_boxes, 4) + const heatmaps = outputs.tolist(); + const [batch_size, num_classes, height, width] = outputs.dims; + + const results = []; + for (let b = 0; b < batch_size; ++b) { + const heatmap = heatmaps[b]; + const bboxes = boxes[b]; + + const batch_results = []; + for (let n = 0; n < bboxes.length; ++n) { + const bbox = bboxes[n]; + + const keypoints = []; + const scores = []; + const labels = []; + + const xScale = bbox.at(-2) / width; + const yScale = bbox.at(-1) / height; + for (let c = 0; c < heatmap.length; ++c) { + let [xWeightedSum, yWeightedSum] = [0, 0]; + let sum = 0; + let score = -Infinity; + const row = heatmap[c]; + for (let y = 0; y < row.length; ++y) { + const col = row[y]; + for (let x = 0; x < col.length; ++x) { + const value = col[x]; + sum += value; + + score = Math.max(score, value); + + // Get weighted sum of positions + // TODO: Determine best offsets + xWeightedSum += (x + 0.5) * value; + yWeightedSum += (y) * value; + } + } + + // Ignore low scores, if threshold is set + if (threshold != null && score < threshold) continue; + + /** @type {[number, number]} */ + const keypoint = [ + xScale * xWeightedSum / sum, + yScale * yWeightedSum / sum, + ] + keypoints.push(keypoint); + labels.push(c); + scores.push(score); + } + batch_results.push({ + bbox, + scores, + labels, + keypoints, + }); + } + results.push(batch_results); + } + return results; + } +} diff --git a/src/models/wav2vec2/feature_extraction_wav2vec2.js b/src/models/wav2vec2/feature_extraction_wav2vec2.js new file mode 100644 index 000000000..51f007603 --- /dev/null +++ b/src/models/wav2vec2/feature_extraction_wav2vec2.js @@ -0,0 +1,44 @@ +import { FeatureExtractor, validate_audio_inputs } from "../../base/feature_extraction_utils.js"; +import { Tensor } from "../../utils/tensor.js"; + +export class Wav2Vec2FeatureExtractor extends FeatureExtractor { + + /** + * @param {Float32Array} input_values + * @returns {Float32Array} + */ + _zero_mean_unit_var_norm(input_values) { + // TODO support batch? + const sum = input_values.reduce((a, b) => a + b, 0); + const mean = sum / input_values.length; + const variance = input_values.reduce((a, b) => a + (b - mean) ** 2, 0) / input_values.length; + return input_values.map(x => (x - mean) / Math.sqrt(variance + 1e-7)); + } + + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @returns {Promise<{ input_values: Tensor; attention_mask: Tensor }>} A Promise resolving to an object containing the extracted input features and attention mask as Tensors. 
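+ *
+ * **Example** (minimal sketch; the checkpoint id, the 16 kHz sampling rate, and the assumption that `AutoFeatureExtractor` is re-exported from the package root are not verified here):
+ * ```javascript
+ * import { AutoFeatureExtractor, read_audio } from '@huggingface/transformers';
+ *
+ * const feature_extractor = await AutoFeatureExtractor.from_pretrained('Xenova/wav2vec2-base-960h'); // assumed checkpoint
+ * const audio = await read_audio('speech.wav', 16000);
+ * const { input_values, attention_mask } = await feature_extractor(audio);
+ * ```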
+ */ + async _call(audio) { + validate_audio_inputs(audio, 'Wav2Vec2FeatureExtractor'); + + if (audio instanceof Float64Array) { + audio = new Float32Array(audio); + } + + let input_values = audio; + + // zero-mean and unit-variance normalization + if (this.config.do_normalize) { + input_values = this._zero_mean_unit_var_norm(input_values); + } + + // TODO: allow user to pass in attention mask + const shape = [1, input_values.length]; + return { + input_values: new Tensor('float32', input_values, shape), + attention_mask: new Tensor('int64', new BigInt64Array(input_values.length).fill(1n), shape) + }; + } +} diff --git a/src/models/wav2vec2/processing_wav2vec2.js b/src/models/wav2vec2/processing_wav2vec2.js new file mode 100644 index 000000000..490fe2fc9 --- /dev/null +++ b/src/models/wav2vec2/processing_wav2vec2.js @@ -0,0 +1,15 @@ +import { Processor } from "../../base/processing_utils.js"; +import { AutoFeatureExtractor } from "../auto/feature_extraction_auto.js"; + +export class Wav2Vec2ProcessorWithLM extends Processor { + static feature_extractor_class = AutoFeatureExtractor + + /** + * Calls the feature_extractor function with the given audio input. + * @param {any} audio The audio input to extract features from. + * @returns {Promise} A Promise that resolves with the extracted features. + */ + async _call(audio) { + return await this.feature_extractor(audio) + } +} diff --git a/src/models/wespeaker/feature_extraction_wespeaker.js b/src/models/wespeaker/feature_extraction_wespeaker.js new file mode 100644 index 000000000..0815f9cda --- /dev/null +++ b/src/models/wespeaker/feature_extraction_wespeaker.js @@ -0,0 +1,100 @@ +import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; +import { Tensor } from '../../utils/tensor.js'; +import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js'; + + +export class WeSpeakerFeatureExtractor extends FeatureExtractor { + + constructor(config) { + super(config); + + const sampling_rate = this.config.sampling_rate; + const mel_filters = mel_filter_bank( + 256, // num_frequency_bins + this.config.num_mel_bins, // num_mel_filters + 20, // min_frequency + Math.floor(sampling_rate / 2), // max_frequency + sampling_rate, // sampling_rate + null, // norm + "kaldi", // mel_scale + true, // triangularize_in_mel_space + ); + + // Do padding: + for (let i = 0; i < mel_filters.length; ++i) { + mel_filters[i].push(0); + } + this.mel_filters = mel_filters; + + this.window = window_function(400, 'hamming', { + periodic: false, + }) + this.min_num_frames = this.config.min_num_frames; + } + + /** + * Computes the log-Mel spectrogram of the provided audio waveform. + * @param {Float32Array|Float64Array} waveform The audio waveform to process. + * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. 
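+ *
+ * Sketch of how it is consumed inside `_call` below:
+ * ```javascript
+ * // A leading batch dimension is added in place: [1, num_frames, num_mel_bins]
+ * const features = (await this._extract_fbank_features(audio)).unsqueeze_(0);
+ * ```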
+ */ + async _extract_fbank_features(waveform) { + // Kaldi compliance: 16-bit signed integers + // 32768 == 2 ** 15 + waveform = waveform.map((/** @type {number} */ x) => x * 32768) + + return spectrogram( + waveform, + this.window, // window + 400, // frame_length + 160, // hop_length + { + fft_length: 512, + power: 2.0, + center: false, + preemphasis: 0.97, + mel_filters: this.mel_filters, + log_mel: 'log', + mel_floor: 1.192092955078125e-07, + remove_dc_offset: true, + + // Custom + transpose: true, + min_num_frames: this.min_num_frames, + } + ) + } + + + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. + */ + async _call(audio) { + validate_audio_inputs(audio, 'WeSpeakerFeatureExtractor'); + + const features = (await this._extract_fbank_features(audio)).unsqueeze_(0); + + if (this.config.fbank_centering_span === null) { + // center features with global average + const meanData = /** @type {Float32Array} */ (features.mean(1).data); + const featuresData = /** @type {Float32Array} */(features.data); + const [batch_size, num_frames, feature_size] = features.dims; + + for (let i = 0; i < batch_size; ++i) { + const offset1 = i * num_frames * feature_size; + const offset2 = i * feature_size; + for (let j = 0; j < num_frames; ++j) { + const offset3 = offset1 + j * feature_size; + for (let k = 0; k < feature_size; ++k) { + featuresData[offset3 + k] -= meanData[offset2 + k]; + } + } + } + } + + return { + input_features: features + }; + } +} diff --git a/src/models/whisper/feature_extraction_whisper.js b/src/models/whisper/feature_extraction_whisper.js new file mode 100644 index 000000000..f4d351f88 --- /dev/null +++ b/src/models/whisper/feature_extraction_whisper.js @@ -0,0 +1,84 @@ +import { FeatureExtractor, validate_audio_inputs } from '../../base/feature_extraction_utils.js'; +import { Tensor } from '../../utils/tensor.js'; +import { mel_filter_bank, spectrogram, window_function } from '../../utils/audio.js'; +import { max } from '../../utils/maths.js'; + +export class WhisperFeatureExtractor extends FeatureExtractor { + + constructor(config) { + super(config); + + // Prefer given `mel_filters` from preprocessor_config.json, or calculate them if they don't exist. + this.config.mel_filters ??= mel_filter_bank( + Math.floor(1 + this.config.n_fft / 2), // num_frequency_bins + this.config.feature_size, // num_mel_filters + 0.0, // min_frequency + 8000.0, // max_frequency + this.config.sampling_rate, // sampling_rate + "slaney", // norm + "slaney", // mel_scale + ); + + this.window = window_function(this.config.n_fft, 'hann'); + } + + /** + * Computes the log-Mel spectrogram of the provided audio waveform. + * @param {Float32Array|Float64Array} waveform The audio waveform to process. + * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. 
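+ *
+ * Note (based on the implementation below): after the log10-mel spectrogram is computed, values are clamped
+ * to within 8.0 of the per-clip maximum and rescaled as `(x + 4.0) / 4.0`, mirroring the reference Whisper
+ * feature normalization.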
+ */ + async _extract_fbank_features(waveform) { + const features = await spectrogram( + waveform, + this.window, // window + this.config.n_fft, // frame_length + this.config.hop_length, // hop_length + { + power: 2.0, + mel_filters: this.config.mel_filters, + log_mel: 'log10', + + // Custom + max_num_frames: this.config.nb_max_frames, // 3000 + } + ) + + const data = features.data; + const maxValue = max(data)[0]; + + for (let i = 0; i < data.length; ++i) { + data[i] = (Math.max(data[i], maxValue - 8.0) + 4.0) / 4.0; + } + + return features; + } + + /** + * Asynchronously extracts features from a given audio using the provided configuration. + * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. + * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. + */ + async _call(audio) { + validate_audio_inputs(audio, 'WhisperFeatureExtractor'); + + let waveform; + if (audio.length > this.config.n_samples) { + console.warn( + "Attempting to extract features for audio longer than 30 seconds. " + + "If using a pipeline to extract transcript from a long audio clip, " + + "remember to specify `chunk_length_s` and/or `stride_length_s`." + ); + waveform = audio.slice(0, this.config.n_samples); + } else { + // pad with zeros + waveform = new Float32Array(this.config.n_samples); + waveform.set(audio); + } + + const features = await this._extract_fbank_features(waveform); + + return { + input_features: features.unsqueeze_(0) + }; + } +} diff --git a/src/models/whisper/processing_whisper.js b/src/models/whisper/processing_whisper.js new file mode 100644 index 000000000..b676273b8 --- /dev/null +++ b/src/models/whisper/processing_whisper.js @@ -0,0 +1,21 @@ +import { AutoFeatureExtractor } from "../auto/feature_extraction_auto.js" +import { AutoTokenizer } from "../../tokenizers.js" +import { Processor } from "../../base/processing_utils.js" + +/** + * Represents a WhisperProcessor that extracts features from an audio input. + */ +export class WhisperProcessor extends Processor { + static tokenizer_class = AutoTokenizer + static feature_extractor_class = AutoFeatureExtractor + + /** + * Calls the feature_extractor function with the given audio input. + * @param {any} audio The audio input to extract features from. + * @returns {Promise} A Promise that resolves with the extracted features. 
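+ *
+ * **Example** (adapted from the module-level example removed from `src/processors.js` below):
+ * ```javascript
+ * import { AutoProcessor, read_audio } from '@huggingface/transformers';
+ *
+ * const processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en');
+ * const audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000);
+ * const { input_features } = await processor(audio); // Tensor of dims [1, 80, 3000]
+ * ```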
+ */ + async _call(audio) { + return await this.feature_extractor(audio); + } +} + diff --git a/src/models/yolos/image_processing_yolos.js b/src/models/yolos/image_processing_yolos.js new file mode 100644 index 000000000..f82b08984 --- /dev/null +++ b/src/models/yolos/image_processing_yolos.js @@ -0,0 +1,12 @@ +import { + ImageProcessor, + post_process_object_detection, +} from "../../base/image_processors_utils.js"; + +export class YolosImageProcessor extends ImageProcessor { + /** @type {typeof post_process_object_detection} */ + post_process_object_detection(...args) { + return post_process_object_detection(...args); + } +} +export class YolosFeatureExtractor extends YolosImageProcessor { } diff --git a/src/pipelines.js b/src/pipelines.js index 3b7373cf9..a61cb1dde 100644 --- a/src/pipelines.js +++ b/src/pipelines.js @@ -45,8 +45,10 @@ import { } from './models.js'; import { AutoProcessor, - Processor -} from './processors.js'; +} from './models/auto/processing_auto.js'; +import { + Processor, +} from './base/processing_utils.js'; import { Callable, @@ -54,7 +56,6 @@ import { import { dispatchCallback, - pop, product, } from './utils/core.js'; import { @@ -158,7 +159,6 @@ function get_bounding_box(box, asInteger) { /** * The Pipeline class is the class from which all pipelines inherit. * Refer to this class for methods shared across different pipelines. - * @extends Callable */ export class Pipeline extends Callable { /** @@ -2131,8 +2131,8 @@ export class ImageSegmentationPipeline extends (/** @type {new (options: ImagePi fn = this.subtasks_mapping[subtask]; } else { for (let [task, func] of Object.entries(this.subtasks_mapping)) { - if (func in this.processor.feature_extractor) { - fn = this.processor.feature_extractor[func].bind(this.processor.feature_extractor); + if (func in this.processor.image_processor) { + fn = this.processor.image_processor[func].bind(this.processor.image_processor); subtask = task; break; } @@ -2362,7 +2362,7 @@ export class ObjectDetectionPipeline extends (/** @type {new (options: ImagePipe const output = await this.model({ pixel_values, pixel_mask }); // @ts-ignore - const processed = this.processor.feature_extractor.post_process_object_detection(output, threshold, imageSizes); + const processed = this.processor.image_processor.post_process_object_detection(output, threshold, imageSizes); // Add labels const id2label = this.model.config.id2label; @@ -2510,7 +2510,7 @@ export class ZeroShotObjectDetectionPipeline extends (/** @type {new (options: T const output = await this.model({ ...text_inputs, pixel_values }); // @ts-ignore - const processed = this.processor.feature_extractor.post_process_object_detection(output, threshold, imageSize, true)[0]; + const processed = this.processor.image_processor.post_process_object_detection(output, threshold, imageSize, true)[0]; let result = processed.boxes.map((box, i) => ({ score: processed.scores[i], label: candidate_labels[processed.classes[i]], diff --git a/src/processors.js b/src/processors.js deleted file mode 100644 index 9af0791be..000000000 --- a/src/processors.js +++ /dev/null @@ -1,2655 +0,0 @@ - -/** - * @file Processors are used to prepare non-textual inputs (e.g., image or audio) for a model. - * - * **Example:** Using a `WhisperProcessor` to prepare an audio input for a model. 
- * ```javascript - * import { AutoProcessor, read_audio } from '@huggingface/transformers'; - * - * let processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en'); - * let audio = await read_audio('https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac', 16000); - * let { input_features } = await processor(audio); - * // Tensor { - * // data: Float32Array(240000) [0.4752984642982483, 0.5597258806228638, 0.56434166431427, ...], - * // dims: [1, 80, 3000], - * // type: 'float32', - * // size: 240000, - * // } - * ``` - * - * @module processors - */ -import { - Callable, -} from './utils/generic.js'; - -import { - calculateDimensions, - calculateReflectOffset, -} from './utils/core.js'; - -import { - getModelJSON, -} from './utils/hub.js'; - -import { - min, - max, - softmax, - bankers_round, -} from './utils/maths.js'; - - -import { Tensor, cat, interpolate, stack, interpolate_4d, full } from './utils/tensor.js'; - -import { RawImage } from './utils/image.js'; -import { - window_function, - spectrogram, - mel_filter_bank, -} from './utils/audio.js'; - - -// Helper functions - -/** - * Converts bounding boxes from center format to corners format. - * - * @param {number[]} arr The coordinate for the center of the box and its width, height dimensions (center_x, center_y, width, height) - * @returns {number[]} The coodinates for the top-left and bottom-right corners of the box (top_left_x, top_left_y, bottom_right_x, bottom_right_y) - */ -function center_to_corners_format([centerX, centerY, width, height]) { - return [ - centerX - width / 2, - centerY - height / 2, - centerX + width / 2, - centerY + height / 2 - ]; -} - -/** - * Post-processes the outputs of the model (for object detection). - * @param {Object} outputs The outputs of the model that must be post-processed - * @param {Tensor} outputs.logits The logits - * @param {Tensor} outputs.pred_boxes The predicted boxes. - * @param {number} [threshold=0.5] The threshold to use for the scores. - * @param {[number, number][]} [target_sizes=null] The sizes of the original images. - * @param {boolean} [is_zero_shot=false] Whether zero-shot object detection was performed. - * @return {Object[]} An array of objects containing the post-processed outputs. - * @private - */ -function post_process_object_detection(outputs, threshold = 0.5, target_sizes = null, is_zero_shot = false) { - const out_logits = outputs.logits; - const out_bbox = outputs.pred_boxes; - const [batch_size, num_boxes, num_classes] = out_logits.dims; - - if (target_sizes !== null && target_sizes.length !== batch_size) { - throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") - } - let toReturn = []; - for (let i = 0; i < batch_size; ++i) { - let target_size = target_sizes !== null ? 
target_sizes[i] : null; - let info = { - boxes: [], - classes: [], - scores: [] - } - let logits = out_logits[i]; - let bbox = out_bbox[i]; - - for (let j = 0; j < num_boxes; ++j) { - let logit = logits[j]; - - let indices = []; - let probs; - if (is_zero_shot) { - // Get indices of classes with high enough probability - probs = logit.sigmoid().data; - for (let k = 0; k < probs.length; ++k) { - if (probs[k] > threshold) { - indices.push(k); - } - } - - } else { - // Get most probable class - let maxIndex = max(logit.data)[1]; - - if (maxIndex === num_classes - 1) { - // This is the background class, skip it - continue; - } - // Compute softmax over classes - probs = softmax(logit.data); - - if (probs[maxIndex] < threshold) { - continue; - } - indices.push(maxIndex); - } - - for (const index of indices) { - - // Some class has a high enough probability - /** @type {number[]} */ - let box = bbox[j].data; - - // convert to [x0, y0, x1, y1] format - box = center_to_corners_format(box) - if (target_size !== null) { - box = box.map((x, i) => x * target_size[(i + 1) % 2]) - } - - info.boxes.push(box); - info.classes.push(index); - info.scores.push(probs[index]); - } - } - toReturn.push(info); - } - return toReturn; -} - - -/** - * Post-processes the outputs of the model (for semantic segmentation). - * @param {*} outputs Raw outputs of the model. - * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size - * (height, width) of each prediction. If unset, predictions will not be resized. - * @returns {{segmentation: Tensor; labels: number[]}[]} The semantic segmentation maps. - */ -function post_process_semantic_segmentation(outputs, target_sizes = null) { - - const logits = outputs.logits; - const batch_size = logits.dims[0]; - - if (target_sizes !== null && target_sizes.length !== batch_size) { - throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") - } - - const toReturn = []; - for (let i = 0; i < batch_size; ++i) { - const target_size = target_sizes !== null ? target_sizes[i] : null; - - let data = logits[i]; - - // 1. If target_size is not null, we need to resize the masks to the target size - if (target_size !== null) { - // resize the masks to the target size - data = interpolate(data, target_size, 'bilinear', false); - } - const [height, width] = target_size ?? data.dims.slice(-2); - - const segmentation = new Tensor( - 'int32', - new Int32Array(height * width), - [height, width] - ); - - // Buffer to store current largest value - const buffer = data[0].data; - const segmentation_data = segmentation.data; - for (let j = 1; j < data.dims[0]; ++j) { - const row = data[j].data; - for (let k = 0; k < row.length; ++k) { - if (row[k] > buffer[k]) { - buffer[k] = row[k]; - segmentation_data[k] = j; - } - } - } - - // Store which objects have labels - // This is much more efficient that creating a set of the final values - const hasLabel = new Array(data.dims[0]); - for (let j = 0; j < segmentation_data.length; ++j) { - const index = segmentation_data[j]; - hasLabel[index] = index; - } - /** @type {number[]} The unique list of labels that were detected */ - const labels = hasLabel.filter(x => x !== undefined); - - toReturn.push({ segmentation, labels }); - } - return toReturn; -} - - -/** - * Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. - * @param {Tensor} class_logits The class logits. 
- * @param {Tensor} mask_logits The mask logits. - * @param {number} object_mask_threshold A number between 0 and 1 used to binarize the masks. - * @param {number} num_labels The number of labels. - * @returns {[Tensor[], number[], number[]]} The binarized masks, the scores, and the labels. - * @private - */ -function remove_low_and_no_objects(class_logits, mask_logits, object_mask_threshold, num_labels) { - - const mask_probs_item = []; - const pred_scores_item = []; - const pred_labels_item = []; - - for (let j = 0; j < class_logits.dims[0]; ++j) { - const cls = class_logits[j]; - const mask = mask_logits[j]; - - const pred_label = max(cls.data)[1]; - if (pred_label === num_labels) { - // Is the background, so we ignore it - continue; - } - - const scores = softmax(cls.data); - const pred_score = scores[pred_label]; - if (pred_score > object_mask_threshold) { - mask_probs_item.push(mask); - pred_scores_item.push(pred_score); - pred_labels_item.push(pred_label); - } - } - - return [mask_probs_item, pred_scores_item, pred_labels_item]; -} - -/** - * Checks whether the segment is valid or not. - * @param {Int32Array} mask_labels Labels for each pixel in the mask. - * @param {Tensor[]} mask_probs Probabilities for each pixel in the masks. - * @param {number} k The class id of the segment. - * @param {number} mask_threshold The mask threshold. - * @param {number} overlap_mask_area_threshold The overlap mask area threshold. - * @returns {[boolean, number[]]} Whether the segment is valid or not, and the indices of the valid labels. - * @private - */ -function check_segment_validity( - mask_labels, - mask_probs, - k, - mask_threshold = 0.5, - overlap_mask_area_threshold = 0.8 -) { - // mask_k is a 1D array of indices, indicating where the mask is equal to k - const mask_k = []; - let mask_k_area = 0; - let original_area = 0; - - const mask_probs_k_data = mask_probs[k].data; - - // Compute the area of all the stuff in query k - for (let i = 0; i < mask_labels.length; ++i) { - if (mask_labels[i] === k) { - mask_k.push(i); - ++mask_k_area; - } - - if (mask_probs_k_data[i] >= mask_threshold) { - ++original_area; - } - } - let mask_exists = mask_k_area > 0 && original_area > 0; - - // Eliminate disconnected tiny segments - if (mask_exists) { - // Perform additional check - let area_ratio = mask_k_area / original_area; - mask_exists = area_ratio > overlap_mask_area_threshold; - } - - return [mask_exists, mask_k] -} - -/** - * Computes the segments. - * @param {Tensor[]} mask_probs The mask probabilities. - * @param {number[]} pred_scores The predicted scores. - * @param {number[]} pred_labels The predicted labels. - * @param {number} mask_threshold The mask threshold. - * @param {number} overlap_mask_area_threshold The overlap mask area threshold. - * @param {Set} label_ids_to_fuse The label ids to fuse. - * @param {number[]} target_size The target size of the image. - * @returns {[Tensor, Array<{id: number, label_id: number, score: number}>]} The computed segments. - * @private - */ -function compute_segments( - mask_probs, - pred_scores, - pred_labels, - mask_threshold, - overlap_mask_area_threshold, - label_ids_to_fuse = null, - target_size = null, -) { - const [height, width] = target_size ?? mask_probs[0].dims; - - const segmentation = new Tensor( - 'int32', - new Int32Array(height * width), - [height, width] - ); - const segments = []; - - // 1. 
If target_size is not null, we need to resize the masks to the target size - if (target_size !== null) { - // resize the masks to the target size - for (let i = 0; i < mask_probs.length; ++i) { - mask_probs[i] = interpolate(mask_probs[i], target_size, 'bilinear', false); - } - } - - // 2. Weigh each mask by its prediction score - // NOTE: `mask_probs` is updated in-place - // - // Temporary storage for the best label/scores for each pixel ([height, width]): - const mask_labels = new Int32Array(mask_probs[0].data.length); - const bestScores = new Float32Array(mask_probs[0].data.length); - - for (let i = 0; i < mask_probs.length; ++i) { - let score = pred_scores[i]; - - const mask_probs_i_data = mask_probs[i].data; - - for (let j = 0; j < mask_probs_i_data.length; ++j) { - mask_probs_i_data[j] *= score - if (mask_probs_i_data[j] > bestScores[j]) { - mask_labels[j] = i; - bestScores[j] = mask_probs_i_data[j]; - } - } - } - - let current_segment_id = 0; - - // let stuff_memory_list = {} - const segmentation_data = segmentation.data; - for (let k = 0; k < pred_labels.length; ++k) { - const pred_class = pred_labels[k]; - - // TODO add `should_fuse` - // let should_fuse = pred_class in label_ids_to_fuse - - // Check if mask exists and large enough to be a segment - const [mask_exists, mask_k] = check_segment_validity( - mask_labels, - mask_probs, - k, - mask_threshold, - overlap_mask_area_threshold - ) - - if (!mask_exists) { - // Nothing to see here - continue; - } - - // TODO - // if (pred_class in stuff_memory_list) { - // current_segment_id = stuff_memory_list[pred_class] - // } else { - // current_segment_id += 1; - // } - ++current_segment_id; - - - // Add current object segment to final segmentation map - for (const index of mask_k) { - segmentation_data[index] = current_segment_id; - } - - segments.push({ - id: current_segment_id, - label_id: pred_class, - // was_fused: should_fuse, TODO - score: pred_scores[k], - }) - - // TODO - // if(should_fuse){ - // stuff_memory_list[pred_class] = current_segment_id - // } - } - - return [segmentation, segments]; -} - - -/** - * Post-process the model output to generate the final panoptic segmentation. - * @param {*} outputs The model output to post process - * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks. - * @param {number} [mask_threshold=0.5] Threshold to use when turning the predicted masks into binary values. - * @param {number} [overlap_mask_area_threshold=0.8] The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. - * @param {Set} [label_ids_to_fuse=null] The labels in this state will have all their instances be fused together. - * @param {[number, number][]} [target_sizes=null] The target sizes to resize the masks to. - * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>} - */ -function post_process_panoptic_segmentation( - outputs, - threshold = 0.5, - mask_threshold = 0.5, - overlap_mask_area_threshold = 0.8, - label_ids_to_fuse = null, - target_sizes = null, -) { - if (label_ids_to_fuse === null) { - console.warn("`label_ids_to_fuse` unset. No instance will be fused.") - label_ids_to_fuse = new Set(); - } - - const class_queries_logits = outputs.class_queries_logits ?? outputs.logits; // [batch_size, num_queries, num_classes+1] - const masks_queries_logits = outputs.masks_queries_logits ?? 
outputs.pred_masks; // [batch_size, num_queries, height, width] - - const mask_probs = masks_queries_logits.sigmoid() // [batch_size, num_queries, height, width] - - let [batch_size, num_queries, num_labels] = class_queries_logits.dims; - num_labels -= 1; // Remove last class (background) - - if (target_sizes !== null && target_sizes.length !== batch_size) { - throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits") - } - - let toReturn = []; - for (let i = 0; i < batch_size; ++i) { - let target_size = target_sizes !== null ? target_sizes[i] : null; - - let class_logits = class_queries_logits[i]; - let mask_logits = mask_probs[i]; - - let [mask_probs_item, pred_scores_item, pred_labels_item] = remove_low_and_no_objects(class_logits, mask_logits, threshold, num_labels); - - if (pred_labels_item.length === 0) { - // No mask found - let [height, width] = target_size ?? mask_logits.dims.slice(-2); - - let segmentation = new Tensor( - 'int32', - new Int32Array(height * width).fill(-1), - [height, width] - ) - toReturn.push({ - segmentation: segmentation, - segments_info: [] - }); - continue; - } - - - // Get segmentation map and segment information of batch item - let [segmentation, segments] = compute_segments( - mask_probs_item, - pred_scores_item, - pred_labels_item, - mask_threshold, - overlap_mask_area_threshold, - label_ids_to_fuse, - target_size, - ) - - toReturn.push({ - segmentation: segmentation, - segments_info: segments - }) - } - - return toReturn; -} - - -/** - * Post-processes the outputs of the model (for instance segmentation). - * @param {*} outputs Raw outputs of the model. - * @param {number} [threshold=0.5] The probability score threshold to keep predicted instance masks. - * @param {[number, number][]} [target_sizes=null] List of tuples corresponding to the requested final size - * (height, width) of each prediction. If unset, predictions will not be resized. - * @returns {Array<{ segmentation: Tensor, segments_info: Array<{id: number, label_id: number, score: number}>}>} - */ -function post_process_instance_segmentation(outputs, threshold = 0.5, target_sizes = null) { - throw new Error('Not implemented yet'); - return []; -} - -/** - * Named tuple to indicate the order we are using is (height x width), even though - * the Graphics’ industry standard is (width x height). - * @typedef {[height: number, width: number]} HeightWidth - */ - -/** - * Helper function to validate audio inputs. - * @param {any} audio The audio data. - * @param {string} feature_extractor The name of the feature extractor. - * @private - */ -function validate_audio_inputs(audio, feature_extractor) { - if (!(audio instanceof Float32Array || audio instanceof Float64Array)) { - throw new Error( - `${feature_extractor} expects input to be a Float32Array or a Float64Array, but got ${audio?.constructor?.name ?? typeof audio} instead. ` + - `If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.` - ) - } -} - -/** - * Helper function to constrain a value to be a multiple of a number. - * @param {number} val The value to constrain. - * @param {number} multiple The number to constrain to. - * @param {number} [minVal=0] The minimum value to constrain to. - * @param {number} [maxVal=null] The maximum value to constrain to. - * @returns {number} The constrained value. 
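A similar hedged sketch for the panoptic post-processing above, reached through `DetrFeatureExtractor.post_process_panoptic_segmentation` defined further down. The checkpoint id and loading code are illustrative, not prescribed by the code above:

import { AutoProcessor, AutoModelForImageSegmentation, RawImage } from '@huggingface/transformers';

const model_id = 'Xenova/detr-resnet-50-panoptic'; // illustrative checkpoint
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await AutoModelForImageSegmentation.from_pretrained(model_id);

const image = await RawImage.read('street.jpg');
const outputs = await model(await processor(image));

// Arguments: threshold, mask_threshold, overlap_mask_area_threshold, label_ids_to_fuse, target_sizes
const [{ segmentation, segments_info }] = processor.feature_extractor
    .post_process_panoptic_segmentation(outputs, 0.5, 0.5, 0.8, null, [[image.height, image.width]]);
// Each entry of `segments_info` carries { id, label_id, score } for one detected segment.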
- * @private - */ -function constraint_to_multiple_of(val, multiple, minVal = 0, maxVal = null) { - const a = val / multiple; - let x = bankers_round(a) * multiple; - - if (maxVal !== null && x > maxVal) { - x = Math.floor(a) * multiple; - } - - if (x < minVal) { - x = Math.ceil(a) * multiple; - } - - return x; -} - -/** - * Rounds the height and width down to the closest multiple of size_divisibility - * @param {[number, number]} size The size of the image - * @param {number} divisor The divisor to use. - * @returns {[number, number]} The rounded size. - */ -function enforce_size_divisibility([width, height], divisor) { - return [ - Math.max(Math.floor(width / divisor), 1) * divisor, - Math.max(Math.floor(height / divisor), 1) * divisor - ]; -} - - -/** - * Base class for feature extractors. - * - * @extends Callable - */ -export class FeatureExtractor extends Callable { - /** - * Constructs a new FeatureExtractor instance. - * - * @param {Object} config The configuration for the feature extractor. - */ - constructor(config) { - super(); - this.config = config - } -} - -/** - * @typedef {object} ImageFeatureExtractorResult - * @property {Tensor} pixel_values The pixel values of the batched preprocessed images. - * @property {HeightWidth[]} original_sizes Array of two-dimensional tuples like [[480, 640]]. - * @property {HeightWidth[]} reshaped_input_sizes Array of two-dimensional tuples like [[1000, 1330]]. - */ - -/** - * Feature extractor for image models. - * - * @extends FeatureExtractor - */ -export class ImageFeatureExtractor extends FeatureExtractor { - - /** - * Constructs a new ImageFeatureExtractor instance. - * - * @param {Object} config The configuration for the feature extractor. - * @param {number[]} config.image_mean The mean values for image normalization. - * @param {number[]} config.image_std The standard deviation values for image normalization. - * @param {boolean} config.do_rescale Whether to rescale the image pixel values to the [0,1] range. - * @param {number} config.rescale_factor The factor to use for rescaling the image pixel values. - * @param {boolean} config.do_normalize Whether to normalize the image pixel values. - * @param {boolean} config.do_resize Whether to resize the image. - * @param {number} config.resample What method to use for resampling. - * @param {number|Object} config.size The size to resize the image to. - * @param {boolean} [config.do_flip_channel_order=false] Whether to flip the color channels from RGB to BGR. - * Can be overridden by the `do_flip_channel_order` parameter in the `preprocess` method. - */ - constructor(config) { - super(config); - - this.image_mean = this.config.image_mean ?? this.config.mean; - this.image_std = this.config.image_std ?? this.config.std; - - this.resample = this.config.resample ?? 2; // 2 => bilinear - this.do_rescale = this.config.do_rescale ?? true; - this.rescale_factor = this.config.rescale_factor ?? (1 / 255); - this.do_normalize = this.config.do_normalize; - - this.do_resize = this.config.do_resize; - this.do_thumbnail = this.config.do_thumbnail; - this.size = this.config.size; - this.size_divisibility = this.config.size_divisibility ?? this.config.size_divisor; - - this.do_center_crop = this.config.do_center_crop; - this.crop_size = this.config.crop_size; - this.do_convert_rgb = this.config.do_convert_rgb ?? 
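To make the rounding behaviour of the two helpers above concrete, a small worked example (values chosen purely for illustration; both functions are module-private to this file):

// constraint_to_multiple_of snaps to the nearest multiple using banker's rounding,
// then falls back to floor/ceil so the result respects maxVal and minVal:
constraint_to_multiple_of(257, 32);   // 257 / 32 = 8.03125 -> rounds to 8            -> 256
constraint_to_multiple_of(272, 32);   // 272 / 32 = 8.5     -> round-half-to-even = 8 -> 256

// enforce_size_divisibility rounds each dimension *down* to the divisor,
// but never below one full multiple:
enforce_size_divisibility([1023, 513], 32);   // -> [992, 512]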
true; - this.do_crop_margin = this.config.do_crop_margin; - - this.pad_size = this.config.pad_size; - this.do_pad = this.config.do_pad; - - if (this.do_pad && !this.pad_size && this.size && this.size.width !== undefined && this.size.height !== undefined) { - // Should pad, but no pad size specified - // We infer the pad size from the resize size - this.pad_size = this.size - } - - this.do_flip_channel_order = this.config.do_flip_channel_order ?? false; - } - - /** - * Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any - * corresponding dimension of the specified size. - * @param {RawImage} image The image to be resized. - * @param {{height:number, width:number}} size The size `{"height": h, "width": w}` to resize the image to. - * @param {string | 0 | 1 | 2 | 3 | 4 | 5} [resample=2] The resampling filter to use. - * @returns {Promise} The resized image. - */ - async thumbnail(image, size, resample = 2) { - const input_height = image.height; - const input_width = image.width; - - const output_height = size.height; - const output_width = size.width; - - // We always resize to the smallest of either the input or output size. - let height = Math.min(input_height, output_height) - let width = Math.min(input_width, output_width) - - if (height === input_height && width === input_width) { - return image; - } - if (input_height > input_width) { - width = Math.floor(input_width * height / input_height); - } else if (input_width > input_height) { - height = Math.floor(input_height * width / input_width); - } - return await image.resize(width, height, { resample }); - } - - - /** - * Crops the margin of the image. Gray pixels are considered margin (i.e., pixels with a value below the threshold). - * @param {RawImage} image The image to be cropped. - * @param {number} gray_threshold Value below which pixels are considered to be gray. - * @returns {Promise} The cropped image. - */ - async crop_margin(image, gray_threshold = 200) { - - const gray_image = image.clone().grayscale(); - - const minValue = min(gray_image.data)[0]; - const maxValue = max(gray_image.data)[0]; - const diff = maxValue - minValue; - - if (diff === 0) { - return image; - } - - const threshold = gray_threshold / 255; - - let x_min = gray_image.width, y_min = gray_image.height, x_max = 0, y_max = 0; - const gray_image_data = gray_image.data; - for (let j = 0; j < gray_image.height; ++j) { - const row = j * gray_image.width; - for (let i = 0; i < gray_image.width; ++i) { - if ((gray_image_data[row + i] - minValue) / diff < threshold) { - // We have a non-zero pixel, so we update the min/max values accordingly - x_min = Math.min(x_min, i); - y_min = Math.min(y_min, j); - x_max = Math.max(x_max, i); - y_max = Math.max(y_max, j); - } - } - } - - image = await image.crop([x_min, y_min, x_max, y_max]); - return image; - } - - /** - * Pad the image by a certain amount. - * @param {Float32Array} pixelData The pixel data to pad. - * @param {number[]} imgDims The dimensions of the image (height, width, channels). - * @param {{width:number; height:number}|number} padSize The dimensions of the padded image. - * @param {Object} options The options for padding. - * @param {'constant'|'symmetric'} [options.mode='constant'] The type of padding to add. - * @param {boolean} [options.center=false] Whether to center the image. - * @param {number} [options.constant_values=0] The constant value to use for padding. - * @returns {[Float32Array, number[]]} The padded pixel data and image dimensions. 
- */ - pad_image(pixelData, imgDims, padSize, { - mode = 'constant', - center = false, - constant_values = 0, - } = {}) { - const [imageHeight, imageWidth, imageChannels] = imgDims; - - let paddedImageWidth, paddedImageHeight; - if (typeof padSize === 'number') { - paddedImageWidth = padSize; - paddedImageHeight = padSize; - } else { - paddedImageWidth = padSize.width; - paddedImageHeight = padSize.height; - } - - // Only add padding if there is a difference in size - if (paddedImageWidth !== imageWidth || paddedImageHeight !== imageHeight) { - const paddedPixelData = new Float32Array(paddedImageWidth * paddedImageHeight * imageChannels); - if (Array.isArray(constant_values)) { - // Fill with constant values, cycling through the array - for (let i = 0; i < paddedPixelData.length; ++i) { - paddedPixelData[i] = constant_values[i % imageChannels]; - } - } else if (constant_values !== 0) { - paddedPixelData.fill(constant_values); - } - - const [left, top] = center - ? [Math.floor((paddedImageWidth - imageWidth) / 2), Math.floor((paddedImageHeight - imageHeight) / 2)] - : [0, 0]; - - // Copy the original image into the padded image - for (let i = 0; i < imageHeight; ++i) { - const a = (i + top) * paddedImageWidth; - const b = i * imageWidth; - for (let j = 0; j < imageWidth; ++j) { - const c = (a + j + left) * imageChannels; - const d = (b + j) * imageChannels; - for (let k = 0; k < imageChannels; ++k) { - paddedPixelData[c + k] = pixelData[d + k]; - } - } - } - - if (mode === 'symmetric') { - if (center) { - throw new Error('`center` padding is not supported when `mode` is set to `symmetric`.'); - // TODO: Implement this - } - const h1 = imageHeight - 1; - const w1 = imageWidth - 1; - for (let i = 0; i < paddedImageHeight; ++i) { - const a = i * paddedImageWidth; - const b = calculateReflectOffset(i, h1) * imageWidth; - - for (let j = 0; j < paddedImageWidth; ++j) { - if (i < imageHeight && j < imageWidth) continue; // Do not overwrite original image - const c = (a + j) * imageChannels; - const d = (b + calculateReflectOffset(j, w1)) * imageChannels; - - // Copy channel-wise - for (let k = 0; k < imageChannels; ++k) { - paddedPixelData[c + k] = pixelData[d + k]; - } - } - } - } - - - // Update pixel data and image dimensions - pixelData = paddedPixelData; - imgDims = [paddedImageHeight, paddedImageWidth, imageChannels] - } - return [pixelData, imgDims]; - } - - /** - * Rescale the image' pixel values by `this.rescale_factor`. - * @param {Float32Array} pixelData The pixel data to rescale. - * @returns {void} - */ - rescale(pixelData) { - for (let i = 0; i < pixelData.length; ++i) { - pixelData[i] = this.rescale_factor * pixelData[i]; - } - } - - /** - * Find the target (width, height) dimension of the output image after - * resizing given the input image and the desired size. - * @param {RawImage} image The image to resize. - * @param {any} size The size to use for resizing the image. - * @returns {[number, number]} The target (width, height) dimension of the output image after resizing. - */ - get_resize_output_image_size(image, size) { - // `size` comes in many forms, so we need to handle them all here: - // 1. 
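A small sketch of `pad_image` with constant, centred padding. Here `extractor` stands for any `ImageFeatureExtractor` instance, and the tiny 2x2 input is purely illustrative:

// Pad a 2x2 RGB image (HWC float data) to 4x4, centring the original content:
const [paddedData, paddedDims] = extractor.pad_image(
    pixelData,                 // Float32Array of length 2 * 2 * 3
    [2, 2, 3],                 // [height, width, channels]
    { width: 4, height: 4 },   // target size
    { mode: 'constant', center: true, constant_values: 0 },
);
// paddedDims is [4, 4, 3]; the original pixels now occupy rows 1-2 and columns 1-2,
// and the border holds the constant value (a per-channel array is also accepted).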
`size` is an integer, in which case we resize the image to be a square - - const [srcWidth, srcHeight] = image.size; - - let shortest_edge; - let longest_edge; - - if (this.do_thumbnail) { - // NOTE: custom logic for `Donut` models - const { height, width } = size; - shortest_edge = Math.min(height, width) - } - // Support both formats for backwards compatibility - else if (Number.isInteger(size)) { - shortest_edge = size; - longest_edge = this.config.max_size ?? shortest_edge; - - } else if (size !== undefined) { - // Extract known properties from `size` - shortest_edge = size.shortest_edge; - longest_edge = size.longest_edge; - } - - // If `longest_edge` and `shortest_edge` are set, maintain aspect ratio and resize to `shortest_edge` - // while keeping the largest dimension <= `longest_edge` - if (shortest_edge !== undefined || longest_edge !== undefined) { - // http://opensourcehacker.com/2011/12/01/calculate-aspect-ratio-conserving-resize-for-images-in-javascript/ - // Try resize so that shortest edge is `shortest_edge` (target) - const shortResizeFactor = shortest_edge === undefined - ? 1 // If `shortest_edge` is not set, don't upscale - : Math.max(shortest_edge / srcWidth, shortest_edge / srcHeight); - - const newWidth = srcWidth * shortResizeFactor; - const newHeight = srcHeight * shortResizeFactor; - - // The new width and height might be greater than `longest_edge`, so - // we downscale again to ensure the largest dimension is `longest_edge` - const longResizeFactor = longest_edge === undefined - ? 1 // If `longest_edge` is not set, don't downscale - : Math.min(longest_edge / newWidth, longest_edge / newHeight); - - // To avoid certain floating point precision issues, we round to 2 decimal places - let finalWidth = Math.floor(Number((newWidth * longResizeFactor).toFixed(2))); - let finalHeight = Math.floor(Number((newHeight * longResizeFactor).toFixed(2))); - - if (this.size_divisibility !== undefined) { - [finalWidth, finalHeight] = enforce_size_divisibility([finalWidth, finalHeight], this.size_divisibility) - } - return [finalWidth, finalHeight]; - - } else if (size !== undefined && size.width !== undefined && size.height !== undefined) { - // If `width` and `height` are set, resize to those dimensions - - let newWidth = size.width; - let newHeight = size.height; - - // Custom for DPT models - if (this.config.keep_aspect_ratio && this.config.ensure_multiple_of) { - - // determine new height and width - let scale_height = newHeight / srcHeight; - let scale_width = newWidth / srcWidth; - - // scale as little as possible - if (Math.abs(1 - scale_width) < Math.abs(1 - scale_height)) { - // fit width - scale_height = scale_width; - } else { - // fit height - scale_width = scale_height; - } - - newHeight = constraint_to_multiple_of(scale_height * srcHeight, this.config.ensure_multiple_of); - newWidth = constraint_to_multiple_of(scale_width * srcWidth, this.config.ensure_multiple_of); - } - - return [newWidth, newHeight]; - - } else if (this.size_divisibility !== undefined) { - return enforce_size_divisibility([srcWidth, srcHeight], this.size_divisibility); - } else { - throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(size)}`); - } - } - - /** - * Resizes the image. - * @param {RawImage} image The image to resize. - * @returns {Promise} The resized image. 
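A worked example of the shortest/longest-edge branch above, with illustrative numbers (a 1000x500 input, size = { shortest_edge: 800, longest_edge: 1333 }, and no size_divisibility set):

// shortResizeFactor = max(800 / 1000, 800 / 500) = 1.6        -> 1600 x 800
// longResizeFactor  = min(1333 / 1600, 1333 / 800) ~= 0.833125 -> 1333.0 x 666.5
// floor(...)                                                   -> [1333, 666]
extractor.get_resize_output_image_size(image, { shortest_edge: 800, longest_edge: 1333 });
// -> [1333, 666] (width, height) for the 1000x500 input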
- */ - async resize(image) { - const [newWidth, newHeight] = this.get_resize_output_image_size(image, this.size); - return await image.resize(newWidth, newHeight, { - resample: this.resample, - }); - } - - /** - * @typedef {object} PreprocessedImage - * @property {HeightWidth} original_size The original size of the image. - * @property {HeightWidth} reshaped_input_size The reshaped input size of the image. - * @property {Tensor} pixel_values The pixel values of the preprocessed image. - */ - - /** - * Preprocesses the given image. - * - * @param {RawImage} image The image to preprocess. - * @param {Object} overrides The overrides for the preprocessing options. - * @returns {Promise} The preprocessed image. - */ - async preprocess(image, { - do_normalize = null, - do_pad = null, - do_convert_rgb = null, - do_convert_grayscale = null, - do_flip_channel_order = null, - } = {}) { - if (this.do_crop_margin) { - // NOTE: Specific to nougat processors. This is done before resizing, - // and can be interpreted as a pre-preprocessing step. - image = await this.crop_margin(image); - } - - const [srcWidth, srcHeight] = image.size; // original image size - - // Convert image to RGB if specified in config. - if (do_convert_rgb ?? this.do_convert_rgb) { - image = image.rgb(); - } else if (do_convert_grayscale) { - image = image.grayscale(); - } - - // TODO: - // For efficiency reasons, it might be best to merge the resize and center crop operations into one. - - // Resize all images - if (this.do_resize) { - image = await this.resize(image); - } - - // Resize the image using thumbnail method. - if (this.do_thumbnail) { - image = await this.thumbnail(image, this.size, this.resample); - } - - if (this.do_center_crop) { - - let crop_width; - let crop_height; - if (Number.isInteger(this.crop_size)) { - crop_width = this.crop_size; - crop_height = this.crop_size; - } else { - crop_width = this.crop_size.width; - crop_height = this.crop_size.height; - } - - image = await image.center_crop(crop_width, crop_height); - } - - /** @type {HeightWidth} */ - const reshaped_input_size = [image.height, image.width]; - - // NOTE: All pixel-level manipulation (i.e., modifying `pixelData`) - // occurs with data in the hwc format (height, width, channels), - // to emulate the behavior of the original Python code (w/ numpy). - let pixelData = Float32Array.from(image.data); - let imgDims = [image.height, image.width, image.channels]; - - if (this.do_rescale) { - this.rescale(pixelData); - } - - if (do_normalize ?? this.do_normalize) { - let image_mean = this.image_mean; - if (!Array.isArray(this.image_mean)) { - image_mean = new Array(image.channels).fill(image_mean); - } - - let image_std = this.image_std; - if (!Array.isArray(this.image_std)) { - image_std = new Array(image.channels).fill(image_mean); - } - - if (image_mean.length !== image.channels || image_std.length !== image.channels) { - throw new Error(`When set to arrays, the length of \`image_mean\` (${image_mean.length}) and \`image_std\` (${image_std.length}) must match the number of channels in the image (${image.channels}).`); - } - - for (let i = 0; i < pixelData.length; i += image.channels) { - for (let j = 0; j < image.channels; ++j) { - pixelData[i + j] = (pixelData[i + j] - image_mean[j]) / image_std[j]; - } - } - } - - // do padding after rescaling/normalizing - if (do_pad ?? 
this.do_pad) { - if (this.pad_size) { - const padded = this.pad_image(pixelData, [image.height, image.width, image.channels], this.pad_size); - [pixelData, imgDims] = padded; // Update pixel data and image dimensions - } else if (this.size_divisibility) { - const [paddedWidth, paddedHeight] = enforce_size_divisibility([imgDims[1], imgDims[0]], this.size_divisibility); - [pixelData, imgDims] = this.pad_image(pixelData, imgDims, { width: paddedWidth, height: paddedHeight }); - } - } - - if (do_flip_channel_order ?? this.do_flip_channel_order) { - if (imgDims[2] !== 3) { - throw new Error('Flipping channel order is only supported for RGB images.'); - } - // Convert RGB to BGR - for (let i = 0; i < pixelData.length; i += 3) { - const temp = pixelData[i]; - pixelData[i] = pixelData[i + 2]; - pixelData[i + 2] = temp; - } - } - - const pixel_values = new Tensor('float32', pixelData, imgDims) - .permute(2, 0, 1); // convert to channel dimension format (hwc -> chw) - - return { - original_size: [srcHeight, srcWidth], - reshaped_input_size: reshaped_input_size, - pixel_values, - } - } - - /** - * Calls the feature extraction process on an array of images, - * preprocesses each image, and concatenates the resulting - * features into a single Tensor. - * @param {RawImage[]} images The image(s) to extract features from. - * @param {...any} args Additional arguments. - * @returns {Promise} An object containing the concatenated pixel values (and other metadata) of the preprocessed images. - */ - async _call(images, ...args) { - if (!Array.isArray(images)) { - images = [images]; - } - /** @type {PreprocessedImage[]} */ - const imageData = await Promise.all(images.map(x => this.preprocess(x))); - - // Stack pixel values - const pixel_values = stack(imageData.map(x => x.pixel_values), 0); - - return { - pixel_values, - - // Original sizes of images - original_sizes: imageData.map(x => x.original_size), - - // Reshaped sizes of images, before padding or cropping - reshaped_input_sizes: imageData.map(x => x.reshaped_input_size), - } - } - -} - -export class SapiensFeatureExtractor extends ImageFeatureExtractor { - /** @type {typeof post_process_semantic_segmentation} */ - post_process_semantic_segmentation(...args) { - return post_process_semantic_segmentation(...args); - } -} -export class SegformerFeatureExtractor extends ImageFeatureExtractor { - /** @type {typeof post_process_semantic_segmentation} */ - post_process_semantic_segmentation(...args) { - return post_process_semantic_segmentation(...args); - } -} -export class PvtImageProcessor extends ImageFeatureExtractor { } -export class DPTFeatureExtractor extends ImageFeatureExtractor { } -export class DPTImageProcessor extends DPTFeatureExtractor { } // NOTE: extends DPTFeatureExtractor -export class BitImageProcessor extends ImageFeatureExtractor { } -export class GLPNFeatureExtractor extends ImageFeatureExtractor { } -export class CLIPFeatureExtractor extends ImageFeatureExtractor { } -export class CLIPImageProcessor extends CLIPFeatureExtractor { } // NOTE: extends CLIPFeatureExtractor -export class ChineseCLIPFeatureExtractor extends ImageFeatureExtractor { } -export class SiglipImageProcessor extends ImageFeatureExtractor { } -export class ConvNextFeatureExtractor extends ImageFeatureExtractor { - constructor(config) { - super(config); - - /** - * Percentage of the image to crop. Only has an effect if this.size < 384. - */ - this.crop_pct = this.config.crop_pct ?? 
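A minimal end-to-end sketch of the `_call` method above, assuming an illustrative ViT checkpoint and the pre-refactor `AutoProcessor` entry point:

import { AutoProcessor, RawImage } from '@huggingface/transformers';

const processor = await AutoProcessor.from_pretrained('Xenova/vit-base-patch16-224'); // illustrative
const image = await RawImage.read('cats.jpg');

const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
// pixel_values:         float32 Tensor of shape [batch_size, channels, height, width]
// original_sizes:       e.g. [[480, 640]], the (height, width) before preprocessing
// reshaped_input_sizes: e.g. [[224, 224]], the size actually fed to the model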
(224 / 256); - } - - async resize(image) { - const shortest_edge = this.size?.shortest_edge; - if (shortest_edge === undefined) { - throw new Error(`Size dictionary must contain 'shortest_edge' key.`); - } - - if (shortest_edge < 384) { - // maintain same ratio, resizing shortest edge to shortest_edge/crop_pct - const resize_shortest_edge = Math.floor(shortest_edge / this.crop_pct); - - const [newWidth, newHeight] = this.get_resize_output_image_size(image, { - shortest_edge: resize_shortest_edge, - }); - - image = await image.resize(newWidth, newHeight, { - resample: this.resample, - }); - - // then crop to (shortest_edge, shortest_edge) - image = await image.center_crop(shortest_edge, shortest_edge); - } else { - // warping (no cropping) when evaluated at 384 or larger - image = await image.resize(shortest_edge, shortest_edge, { - resample: this.resample, - }); - } - - return image; - } -} -export class ConvNextImageProcessor extends ConvNextFeatureExtractor { } // NOTE extends ConvNextFeatureExtractor -export class ViTFeatureExtractor extends ImageFeatureExtractor { } -export class ViTImageProcessor extends ImageFeatureExtractor { } - -export class EfficientNetImageProcessor extends ImageFeatureExtractor { - constructor(config) { - super(config); - this.include_top = this.config.include_top ?? true; - if (this.include_top) { - this.image_std = this.image_std.map(x => x * x); - } - } -} - -export class MobileNetV1FeatureExtractor extends ImageFeatureExtractor { } -export class MobileNetV2FeatureExtractor extends ImageFeatureExtractor { } -export class MobileNetV3FeatureExtractor extends ImageFeatureExtractor { } -export class MobileNetV4FeatureExtractor extends ImageFeatureExtractor { } - -export class MobileViTFeatureExtractor extends ImageFeatureExtractor { } -export class MobileViTImageProcessor extends MobileViTFeatureExtractor { } // NOTE extends MobileViTFeatureExtractor -export class OwlViTFeatureExtractor extends ImageFeatureExtractor { - /** @type {typeof post_process_object_detection} */ - post_process_object_detection(...args) { - return post_process_object_detection(...args); - } -} -export class Owlv2ImageProcessor extends OwlViTFeatureExtractor { } // NOTE extends OwlViTFeatureExtractor - -export class RTDetrImageProcessor extends ImageFeatureExtractor { - /** @type {typeof post_process_object_detection} */ - post_process_object_detection(...args) { - return post_process_object_detection(...args); - } -} - -export class DeiTFeatureExtractor extends ImageFeatureExtractor { } -export class BeitFeatureExtractor extends ImageFeatureExtractor { } -export class DonutFeatureExtractor extends ImageFeatureExtractor { - pad_image(pixelData, imgDims, padSize, options = {}) { - const [imageHeight, imageWidth, imageChannels] = imgDims; - - let image_mean = this.image_mean; - if (!Array.isArray(this.image_mean)) { - image_mean = new Array(imageChannels).fill(image_mean); - } - - let image_std = this.image_std; - if (!Array.isArray(image_std)) { - image_std = new Array(imageChannels).fill(image_mean); - } - - const constant_values = image_mean.map((x, i) => - x / image_std[i]); - - return super.pad_image(pixelData, imgDims, padSize, { - center: true, - - // Since normalization is done after padding, we need to use certain constant values to ensure the same behaviour is observed. 
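To make the ConvNeXt resize rule above concrete, a short worked example assuming size = { shortest_edge: 224 } and the default crop_pct = 224 / 256:

// resize_shortest_edge = floor(224 / 0.875) = 256
//   -> resize so the shortest edge becomes 256 (aspect ratio preserved),
//   -> then centre-crop to 224 x 224.
// For shortest_edge >= 384, the image is instead warped directly to
// (shortest_edge, shortest_edge) with no centre crop.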
- // For more information, see https://github.com/huggingface/transformers/blob/main/src/transformers/models/donut/image_processing_donut.py#L433-L451 - constant_values: constant_values, - ...options, - }); - } -} -export class DonutImageProcessor extends DonutFeatureExtractor { } // NOTE extends DonutFeatureExtractor -export class NougatImageProcessor extends DonutFeatureExtractor { } // NOTE extends DonutFeatureExtractor - -/** - * @typedef {object} DetrFeatureExtractorResultProps - * @property {Tensor} pixel_mask - * @typedef {ImageFeatureExtractorResult & DetrFeatureExtractorResultProps} DetrFeatureExtractorResult - */ - -/** - * Detr Feature Extractor. - * - * @extends ImageFeatureExtractor - */ -export class DetrFeatureExtractor extends ImageFeatureExtractor { - /** - * Calls the feature extraction process on an array of images, preprocesses - * each image, and concatenates the resulting features into a single Tensor. - * @param {RawImage[]} images The image(s) to extract features from. - * @returns {Promise} An object containing the concatenated pixel values of the preprocessed images. - */ - async _call(images) { - const result = await super._call(images); - - // TODO support differently-sized images, for now assume all images are the same size. - // TODO support different mask sizes (not just 64x64) - // Currently, just fill pixel mask with 1s - const maskSize = [result.pixel_values.dims[0], 64, 64]; - const pixel_mask = full(maskSize, 1n); - - return { ...result, pixel_mask }; - } - - /** @type {typeof post_process_object_detection} */ - post_process_object_detection(...args) { - return post_process_object_detection(...args); - } - - /** @type {typeof post_process_panoptic_segmentation} */ - post_process_panoptic_segmentation(...args) { - return post_process_panoptic_segmentation(...args); - } - - post_process_instance_segmentation() { - // TODO - throw Error("Not implemented yet"); - } -} - -export class MaskFormerFeatureExtractor extends ImageFeatureExtractor { - - /** @type {typeof post_process_panoptic_segmentation} */ - post_process_panoptic_segmentation(...args) { - return post_process_panoptic_segmentation(...args); - } - - post_process_instance_segmentation() { - // TODO - throw Error("Not implemented yet"); - } -} - - -export class YolosFeatureExtractor extends ImageFeatureExtractor { - /** @type {typeof post_process_object_detection} */ - post_process_object_detection(...args) { - return post_process_object_detection(...args); - } -} - -/** - * @typedef {object} SamImageProcessorResult - * @property {Tensor} pixel_values - * @property {HeightWidth[]} original_sizes - * @property {HeightWidth[]} reshaped_input_sizes - * @property {Tensor} [input_points] - * @property {Tensor} [input_labels] - * @property {Tensor} [input_boxes] - */ - -export class SamImageProcessor extends ImageFeatureExtractor { - - /** - * - * @param {any} input_points - * @param {HeightWidth[]} original_sizes - * @param {HeightWidth[]} reshaped_input_sizes - * @returns {Tensor} - */ - reshape_input_points(input_points, original_sizes, reshaped_input_sizes, is_bounding_box = false) { - - // Make deep copy to avoid altering user's input - input_points = structuredClone(input_points); - let shape = calculateDimensions(input_points); - - // TODO: add support for 2D input_points - if (shape.length === 3) { - // Correct user's input - if (!is_bounding_box) { - shape = [1, ...shape]; - } - input_points = [input_points]; - } else if (shape.length !== 4) { - throw Error("The input_points must be a 4D tensor of 
shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.") - } - - // Reshape input points - for (let i = 0; i < input_points.length; ++i) { // batch_size - let originalImageSize = original_sizes[i]; - let reshapedImageSize = reshaped_input_sizes[i]; - - let resizeFactors = [ - reshapedImageSize[0] / originalImageSize[0], - reshapedImageSize[1] / originalImageSize[1] - ] - - for (let j = 0; j < input_points[i].length; ++j) { // point_batch_size - for (let k = 0; k < input_points[i][j].length; ++k) { // nb_points_per_image - for (let w = 0; w < input_points[i][j][k].length; ++w) { // 2 or 4 - input_points[i][j][k][w] *= resizeFactors[w % 2]; - } - } - } - } - - return new Tensor( - 'float32', - Float32Array.from(input_points.flat(Infinity)), - shape - ) - - } - - /** - * - * @param {any} input_labels - * @param {Tensor} input_points - * @returns {Tensor} - */ - add_input_labels(input_labels, input_points) { - let shape = calculateDimensions(input_labels); - if (shape.length === 2) { - // Correct user's input - shape = [1, ...shape]; - input_labels = [input_labels]; - } else if (shape.length !== 3) { - throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.") - } - - if (shape.some((x, i) => x !== input_points.dims[i])) { - throw Error(`The first ${shape.length} dimensions of 'input_points' and 'input_labels' must be the same.`) - } - return new Tensor( - 'int64', - input_labels.flat(Infinity).map(BigInt), - shape, - ) - } - /** - * @param {any[]} images The URL(s) of the image(s) to extract features from. - * @param {Object} [options] Additional options for the processor. - * @param {any} [options.input_points=null] A 3D or 4D array, representing the input points provided by the user. - * - 3D: `[point_batch_size, nb_points_per_image, 2]`. In this case, `batch_size` is assumed to be 1. - * - 4D: `[batch_size, point_batch_size, nb_points_per_image, 2]`. - * @param {any} [options.input_labels=null] A 2D or 3D array, representing the input labels for the points, used by the prompt encoder to encode the prompt. - * - 2D: `[point_batch_size, nb_points_per_image]`. In this case, `batch_size` is assumed to be 1. - * - 3D: `[batch_size, point_batch_size, nb_points_per_image]`. - * @param {number[][][]} [options.input_boxes=null] A 3D array of shape `(batch_size, num_boxes, 4)`, representing the input boxes provided by the user. - * This is used by the prompt encoder to encode the prompt. Generally yields to much better generated masks. - * The processor will generate a tensor, with each dimension corresponding respectively to the image batch size, - * the number of boxes per image and the coordinates of the top left and botton right point of the box. 
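A hedged sketch of the call documented above, prompting SAM with a single point. The checkpoint id and the surrounding `SamModel`/`AutoProcessor` loading code are assumptions based on the library's existing SAM support, not part of this file:

import { SamModel, AutoProcessor, RawImage } from '@huggingface/transformers';

const model_id = 'Xenova/slimsam-77-uniform'; // illustrative SAM checkpoint
const model = await SamModel.from_pretrained(model_id);
const processor = await AutoProcessor.from_pretrained(model_id);

const image = await RawImage.read('corgi.jpg');
const input_points = [[[340, 250]]]; // 3D: [point_batch_size, nb_points_per_image, 2]

const inputs = await processor(image, { input_points });
const outputs = await model(inputs);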
- * In the order (`x1`, `y1`, `x2`, `y2`): - * - `x1`: the x coordinate of the top left point of the input box - * - `y1`: the y coordinate of the top left point of the input box - * - `x2`: the x coordinate of the bottom right point of the input box - * - `y2`: the y coordinate of the bottom right point of the input box - * @returns {Promise} - */ - async _call(images, { - input_points = null, - input_labels = null, - input_boxes = null - } = {}) { - // TODO allow user to use preprocessed images - /** @type {SamImageProcessorResult} */ - const processed = await super._call(images); - - if (input_points) { - processed.input_points = this.reshape_input_points( - input_points, processed.original_sizes, processed.reshaped_input_sizes - ); - } - - if (input_labels) { - if (!processed.input_points) { - throw Error("`input_points` must be provided if `input_labels` are provided.") - } - processed.input_labels = this.add_input_labels(input_labels, processed.input_points); - } - - if (input_boxes) { - processed.input_boxes = this.reshape_input_points( - input_boxes, processed.original_sizes, processed.reshaped_input_sizes, true, - ); - } - - return processed; - } - - /** - * Remove padding and upscale masks to the original image size. - * @param {Tensor} masks Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format. - * @param {[number, number][]} original_sizes The original sizes of each image before it was resized to the model's expected input shape, in (height, width) format. - * @param {[number, number][]} reshaped_input_sizes The size of each image as it is fed to the model, in (height, width) format. Used to remove padding. - * @param {Object} options Optional parameters for post-processing. - * @param {number} [options.mask_threshold] The threshold to use for binarizing the masks. - * @param {boolean} [options.binarize] Whether to binarize the masks. - * @param {Object} [options.pad_size] The target size the images were padded to before being passed to the model. If `null`, the target size is assumed to be the processor's `pad_size`. - * @param {number} [options.pad_size.height] The height the images were padded to. - * @param {number} [options.pad_size.width] The width the images were padded to. - * @returns {Promise} Batched masks in batch_size, num_channels, height, width) format, where (height, width) is given by original_size. - */ - async post_process_masks(masks, original_sizes, reshaped_input_sizes, { - mask_threshold = 0.0, - binarize = true, - pad_size = null, - } = {}) { - // masks: [1, 1, 3, 256, 256] - - const output_masks = []; - - pad_size = pad_size ?? 
this.pad_size; - - /** @type {[number, number]} */ - const target_image_size = [pad_size.height, pad_size.width]; - - for (let i = 0; i < original_sizes.length; ++i) { - const original_size = original_sizes[i]; - const reshaped_input_size = reshaped_input_sizes[i]; - - // Upscale mask to padded size - let interpolated_mask = (await interpolate_4d( - masks[i], - { mode: 'bilinear', size: target_image_size } - )); - - // Crop mask - interpolated_mask = interpolated_mask.slice(null, null, [0, reshaped_input_size[0]], [0, reshaped_input_size[1]]); - - // Downscale mask - interpolated_mask = (await interpolate_4d( - interpolated_mask, - { mode: 'bilinear', size: original_size } - )); - - if (binarize) { - const data = interpolated_mask.data; - const binarizedMaskData = new Uint8Array(data.length); - for (let i = 0; i < data.length; ++i) { - if (data[i] > mask_threshold) { - binarizedMaskData[i] = 1; - } - } - interpolated_mask = new Tensor( - 'bool', - binarizedMaskData, - interpolated_mask.dims - ) - } - - output_masks.push(interpolated_mask); - } - - return output_masks; - } - - /** - * Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer. - * @param {RawImage} image Input original image - * @param {number} target_size Target size of the resized image - * @param {Object} options Options for generating crop boxes - * @param {number} [options.crop_n_layers] If >0, mask prediction will be run again on crops of the image. - * Sets the number of layers to run, where each layer has 2**i_layer number of image crops. - * @param {number} [options.overlap_ratio] Sets the degree to which crops overlap. In the first crop layer, - * crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. - * @param {number} [options.points_per_crop] Number of points to sample from each crop. - * @param {number} [options.crop_n_points_downscale_factor] The number of points-per-side sampled in layer n is - * scaled down by crop_n_points_downscale_factor**n. - * @returns {Object} An object containing the crop boxes, number of points per crop, cropped images, and input labels. - */ - generate_crop_boxes(image, target_size, { - crop_n_layers = 0, - overlap_ratio = 512 / 1500, - points_per_crop = 32, - crop_n_points_downscale_factor = 1, - } = {}) { - // TODO: Implement - // return { crop_boxes, points_per_crop, cropped_images, input_labels } - } -} - -export class Swin2SRImageProcessor extends ImageFeatureExtractor { - pad_image(pixelData, imgDims, padSize, options = {}) { - // NOTE: In this case, `padSize` represents the size of the sliding window for the local attention. - // In other words, the image is padded so that its width and height are multiples of `padSize`. - const [imageHeight, imageWidth, imageChannels] = imgDims; - - return super.pad_image(pixelData, imgDims, { - // NOTE: For Swin2SR models, the original python implementation adds padding even when the image's width/height is already - // a multiple of `pad_size`. However, this is most likely a bug (PR: https://github.com/mv-lab/swin2sr/pull/19). - // For this reason, we only add padding when the image's width/height is not a multiple of `pad_size`. 
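Continuing the SAM sketch above, the predicted masks are mapped back to the original image size with `post_process_masks` (defined on `SamImageProcessor` above; here assumed to be forwarded by the processor wrapper):

const masks = await processor.post_process_masks(
    outputs.pred_masks,          // e.g. [1, 1, 3, 256, 256], as noted in the code above
    inputs.original_sizes,
    inputs.reshaped_input_sizes,
);
// masks[0] is a boolean Tensor whose last two dimensions match the original image,
// and outputs.iou_scores can be used to rank the candidate masks.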
- width: imageWidth + (padSize - imageWidth % padSize) % padSize, - height: imageHeight + (padSize - imageHeight % padSize) % padSize, - }, { - mode: 'symmetric', - center: false, - constant_values: -1, - ...options, - }) - } -} - -export class VitMatteImageProcessor extends ImageFeatureExtractor { - /** - * Calls the feature extraction process on an array of images, preprocesses - * each image, and concatenates the resulting features into a single Tensor. - * @param {RawImage[]} images The image(s) to extract features from. - * @param {RawImage[]} trimaps The trimaps(s) to extract features from. - * @returns {Promise} An object containing the concatenated pixel values of the preprocessed images. - */ - async _call(images, trimaps) { - if (!Array.isArray(images)) { - images = [images]; - } - if (!Array.isArray(trimaps)) { - trimaps = [trimaps]; - } - - const imageData = await Promise.all(images.map(x => this.preprocess(x))); - const trimapData = await Promise.all(trimaps.map(x => this.preprocess(x, { - do_normalize: false, - do_convert_rgb: false, - do_convert_grayscale: true, - }))); - - - // Stack pixel values - const pixel_values = stack(imageData.map( - // Concatenate images and trimaps - (x, i) => cat([x.pixel_values, trimapData[i].pixel_values], 0) - ), 0); - - return { - pixel_values, - - // Original sizes of images - original_sizes: imageData.map(x => x.original_size), - - // Reshaped sizes of images, before padding or cropping - reshaped_input_sizes: imageData.map(x => x.reshaped_input_size), - } - } -} - -export class WhisperFeatureExtractor extends FeatureExtractor { - - constructor(config) { - super(config); - - // Prefer given `mel_filters` from preprocessor_config.json, or calculate them if they don't exist. - this.config.mel_filters ??= mel_filter_bank( - Math.floor(1 + this.config.n_fft / 2), // num_frequency_bins - this.config.feature_size, // num_mel_filters - 0.0, // min_frequency - 8000.0, // max_frequency - this.config.sampling_rate, // sampling_rate - "slaney", // norm - "slaney", // mel_scale - ); - - this.window = window_function(this.config.n_fft, 'hann'); - } - - /** - * Computes the log-Mel spectrogram of the provided audio waveform. - * @param {Float32Array|Float64Array} waveform The audio waveform to process. - * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. - */ - async _extract_fbank_features(waveform) { - const features = await spectrogram( - waveform, - this.window, // window - this.config.n_fft, // frame_length - this.config.hop_length, // hop_length - { - power: 2.0, - mel_filters: this.config.mel_filters, - log_mel: 'log10', - - // Custom - max_num_frames: this.config.nb_max_frames, // 3000 - } - ) - - const data = features.data; - const maxValue = max(data)[0]; - - for (let i = 0; i < data.length; ++i) { - data[i] = (Math.max(data[i], maxValue - 8.0) + 4.0) / 4.0; - } - - return features; - } - - /** - * Asynchronously extracts features from a given audio using the provided configuration. - * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. - * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. - */ - async _call(audio) { - validate_audio_inputs(audio, 'WhisperFeatureExtractor'); - - let waveform; - if (audio.length > this.config.n_samples) { - console.warn( - "Attempting to extract features for audio longer than 30 seconds. 
" + - "If using a pipeline to extract transcript from a long audio clip, " + - "remember to specify `chunk_length_s` and/or `stride_length_s`." - ); - waveform = audio.slice(0, this.config.n_samples); - } else { - // pad with zeros - waveform = new Float32Array(this.config.n_samples); - waveform.set(audio); - } - - const features = await this._extract_fbank_features(waveform); - - return { - input_features: features.unsqueeze_(0) - }; - } -} - -export class Wav2Vec2FeatureExtractor extends FeatureExtractor { - - /** - * @param {Float32Array} input_values - * @returns {Float32Array} - */ - _zero_mean_unit_var_norm(input_values) { - // TODO support batch? - const sum = input_values.reduce((a, b) => a + b, 0); - const mean = sum / input_values.length; - const variance = input_values.reduce((a, b) => a + (b - mean) ** 2, 0) / input_values.length; - return input_values.map(x => (x - mean) / Math.sqrt(variance + 1e-7)); - } - - /** - * Asynchronously extracts features from a given audio using the provided configuration. - * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. - * @returns {Promise<{ input_values: Tensor; attention_mask: Tensor }>} A Promise resolving to an object containing the extracted input features and attention mask as Tensors. - */ - async _call(audio) { - validate_audio_inputs(audio, 'Wav2Vec2FeatureExtractor'); - - if (audio instanceof Float64Array) { - audio = new Float32Array(audio); - } - - let input_values = audio; - - // zero-mean and unit-variance normalization - if (this.config.do_normalize) { - input_values = this._zero_mean_unit_var_norm(input_values); - } - - // TODO: allow user to pass in attention mask - const shape = [1, input_values.length]; - return { - input_values: new Tensor('float32', input_values, shape), - attention_mask: new Tensor('int64', new BigInt64Array(input_values.length).fill(1n), shape) - }; - } -} - -export class SeamlessM4TFeatureExtractor extends FeatureExtractor { - - constructor(config) { - super(config); - - const sampling_rate = this.config.sampling_rate; - const mel_filters = mel_filter_bank( - 256, // num_frequency_bins - this.config.num_mel_bins, // num_mel_filters - 20, // min_frequency - Math.floor(sampling_rate / 2), // max_frequency - sampling_rate, // sampling_rate - null, // norm - "kaldi", // mel_scale - true, // triangularize_in_mel_space - ); - - // Do padding: - for (let i = 0; i < mel_filters.length; ++i) { - mel_filters[i].push(0); - } - this.mel_filters = mel_filters; - - this.window = window_function(400, 'povey', { - periodic: false, - }) - } - - /** - * Computes the log-Mel spectrogram of the provided audio waveform. - * @param {Float32Array|Float64Array} waveform The audio waveform to process. - * @param {number} max_length The maximum number of frames to return. - * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. 
- */ - async _extract_fbank_features(waveform, max_length) { - // NOTE: We don't pad/truncate since that is passed in as `max_num_frames` - - // Kaldi compliance: 16-bit signed integers - // 32768 == 2 ** 15 - waveform = waveform.map((/** @type {number} */ x) => x * 32768) - - return spectrogram( - waveform, - this.window, // window - 400, // frame_length - 160, // hop_length - { - fft_length: 512, - power: 2.0, - center: false, - preemphasis: 0.97, - mel_filters: this.mel_filters, - log_mel: 'log', - mel_floor: 1.192092955078125e-07, - remove_dc_offset: true, - - // Custom - max_num_frames: max_length, - transpose: true, - } - ) - } - - /** - * Asynchronously extracts features from a given audio using the provided configuration. - * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. - * @param {Object} options Optional parameters for feature extraction. - * @param {boolean} [options.padding=true] Whether to pad the sequence to a multiple of `pad_to_multiple_of`. - * @param {number} [options.pad_to_multiple_of=2] The number to pad the sequence to a multiple of. - * @param {boolean} [options.do_normalize_per_mel_bins=true] Whether or not to zero-mean unit-variance normalize the input per mel-channel. - * @param {boolean} [options.return_attention_mask=true] Whether to return the attention mask. - * @returns {Promise<{ input_features: Tensor, attention_mask?: Tensor }>} A Promise resolving to an object containing the extracted input features and attention masks as Tensors. - */ - async _call(audio, { - padding = true, - pad_to_multiple_of = 2, - do_normalize_per_mel_bins = true, - return_attention_mask = true, - } = {}) { - validate_audio_inputs(audio, 'SeamlessM4TFeatureExtractor'); - - let features = await this._extract_fbank_features(audio, this.config.max_length); - - if (do_normalize_per_mel_bins) { - const [num_features, feature_size] = features.dims; - const data = features.data; - for (let i = 0; i < feature_size; ++i) { - let sum = 0; - for (let j = 0; j < num_features; ++j) { - sum += data[j * feature_size + i]; - } - - const mean = sum / num_features; - - let variance = 0; - for (let j = 0; j < num_features; ++j) { - variance += (data[j * feature_size + i] - mean) ** 2; - } - variance /= num_features - 1; // NOTE: We use ddof=1 - - const std = Math.sqrt(variance + 1e-7); - for (let j = 0; j < num_features; ++j) { - const index = j * feature_size + i; - data[index] = (data[index] - mean) / std; - } - } - } - - let padded_attention_mask; - if (padding) { - const [num_frames, num_channels] = features.dims; - const data = /** @type {Float32Array} */(features.data); - - const pad_size = num_frames % pad_to_multiple_of; - if (pad_size > 0) { - const padded_data = new Float32Array(num_channels * (num_frames + pad_size)); - padded_data.set(data) - padded_data.fill(this.config.padding_value, data.length) - - const numPaddedFrames = num_frames + pad_size; - features = new Tensor( - features.type, - padded_data, - [numPaddedFrames, num_channels], - ) - - if (return_attention_mask) { - padded_attention_mask = new Tensor( - 'int64', - new BigInt64Array(numPaddedFrames), - [1, numPaddedFrames], - ) - padded_attention_mask.data.fill(1n, 0, num_frames); - } - } - } - - const [num_frames, num_channels] = features.dims; - - const stride = this.config.stride; - const remainder = num_frames % stride; - if (remainder !== 0) { - throw new Error(`The number of frames (${num_frames}) must be a multiple of the stride (${stride}).`) - } - - const input_features = 
features.view( - 1, - Math.floor(num_frames / stride), - num_channels * stride, - ); - - const result = { input_features } - - if (return_attention_mask) { - const reshapedNumFrames = input_features.dims[1]; - - const attention_mask_data = new BigInt64Array(reshapedNumFrames); - - if (padded_attention_mask) { - const padded_attention_mask_data = padded_attention_mask.data; - for (let i = 1, j = 0; i < num_frames; i += stride, ++j) { - attention_mask_data[j] = padded_attention_mask_data[i]; - } - } else { - attention_mask_data.fill(1n); - } - result.attention_mask = new Tensor( - 'int64', - attention_mask_data, - [1, reshapedNumFrames], - ); - } - - return result; - } -} - -export class ASTFeatureExtractor extends FeatureExtractor { - - - constructor(config) { - super(config); - - const sampling_rate = this.config.sampling_rate; - const mel_filters = mel_filter_bank( - 256, // num_frequency_bins - this.config.num_mel_bins, // num_mel_filters - 20, // min_frequency - Math.floor(sampling_rate / 2), // max_frequency - sampling_rate, // sampling_rate - null, // norm - "kaldi", // mel_scale - true, // triangularize_in_mel_space - ); - - // Do padding: - for (let i = 0; i < mel_filters.length; ++i) { - mel_filters[i].push(0); - } - this.mel_filters = mel_filters; - - this.window = window_function(400, 'hann', { - periodic: false, - }) - - this.mean = this.config.mean; - this.std = this.config.std; - } - - /** - * Computes the log-Mel spectrogram of the provided audio waveform. - * @param {Float32Array|Float64Array} waveform The audio waveform to process. - * @param {number} max_length The maximum number of frames to return. - * @returns {Promise} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers. - */ - async _extract_fbank_features(waveform, max_length) { - // NOTE: We don't pad/truncate since that is passed in as `max_num_frames` - return spectrogram( - waveform, - this.window, // window - 400, // frame_length - 160, // hop_length - { - fft_length: 512, - power: 2.0, - center: false, - preemphasis: 0.97, - mel_filters: this.mel_filters, - log_mel: 'log', - mel_floor: 1.192092955078125e-07, - remove_dc_offset: true, - - // Custom - max_num_frames: max_length, - transpose: true, - } - ) - } - - - /** - * Asynchronously extracts features from a given audio using the provided configuration. - * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. - * @returns {Promise<{ input_values: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. 
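A minimal sketch of how the audio feature extractors above are typically invoked, using Whisper as the example. The checkpoint id is illustrative, and `read_audio` is the helper already referenced in `validate_audio_inputs`:

import { AutoProcessor, read_audio } from '@huggingface/transformers';

const processor = await AutoProcessor.from_pretrained('Xenova/whisper-tiny.en'); // illustrative
const audio = await read_audio('sample.wav', 16000); // Float32Array at the model's sampling rate

const { input_features } = await processor(audio);
// For Whisper: a float32 Tensor of shape [1, 80, 3000] (80 mel bins x 3000 frames = 30 s),
// zero-padded or truncated exactly as in WhisperFeatureExtractor._call above.
// The other extractors follow the same pattern but return their own keys,
// e.g. `input_values` for Wav2Vec2 and AST.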
-     */
-    async _call(audio) {
-        validate_audio_inputs(audio, 'ASTFeatureExtractor');
-
-        const features = await this._extract_fbank_features(audio, this.config.max_length);
-        if (this.config.do_normalize) {
-            // Normalize the input audio spectrogram to have mean=0, std=0.5
-            const denom = this.std * 2;
-            const features_data = features.data;
-            for (let i = 0; i < features_data.length; ++i) {
-                features_data[i] = (features_data[i] - this.mean) / denom;
-            }
-        }
-
-        return {
-            input_values: features.unsqueeze_(0)
-        };
-    }
-}
-
-export class ClapFeatureExtractor extends FeatureExtractor {
-
-    constructor(config) {
-        super(config);
-
-        this.mel_filters = mel_filter_bank(
-            this.config.nb_frequency_bins, // num_frequency_bins
-            this.config.feature_size, // num_mel_filters
-            this.config.frequency_min, // min_frequency
-            this.config.frequency_max, // max_frequency
-            this.config.sampling_rate, // sampling_rate
-            null, // norm
-            "htk", // mel_scale
-        );
-
-        this.mel_filters_slaney = mel_filter_bank(
-            this.config.nb_frequency_bins, // num_frequency_bins
-            this.config.feature_size, // num_mel_filters
-            this.config.frequency_min, // min_frequency
-            this.config.frequency_max, // max_frequency
-            this.config.sampling_rate, // sampling_rate
-            "slaney", // norm
-            "slaney", // mel_scale
-        );
-
-        this.window = window_function(this.config.fft_window_size, 'hann')
-
-    }
-
-
-    /**
-     * Extracts the mel spectrogram and prepares it for the model based on the `truncation` and `padding` arguments.
-     *
-     * Four different paths are possible:
-     *   - `truncation="fusion"` and the length of the waveform is greater than the max length: the mel spectrogram
-     *     will be computed on the entire audio. 3 random crops and a downsampled version of the full mel spectrogram
-     *     are then stacked together. They will later be used for `feature_fusion`.
-     *   - `truncation="rand_trunc"` and the length of the waveform is smaller than the max length: the audio is
-     *     padded based on `padding`.
-     *   - `truncation="fusion"` and the length of the waveform is smaller than the max length: the audio is padded
-     *     based on `padding`, and is repeated `4` times.
-     *   - `truncation="rand_trunc"` and the length of the waveform is greater than the max length: the mel
-     *     spectrogram will be computed on a random crop of the waveform.
-     *
-     * @param {Float32Array|Float64Array} waveform The input waveform.
-     * @param {number} max_length The maximum length of the waveform.
-     * @param {string} truncation The truncation strategy to use.
-     * @param {string} padding The padding strategy to use.
-     * @returns {Promise<Tensor>} An object containing the mel spectrogram data as a Float32Array and its dimensions as an array of numbers.
-     * @private
-     */
-    async _get_input_mel(waveform, max_length, truncation, padding) {
-
-        /** @type {Tensor} */
-        let input_mel;
-        let longer = false;
-        const diff = waveform.length - max_length;
-        if (diff > 0) {
-            if (truncation === 'rand_trunc') {
-                longer = true;
-                const idx = Math.floor(Math.random() * (diff + 1));
-                waveform = waveform.subarray(idx, idx + max_length);
-
-                input_mel = await this._extract_fbank_features(waveform, this.mel_filters_slaney, this.config.nb_max_samples);
-            } else {
-                // TODO implement fusion strategy
-                throw new Error(`Truncation strategy "${truncation}" not implemented`)
-            }
-        } else {
-            if (diff < 0) {
-                let padded = new Float64Array(max_length); // already padded with zeros
-                padded.set(waveform);
-
-                if (padding === 'repeat') {
-                    for (let i = waveform.length; i < max_length; i += waveform.length) {
-                        padded.set(waveform.subarray(0, Math.min(waveform.length, max_length - i)), i);
-                    }
-                } else if (padding === 'repeatpad') {
-                    for (let i = waveform.length; i < -diff; i += waveform.length) {
-                        padded.set(waveform, i);
-                    }
-                }
-                waveform = padded;
-            }
-
-            if (truncation === 'fusion') {
-                throw new Error(`Truncation strategy "${truncation}" not implemented`)
-            }
-
-            input_mel = await this._extract_fbank_features(waveform, this.mel_filters_slaney, this.config.nb_max_samples);
-        }
-
-        return input_mel.unsqueeze_(0);
-    }
-
-    /**
-     * Compute the log-mel spectrogram of the provided `waveform` using the Hann window.
-     * In CLAP, two different filter banks are used depending on the truncation pattern:
-     *   - `this.mel_filters`: they correspond to the default parameters of `torchaudio`, which can be obtained from
-     *     calling `torchaudio.transforms.MelSpectrogram().mel_scale.fb`. These filters are used when `truncation`
-     *     is set to `"fusion"`.
-     *   - `this.mel_filters_slaney`: they correspond to the default parameters of `librosa`, which uses
-     *     `librosa.filters.mel` when computing the mel spectrogram. These filters were only used in the original
-     *     implementation when the truncation mode is not `"fusion"`.
-     *
-     * @param {Float32Array|Float64Array} waveform The audio waveform to process.
-     * @param {number[][]} mel_filters The mel filters to use.
-     * @param {number} [max_length=null] The maximum number of frames to return.
-     * @returns {Promise<Tensor>} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers.
-     */
-    async _extract_fbank_features(waveform, mel_filters, max_length = null) {
-        // NOTE: We don't pad/truncate since that is passed in as `max_num_frames`
-        return spectrogram(
-            waveform,
-            this.window, // window
-            this.config.fft_window_size, // frame_length
-            this.config.hop_length, // hop_length
-            {
-                power: 2.0,
-                mel_filters,
-                log_mel: 'dB',
-
-                // Custom
-                max_num_frames: max_length,
-                do_pad: false,
-                transpose: true,
-            }
-        )
-    }
-
-
-    /**
-     * Asynchronously extracts features from a given audio using the provided configuration.
-     * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
-     * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor.
-     */
-    async _call(audio, {
-        max_length = null,
-    } = {}) {
-        validate_audio_inputs(audio, 'ClapFeatureExtractor');
-
-        // convert to mel spectrogram, truncate and pad if needed.
-        const padded_inputs = await this._get_input_mel(
-            audio,
-            max_length ?? this.config.nb_max_samples,
-            this.config.truncation,
-            this.config.padding,
-        );
-
-        return {
-            input_features: padded_inputs.unsqueeze_(0),
-        }
-    }
-}
-
-
-export class PyAnnoteFeatureExtractor extends FeatureExtractor {
-    /**
-     * Asynchronously extracts features from a given audio using the provided configuration.
-     * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array.
-     * @returns {Promise<{ input_values: Tensor; }>} The extracted input features.
-     */
-    async _call(audio) {
-        validate_audio_inputs(audio, 'PyAnnoteFeatureExtractor');
-
-        if (audio instanceof Float64Array) {
-            audio = new Float32Array(audio);
-        }
-
-        const shape = [
-            1, /* batch_size */
-            1, /* num_channels */
-            audio.length, /* num_samples */
-        ];
-        return {
-            input_values: new Tensor('float32', audio, shape),
-        };
-    }
-
-    /**
-     * NOTE: Can return fractional values. `Math.ceil` will ensure correct value.
-     * @param {number} samples The number of samples in the audio.
-     * @returns {number} The number of frames in the audio.
-     */
-    samples_to_frames(samples) {
-        return ((samples - this.config.offset) / this.config.step);
-    }
-
-    /**
-     * Post-processes the speaker diarization logits output by the model.
-     * @param {Tensor} logits The speaker diarization logits output by the model.
-     * @param {number} num_samples Number of samples in the input audio.
-     * @returns {Array<Array<{ id: number, start: number, end: number, confidence: number }>>} The post-processed speaker diarization results.
-     */
-    post_process_speaker_diarization(logits, num_samples) {
-        const ratio = (
-            num_samples / this.samples_to_frames(num_samples)
-        ) / this.config.sampling_rate;
-
-        const results = [];
-        for (const scores of logits.tolist()) {
-            const accumulated_segments = [];
-
-            let current_speaker = -1;
-            for (let i = 0; i < scores.length; ++i) {
-                const probabilities = softmax(scores[i]);
-                const [score, id] = max(probabilities);
-                const [start, end] = [i, i + 1];
-
-                if (id !== current_speaker) {
-                    // Speaker has changed
-                    current_speaker = id;
-                    accumulated_segments.push({ id, start, end, score });
-                } else {
-                    // Continue the current segment
-                    accumulated_segments.at(-1).end = end;
-                    accumulated_segments.at(-1).score += score;
-                }
-            }
-
-            results.push(accumulated_segments.map(
-                // Convert frame-space to time-space
-                // and compute the confidence
-                ({ id, start, end, score }) => ({
-                    id,
-                    start: start * ratio,
-                    end: end * ratio,
-                    confidence: score / (end - start),
-                })
-            ));
-        }
-        return results;
-    }
-
-}
-
-export class WeSpeakerFeatureExtractor extends FeatureExtractor {
-
-    constructor(config) {
-        super(config);
-
-        const sampling_rate = this.config.sampling_rate;
-        const mel_filters = mel_filter_bank(
-            256, // num_frequency_bins
-            this.config.num_mel_bins, // num_mel_filters
-            20, // min_frequency
-            Math.floor(sampling_rate / 2), // max_frequency
-            sampling_rate, // sampling_rate
-            null, // norm
-            "kaldi", // mel_scale
-            true, // triangularize_in_mel_space
-        );
-
-        // Do padding:
-        for (let i = 0; i < mel_filters.length; ++i) {
-            mel_filters[i].push(0);
-        }
-        this.mel_filters = mel_filters;
-
-        this.window = window_function(400, 'hamming', {
-            periodic: false,
-        })
-        this.min_num_frames = this.config.min_num_frames;
-    }
-
-    /**
-     * Computes the log-Mel spectrogram of the provided audio waveform.
-     * @param {Float32Array|Float64Array} waveform The audio waveform to process.
-     * @returns {Promise<Tensor>} An object containing the log-Mel spectrogram data as a Float32Array and its dimensions as an array of numbers.
- */ - async _extract_fbank_features(waveform) { - // Kaldi compliance: 16-bit signed integers - // 32768 == 2 ** 15 - waveform = waveform.map((/** @type {number} */ x) => x * 32768) - - return spectrogram( - waveform, - this.window, // window - 400, // frame_length - 160, // hop_length - { - fft_length: 512, - power: 2.0, - center: false, - preemphasis: 0.97, - mel_filters: this.mel_filters, - log_mel: 'log', - mel_floor: 1.192092955078125e-07, - remove_dc_offset: true, - - // Custom - transpose: true, - min_num_frames: this.min_num_frames, - } - ) - } - - - /** - * Asynchronously extracts features from a given audio using the provided configuration. - * @param {Float32Array|Float64Array} audio The audio data as a Float32Array/Float64Array. - * @returns {Promise<{ input_features: Tensor }>} A Promise resolving to an object containing the extracted input features as a Tensor. - */ - async _call(audio) { - validate_audio_inputs(audio, 'WeSpeakerFeatureExtractor'); - - const features = (await this._extract_fbank_features(audio)).unsqueeze_(0); - - if (this.config.fbank_centering_span === null) { - // center features with global average - const meanData = /** @type {Float32Array} */ (features.mean(1).data); - const featuresData = /** @type {Float32Array} */(features.data); - const [batch_size, num_frames, feature_size] = features.dims; - - for (let i = 0; i < batch_size; ++i) { - const offset1 = i * num_frames * feature_size; - const offset2 = i * feature_size; - for (let j = 0; j < num_frames; ++j) { - const offset3 = offset1 + j * feature_size; - for (let k = 0; k < feature_size; ++k) { - featuresData[offset3 + k] -= meanData[offset2 + k]; - } - } - } - } - - return { - input_features: features - }; - } -} - -export class SpeechT5FeatureExtractor extends FeatureExtractor { } - -/** - * Represents a Processor that extracts features from an input. - * @extends Callable - */ -export class Processor extends Callable { - /** - * Creates a new Processor with the given feature extractor. - * @param {FeatureExtractor} feature_extractor The function used to extract features from the input. - */ - constructor(feature_extractor) { - super(); - this.feature_extractor = feature_extractor; - // TODO use tokenizer here? - } - - /** - * Calls the feature_extractor function with the given input. - * @param {any} input The input to extract features from. - * @param {...any} args Additional arguments. - * @returns {Promise} A Promise that resolves with the extracted features. - */ - async _call(input, ...args) { - return await this.feature_extractor(input, ...args); - } -} - -export class SamProcessor extends Processor { - /** - * @borrows SamImageProcessor#_call as _call - */ - async _call(...args) { - return await this.feature_extractor(...args); - } - - /** - * @borrows SamImageProcessor#post_process_masks as post_process_masks - */ - post_process_masks(...args) { - // @ts-ignore - return this.feature_extractor.post_process_masks(...args); - } - /** - * @borrows SamImageProcessor#reshape_input_points as reshape_input_points - */ - reshape_input_points(...args) { - // @ts-ignore - return this.feature_extractor.reshape_input_points(...args); - } -} - -/** - * Represents a WhisperProcessor that extracts features from an audio input. - * @extends Processor - */ -export class WhisperProcessor extends Processor { - /** - * Calls the feature_extractor function with the given audio input. - * @param {any} audio The audio input to extract features from. 
-     * @returns {Promise<any>} A Promise that resolves with the extracted features.
-     */
-    async _call(audio) {
-        return await this.feature_extractor(audio)
-    }
-}
-
-
-export class Wav2Vec2ProcessorWithLM extends Processor {
-    /**
-     * Calls the feature_extractor function with the given audio input.
-     * @param {any} audio The audio input to extract features from.
-     * @returns {Promise<any>} A Promise that resolves with the extracted features.
-     */
-    async _call(audio) {
-        return await this.feature_extractor(audio)
-    }
-}
-
-export class PyAnnoteProcessor extends Processor {
-    /**
-     * Calls the feature_extractor function with the given audio input.
-     * @param {any} audio The audio input to extract features from.
-     * @returns {Promise<any>} A Promise that resolves with the extracted features.
-     */
-    async _call(audio) {
-        return await this.feature_extractor(audio)
-    }
-
-    post_process_speaker_diarization(...args) {
-        // @ts-ignore
-        return this.feature_extractor.post_process_speaker_diarization(...args);
-    }
-
-}
-
-export class SpeechT5Processor extends Processor {
-    /**
-     * Calls the feature_extractor function with the given input.
-     * @param {any} input The input to extract features from.
-     * @returns {Promise<any>} A Promise that resolves with the extracted features.
-     */
-    async _call(input) {
-        return await this.feature_extractor(input)
-    }
-}
-
-export class OwlViTProcessor extends Processor { }
-
-export class Florence2Processor extends Processor {
-    constructor(feature_extractor) {
-        super(feature_extractor);
-
-        const {
-            tasks_answer_post_processing_type,
-            task_prompts_without_inputs,
-            task_prompts_with_input,
-        } = feature_extractor.config;
-
-        /** @type {Map<string, string>} */
-        this.tasks_answer_post_processing_type = new Map(Object.entries(tasks_answer_post_processing_type ?? {}));
-
-        /** @type {Map<string, string>} */
-        this.task_prompts_without_inputs = new Map(Object.entries(task_prompts_without_inputs ?? {}));
-
-        /** @type {Map<string, string>} */
-        this.task_prompts_with_input = new Map(Object.entries(task_prompts_with_input ?? {}));
-
-        this.regexes = {
-            quad_boxes: /(.+?)<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm,
-            bboxes: /([^<]+)?<loc_(\d+)><loc_(\d+)><loc_(\d+)><loc_(\d+)>/gm,
-        }
-        this.size_per_bin = 1000;
-    }
-
-    /**
-     * Helper function to construct prompts from input texts
-     * @param {string|string[]} text
-     * @returns {string[]}
-     */
-    construct_prompts(text) {
-        if (typeof text === 'string') {
-            text = [text];
-        }
-
-        const prompts = [];
-        for (const t of text) {
-            // 1. fixed task prompts without additional inputs
-            if (this.task_prompts_without_inputs.has(t)) {
-                prompts.push(this.task_prompts_without_inputs.get(t));
-            }
-            // 2. task prompts with additional inputs
-            else {
-                for (const [task, prompt] of this.task_prompts_with_input) {
-                    if (t.includes(task)) {
-                        prompts.push(prompt.replaceAll('{input}', t).replaceAll(task, ''));
-                        break;
-                    }
-                }
-
-                // 3. default prompt
-                if (prompts.length !== text.length) {
-                    prompts.push(t);
-                }
-            }
-        }
-        return prompts;
-    }
-
-    /**
-     * Post-process the output of the model to each of the task outputs.
-     * @param {string} text The text to post-process.
-     * @param {string} task The task to post-process the text for.
-     * @param {[number, number]} image_size The size of the image. height x width.
-     */
-    post_process_generation(text, task, image_size) {
-        const task_answer_post_processing_type = this.tasks_answer_post_processing_type.get(task) ?? 'pure_text';
-
-        // remove the special tokens
-        text = text.replaceAll('<s>', '').replaceAll('</s>', '');
-
-        let final_answer;
-        switch (task_answer_post_processing_type) {
-            case 'pure_text':
-                final_answer = text;
-                break;
-
-            case 'description_with_bboxes':
-            case 'bboxes':
-            case 'phrase_grounding':
-            case 'ocr':
-                const key = task_answer_post_processing_type === 'ocr' ? 'quad_boxes' : 'bboxes';
-                const matches = text.matchAll(this.regexes[key]);
-                const labels = [];
-                const items = [];
-                for (const [_, label, ...locations] of matches) {
-                    // Push new label, or duplicate the last label
-                    labels.push(label ? label.trim() : labels.at(-1) ?? '');
-                    items.push(locations.map((x, i) =>
-                        // NOTE: Add 0.5 to use the center position of the bin as the coordinate.
-                        (Number(x) + 0.5) / this.size_per_bin * image_size[i % 2])
-                    );
-                }
-                final_answer = { labels, [key]: items };
-                break;
-
-            default:
-                throw new Error(`Task "${task}" (of type "${task_answer_post_processing_type}") not yet implemented.`);
-        }
-
-        return { [task]: final_answer }
-    }
-}
-
-//////////////////////////////////////////////////
-/**
- * Helper class which is used to instantiate pretrained processors with the `from_pretrained` function.
- * The chosen processor class is determined by the type specified in the processor config.
- *
- * **Example:** Load a processor using `from_pretrained`.
- * ```javascript
- * let processor = await AutoProcessor.from_pretrained('openai/whisper-tiny.en');
- * ```
- *
- * **Example:** Run an image through a processor.
- * ```javascript
- * let processor = await AutoProcessor.from_pretrained('Xenova/clip-vit-base-patch16');
- * let image = await RawImage.read('https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg');
- * let image_inputs = await processor(image);
- * // {
- * //   "pixel_values": {
- * //     "dims": [ 1, 3, 224, 224 ],
- * //     "type": "float32",
- * //     "data": Float32Array [ -1.558687686920166, -1.558687686920166, -1.5440893173217773, ...
], - * // "size": 150528 - * // }, - * // "original_sizes": [ - * // [ 533, 800 ] - * // ], - * // "reshaped_input_sizes": [ - * // [ 224, 224 ] - * // ] - * // } - * ``` - */ -export class AutoProcessor { - static FEATURE_EXTRACTOR_CLASS_MAPPING = { - ImageFeatureExtractor, - WhisperFeatureExtractor, - ViTFeatureExtractor, - MobileViTFeatureExtractor, - MobileViTImageProcessor, - MobileNetV1FeatureExtractor, - MobileNetV2FeatureExtractor, - MobileNetV3FeatureExtractor, - MobileNetV4FeatureExtractor, - OwlViTFeatureExtractor, - Owlv2ImageProcessor, - CLIPFeatureExtractor, - CLIPImageProcessor, - Florence2Processor, - ChineseCLIPFeatureExtractor, - SiglipImageProcessor, - ConvNextFeatureExtractor, - ConvNextImageProcessor, - SegformerFeatureExtractor, - SapiensFeatureExtractor, - BitImageProcessor, - DPTImageProcessor, - DPTFeatureExtractor, - PvtImageProcessor, - GLPNFeatureExtractor, - BeitFeatureExtractor, - DeiTFeatureExtractor, - DetrFeatureExtractor, - RTDetrImageProcessor, - MaskFormerFeatureExtractor, - YolosFeatureExtractor, - DonutFeatureExtractor, - DonutImageProcessor, - NougatImageProcessor, - EfficientNetImageProcessor, - - ViTImageProcessor, - VitMatteImageProcessor, - SamImageProcessor, - Swin2SRImageProcessor, - Wav2Vec2FeatureExtractor, - SeamlessM4TFeatureExtractor, - SpeechT5FeatureExtractor, - ASTFeatureExtractor, - ClapFeatureExtractor, - PyAnnoteFeatureExtractor, - WeSpeakerFeatureExtractor, - } - - static PROCESSOR_CLASS_MAPPING = { - WhisperProcessor, - Wav2Vec2ProcessorWithLM, - PyAnnoteProcessor, - SamProcessor, - SpeechT5Processor, - OwlViTProcessor, - Florence2Processor, - } - - /** - * Instantiate one of the processor classes of the library from a pretrained model. - * - * The processor class to instantiate is selected based on the `feature_extractor_type` property of the config object - * (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible) - * - * @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either: - * - A string, the *model id* of a pretrained processor hosted inside a model repo on huggingface.co. - * Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a - * user or organization name, like `dbmdz/bert-base-german-cased`. - * - A path to a *directory* containing processor files, e.g., `./my_model_directory/`. - * @param {import('./utils/hub.js').PretrainedOptions} options Additional options for loading the processor. - * - * @returns {Promise} A new instance of the Processor class. - */ - static async from_pretrained(pretrained_model_name_or_path, { - progress_callback = null, - config = null, - cache_dir = null, - local_files_only = false, - revision = 'main', - } = {}) { - - let preprocessorConfig = config ?? await getModelJSON(pretrained_model_name_or_path, 'preprocessor_config.json', true, { - progress_callback, - config, - cache_dir, - local_files_only, - revision, - }) - - // Determine feature extractor class - // TODO: Ensure backwards compatibility with old configs - let key = preprocessorConfig.feature_extractor_type ?? 
preprocessorConfig.image_processor_type; - let feature_extractor_class = this.FEATURE_EXTRACTOR_CLASS_MAPPING[key]; - - if (!feature_extractor_class) { - if (preprocessorConfig.size !== undefined) { - // Assume ImageFeatureExtractor - console.warn(`Feature extractor type "${key}" not found, assuming ImageFeatureExtractor due to size parameter in config.`); - feature_extractor_class = ImageFeatureExtractor; - } else { - throw new Error(`Unknown Feature Extractor type: ${key}`); - } - } - - // If no associated processor class, use default - let processor_class = this.PROCESSOR_CLASS_MAPPING[preprocessorConfig.processor_class] ?? Processor; - - // Instantiate processor and feature extractor - let feature_extractor = new feature_extractor_class(preprocessorConfig); - return new processor_class(feature_extractor); - } -} -////////////////////////////////////////////////// - diff --git a/src/tokenizers.js b/src/tokenizers.js index 48a26b636..e206fa411 100644 --- a/src/tokenizers.js +++ b/src/tokenizers.js @@ -4257,6 +4257,8 @@ export class VitsTokenizer extends PreTrainedTokenizer { export class CohereTokenizer extends PreTrainedTokenizer { } +export class MgpstrTokenizer extends PreTrainedTokenizer { } + /** * Helper class which is used to instantiate pretrained tokenizers with the `from_pretrained` function. * The chosen tokenizer class is determined by the type specified in the tokenizer config. @@ -4310,6 +4312,7 @@ export class AutoTokenizer { GemmaTokenizer, Grok1Tokenizer, CohereTokenizer, + MgpstrTokenizer, // Base case: PreTrainedTokenizer, diff --git a/src/transformers.js b/src/transformers.js index be7ad176e..052c8c829 100644 --- a/src/transformers.js +++ b/src/transformers.js @@ -12,10 +12,10 @@ */ export { env } from './env.js'; + export * from './pipelines.js'; export * from './models.js'; export * from './tokenizers.js'; -export * from './processors.js'; export * from './configs.js'; export * from './utils/audio.js'; @@ -23,6 +23,19 @@ export * from './utils/image.js'; export * from './utils/tensor.js'; export * from './utils/maths.js'; + +export { FeatureExtractor } from './base/feature_extraction_utils.js'; +export * from './models/feature_extractors.js'; +export * from './models/auto/feature_extraction_auto.js'; + +export { ImageProcessor } from './base/image_processors_utils.js'; +export * from './models/image_processors.js'; +export * from './models/auto/image_processing_auto.js'; + +export { Processor } from './base/processing_utils.js'; +export * from './models/processors.js'; +export * from './models/auto/processing_auto.js'; + export * from './generation/streamers.js'; export * from './generation/stopping_criteria.js'; - +export * from './generation/logits_process.js'; diff --git a/src/utils/constants.js b/src/utils/constants.js index 9d0e9ee42..ed456a56b 100644 --- a/src/utils/constants.js +++ b/src/utils/constants.js @@ -1,2 +1,9 @@ -export const GITHUB_ISSUE_URL = 'https://github.com/huggingface/transformers.js/issues/new/choose'; \ No newline at end of file +export const GITHUB_ISSUE_URL = 'https://github.com/huggingface/transformers.js/issues/new/choose'; + +export const CONFIG_NAME = "config.json" +export const FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" +export const IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME +export const PROCESSOR_NAME = "processor_config.json" +export const CHAT_TEMPLATE_NAME = "chat_template.json" +export const GENERATION_CONFIG_NAME = "generation_config.json" diff --git a/tests/models/roberta/tokenization.js 
b/tests/models/roberta/tokenization.js index 458430878..96756f9ca 100644 --- a/tests/models/roberta/tokenization.js +++ b/tests/models/roberta/tokenization.js @@ -696,9 +696,9 @@ export const TEST_CONFIG = { "onnx-community/camembertv2-base": { SIMPLE: { text: BASE_TEST_STRINGS.SIMPLE, - tokens: ['How', 'are', 'you', 'doi', '##ng', '?'], + tokens: ["How", "are", "you", "doi", "##ng", "?"], ids: [1, 14473, 9556, 10577, 6471, 9274, 38, 2], decoded: "[CLS] How are you doing? [SEP]", - } + }, }, }; diff --git a/tests/processors.test.js b/tests/processors.test.js index caf1ddf86..53f94bcaa 100644 --- a/tests/processors.test.js +++ b/tests/processors.test.js @@ -1,4 +1,4 @@ -import { env, AutoProcessor, RawImage } from "../src/transformers.js"; +import { env, AutoProcessor, AutoImageProcessor, RawImage } from "../src/transformers.js"; import { init, MAX_TEST_EXECUTION_TIME } from "./init.js"; import { compare } from "./test_utils.js"; @@ -44,9 +44,11 @@ const MODELS = { dinov2: "Xenova/dinov2-small-imagenet1k-1-layer", // efficientnet: 'Xenova/efficientnet-b0', florence2: "Xenova/tiny-random-Florence2ForConditionalGeneration", + qwen2_vl: "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration", }; const TEST_IMAGES = { + white_image: "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/white-image.png", pattern_3x3: "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/pattern_3x3.png", pattern_3x5: "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/pattern_3x5.png", checkerboard_8x8: "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/checkerboard_8x8.png", @@ -73,7 +75,7 @@ describe("Processors", () => { it( MODELS.swin2sr, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.swin2sr); + const processor = await AutoImageProcessor.from_pretrained(MODELS.swin2sr); { // Pad to multiple of 8 (3x3 -> 8x8) @@ -101,7 +103,7 @@ describe("Processors", () => { it( MODELS.sam, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.sam); + const processor = await AutoImageProcessor.from_pretrained(MODELS.sam); { // without input points @@ -173,7 +175,7 @@ describe("Processors", () => { it( MODELS["donut-swin"], async () => { - const processor = await AutoProcessor.from_pretrained(MODELS["donut-swin"]); + const processor = await AutoImageProcessor.from_pretrained(MODELS["donut-swin"]); { const image = await load_image(TEST_IMAGES.receipt); @@ -193,7 +195,7 @@ describe("Processors", () => { it( MODELS.resnet, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.resnet); + const processor = await AutoImageProcessor.from_pretrained(MODELS.resnet); { const image = await load_image(TEST_IMAGES.tiger); @@ -213,7 +215,7 @@ describe("Processors", () => { it( MODELS.vit, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.vit); + const processor = await AutoImageProcessor.from_pretrained(MODELS.vit); { const image = await load_image(TEST_IMAGES.tiger); @@ -233,7 +235,7 @@ describe("Processors", () => { it( MODELS.mobilevit, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.mobilevit); + const processor = await AutoImageProcessor.from_pretrained(MODELS.mobilevit); { const image = await load_image(TEST_IMAGES.tiger); @@ -254,7 +256,7 @@ describe("Processors", () => { it( MODELS.mobilevit_2, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.mobilevit_2); + const processor = 
await AutoImageProcessor.from_pretrained(MODELS.mobilevit_2); { // Tests grayscale image @@ -276,7 +278,7 @@ describe("Processors", () => { it( MODELS.mobilevit_3, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.mobilevit_3); + const processor = await AutoImageProcessor.from_pretrained(MODELS.mobilevit_3); { const image = await load_image(TEST_IMAGES.cats); @@ -299,7 +301,7 @@ describe("Processors", () => { it( MODELS.deit, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.deit); + const processor = await AutoImageProcessor.from_pretrained(MODELS.deit); { const image = await load_image(TEST_IMAGES.tiger); @@ -319,7 +321,7 @@ describe("Processors", () => { it( MODELS.beit, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.beit); + const processor = await AutoImageProcessor.from_pretrained(MODELS.beit); { const image = await load_image(TEST_IMAGES.tiger); @@ -339,7 +341,7 @@ describe("Processors", () => { it( MODELS.detr, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.detr); + const processor = await AutoImageProcessor.from_pretrained(MODELS.detr); { const image = await load_image(TEST_IMAGES.tiger); @@ -362,7 +364,7 @@ describe("Processors", () => { it( MODELS.yolos, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.yolos); + const processor = await AutoImageProcessor.from_pretrained(MODELS.yolos); { const image = await load_image(TEST_IMAGES.tiger); @@ -382,7 +384,7 @@ describe("Processors", () => { it( MODELS.dpt, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.dpt); + const processor = await AutoImageProcessor.from_pretrained(MODELS.dpt); { // Tests grayscale image @@ -404,7 +406,7 @@ describe("Processors", () => { it( MODELS.glpn, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.glpn); + const processor = await AutoImageProcessor.from_pretrained(MODELS.glpn); { const image = await load_image(TEST_IMAGES.cats); @@ -436,7 +438,7 @@ describe("Processors", () => { it( MODELS.nougat, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.nougat); + const processor = await AutoImageProcessor.from_pretrained(MODELS.nougat); { const image = await load_image(TEST_IMAGES.paper); @@ -454,7 +456,7 @@ describe("Processors", () => { // OwlViTFeatureExtractor it(MODELS.owlvit, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.owlvit); + const processor = await AutoImageProcessor.from_pretrained(MODELS.owlvit); { const image = await load_image(TEST_IMAGES.cats); const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image); @@ -472,7 +474,7 @@ describe("Processors", () => { it( MODELS.clip, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.clip); + const processor = await AutoImageProcessor.from_pretrained(MODELS.clip); { const image = await load_image(TEST_IMAGES.tiger); @@ -496,7 +498,7 @@ describe("Processors", () => { it( MODELS.vitmatte, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.vitmatte); + const processor = await AutoImageProcessor.from_pretrained(MODELS.vitmatte); { const image = await load_image(TEST_IMAGES.vitmatte_image); @@ -543,7 +545,7 @@ describe("Processors", () => { it( MODELS.dinov2, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.dinov2); + const processor = await AutoImageProcessor.from_pretrained(MODELS.dinov2); { const image = 
await load_image(TEST_IMAGES.tiger); @@ -566,7 +568,7 @@ describe("Processors", () => { it( MODELS.dpt_2, async () => { - const processor = await AutoProcessor.from_pretrained(MODELS.dpt_2); + const processor = await AutoImageProcessor.from_pretrained(MODELS.dpt_2); { const image = await load_image(TEST_IMAGES.cats); @@ -598,7 +600,7 @@ describe("Processors", () => { // // EfficientNetImageProcessor // // - tests include_top // it(MODELS.efficientnet, async () => { - // const processor = await AutoProcessor.from_pretrained(MODELS.efficientnet) + // const processor = await AutoImageProcessor.from_pretrained(MODELS.efficientnet) // { // const image = await load_image(TEST_IMAGES.cats); @@ -611,6 +613,28 @@ describe("Processors", () => { // compare(reshaped_input_sizes, [[224, 224]]); // } // }, MAX_TEST_EXECUTION_TIME); + + // Qwen2VLProcessor + // - custom image processing (min_pixels, max_pixels) + it( + MODELS.qwen2_vl, + async () => { + const processor = await AutoImageProcessor.from_pretrained(MODELS.qwen2_vl); + + { + const image = await load_image(TEST_IMAGES.white_image); + const { pixel_values, image_grid_thw, original_sizes, reshaped_input_sizes } = await processor(image); + + compare(pixel_values.dims, [256, 1176]); + compare(avg(pixel_values.data), 2.050372362136841); + compare(image_grid_thw.tolist(), [[1n, 16n, 16n]]); + + compare(original_sizes, [[224, 224]]); + compare(reshaped_input_sizes, [[224, 224]]); + } + }, + MAX_TEST_EXECUTION_TIME, + ); }); describe("Audio processors", () => { @@ -810,7 +834,7 @@ describe("Processors", () => { describe( "FlorenceProcessor", () => { - /** @type {import('../src/processors.js').Florence2Processor} */ + /** @type {import('../src/transformers.js').Florence2Processor} */ let processor; let images = {}; @@ -1014,5 +1038,41 @@ describe("Processors", () => { }, MAX_TEST_EXECUTION_TIME, ); + + describe( + "Qwen2VLProcessor", + () => { + /** @type {import('../src/transformers.js').Qwen2VLProcessor} */ + let processor; + let images = {}; + + beforeAll(async () => { + processor = await AutoProcessor.from_pretrained(MODELS.qwen2_vl); + images = { + white_image: await load_image(TEST_IMAGES.white_image), + }; + }); + + it("Image and text", async () => { + const conversation = [ + { + role: "user", + content: [{ type: "image" }, { type: "text", text: "Describe this image." 
}], + }, + ]; + + const text = processor.apply_chat_template(conversation, { + add_generation_prompt: true, + }); + const { input_ids, attention_mask, pixel_values, image_grid_thw } = await processor(text, images.white_image); + + compare(input_ids.dims, [1, 89]); + compare(attention_mask.dims, [1, 89]); + compare(pixel_values.dims, [256, 1176]); + compare(image_grid_thw.dims, [1, 3]); + }); + }, + MAX_TEST_EXECUTION_TIME, + ); }); }); diff --git a/tests/tiny_random.test.js b/tests/tiny_random.test.js index 37059650d..bd2fe1c60 100644 --- a/tests/tiny_random.test.js +++ b/tests/tiny_random.test.js @@ -52,6 +52,7 @@ import { WhisperForConditionalGeneration, VisionEncoderDecoderModel, Florence2ForConditionalGeneration, + Qwen2VLForConditionalGeneration, MarianMTModel, PatchTSTModel, PatchTSTForPrediction, @@ -767,8 +768,6 @@ describe("Tiny random models", () => { /** @type {Florence2ForConditionalGeneration} */ let model; - /** @type {BartTokenizer} */ - let tokenizer; /** @type {Florence2Processor} */ let processor; beforeAll(async () => { @@ -776,22 +775,18 @@ describe("Tiny random models", () => { // TODO move to config ...DEFAULT_MODEL_OPTIONS, }); - tokenizer = await BartTokenizer.from_pretrained(model_id); processor = await AutoProcessor.from_pretrained(model_id); }, MAX_MODEL_LOAD_TIME); it( "forward", async () => { - const text_inputs = tokenizer(texts[0]); - const vision_inputs = await processor(image); - const inputs = { - ...text_inputs, - ...vision_inputs, - decoder_input_ids: full([1, 1], 2n), - }; + const inputs = await processor(image, texts[0]); - const { logits } = await model(inputs); + const { logits } = await model({ + ...inputs, + decoder_input_ids: full([1, 1], 2n), + }); expect(logits.dims).toEqual([1, 1, 51289]); }, MAX_TEST_EXECUTION_TIME, @@ -800,15 +795,13 @@ describe("Tiny random models", () => { it( "batch_size=1", async () => { - const text_inputs = tokenizer(texts[0]); { + const text_inputs = processor.tokenizer(texts[0]); const generate_ids = await model.generate({ ...text_inputs, max_new_tokens: 10 }); expect(generate_ids.tolist()).toEqual([[2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n]]); } { - const vision_inputs = await processor(image); - const inputs = { ...text_inputs, ...vision_inputs }; - + const inputs = await processor(image, texts[0]); const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10 }); expect(generate_ids.tolist()).toEqual([[2n, 0n, 48n, 48n, 48n, 48n, 48n, 48n, 48n, 48n, 2n]]); } @@ -819,8 +812,8 @@ describe("Tiny random models", () => { it( "batch_size>1", async () => { - const text_inputs = tokenizer(texts, { padding: true }); { + const text_inputs = processor.tokenizer(texts, { padding: true }); const generate_ids = await model.generate({ ...text_inputs, max_new_tokens: 10 }); expect(generate_ids.tolist()).toEqual([ [2n, 0n, 0n, 0n, 1n, 0n, 0n, 2n], @@ -828,8 +821,7 @@ describe("Tiny random models", () => { ]); } { - const vision_inputs = await processor([image, image]); - const inputs = { ...text_inputs, ...vision_inputs }; + const inputs = await processor([image, image], texts, { padding: true }); const generate_ids = await model.generate({ ...inputs, max_new_tokens: 10 }); expect(generate_ids.tolist()).toEqual([ @@ -847,6 +839,96 @@ describe("Tiny random models", () => { }); }); + describe("qwen2_vl", () => { + const CONVERSATION = [ + { + role: "user", + content: [{ type: "text", text: "Hello" }], + }, + ]; + + // Example adapted from https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct + const CONVERSATION_WITH_IMAGE = [ + { + 
role: "user", + content: [{ type: "image" }, { type: "text", text: "Describe this image." }], + }, + ]; + // Empty white image + const dims = [224, 224, 3]; + const image = new RawImage(new Uint8ClampedArray(dims[0] * dims[1] * dims[2]).fill(255), ...dims); + + describe("Qwen2VLForConditionalGeneration", () => { + const model_id = "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration"; + + /** @type {Qwen2VLForConditionalGeneration} */ + let model; + /** @type {Qwen2VLProcessor} */ + let processor; + beforeAll(async () => { + model = await Qwen2VLForConditionalGeneration.from_pretrained(model_id, { + // TODO move to config + ...DEFAULT_MODEL_OPTIONS, + }); + processor = await AutoProcessor.from_pretrained(model_id); + }, MAX_MODEL_LOAD_TIME); + + it( + "forward", + async () => { + const text = processor.apply_chat_template(CONVERSATION_WITH_IMAGE, { + add_generation_prompt: true, + }); + const inputs = await processor(text, image); + const { logits } = await model(inputs); + expect(logits.dims).toEqual([1, 89, 152064]); + expect(logits.mean().item()).toBeCloseTo(-0.0011299321195110679, 5); + }, + MAX_TEST_EXECUTION_TIME, + ); + + it( + "text-only (batch_size=1)", + async () => { + const text = processor.apply_chat_template(CONVERSATION, { + add_generation_prompt: true, + }); + const inputs = await processor(text); + const generate_ids = await model.generate({ + ...inputs, + max_new_tokens: 10, + }); + + const new_tokens = generate_ids.slice(null, [inputs.input_ids.dims.at(-1), null]); + expect(new_tokens.tolist()).toEqual([[24284n, 63986n, 108860n, 84530n, 8889n, 23262n, 128276n, 64948n, 136757n, 138348n]]); + }, + MAX_TEST_EXECUTION_TIME, + ); + + it( + "text + image (batch_size=1)", + async () => { + const text = processor.apply_chat_template(CONVERSATION_WITH_IMAGE, { + add_generation_prompt: true, + }); + const inputs = await processor(text, image); + const generate_ids = await model.generate({ + ...inputs, + max_new_tokens: 10, + }); + + const new_tokens = generate_ids.slice(null, [inputs.input_ids.dims.at(-1), null]); + expect(new_tokens.tolist()).toEqual([[24284n, 35302n, 60575n, 38679n, 113390n, 115118n, 137596n, 38241n, 96726n, 142301n]]); + }, + MAX_TEST_EXECUTION_TIME, + ); + + afterAll(async () => { + await model?.dispose(); + }, MAX_MODEL_DISPOSE_TIME); + }); + }); + describe("vision-encoder-decoder", () => { describe("VisionEncoderDecoderModel", () => { const model_id = "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2";
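For reference, the Qwen2-VL tests added above map directly onto end-user code. The following is a minimal usage sketch, not part of this patch: the model id `onnx-community/Qwen2-VL-2B-Instruct`, the image URL, and the prompt are illustrative assumptions, and any compatible Qwen2-VL ONNX checkpoint should work the same way.

```javascript
// In application code, import from the published package
// (the tests above import the same symbols from "../src/transformers.js").
import { AutoProcessor, Qwen2VLForConditionalGeneration, RawImage } from "@huggingface/transformers";

// Hypothetical checkpoint name, for illustration only.
const model_id = "onnx-community/Qwen2-VL-2B-Instruct";
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await Qwen2VLForConditionalGeneration.from_pretrained(model_id);

// Build the prompt with the chat template, exactly as in the tests above.
const conversation = [
  {
    role: "user",
    content: [{ type: "image" }, { type: "text", text: "Describe this image." }],
  },
];
const text = processor.apply_chat_template(conversation, { add_generation_prompt: true });

// Prepare vision + text inputs and generate.
const image = await RawImage.read("https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/football-match.jpg");
const inputs = await processor(text, image);
const generated_ids = await model.generate({ ...inputs, max_new_tokens: 128 });

// Keep only the newly generated tokens before decoding.
const new_tokens = generated_ids.slice(null, [inputs.input_ids.dims.at(-1), null]);
const output = processor.batch_decode(new_tokens, { skip_special_tokens: true });
console.log(output[0]);
```

As exercised in the tests, the processor exposes `apply_chat_template` and `batch_decode` directly, so no separate tokenizer instance is needed.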