Citation

If you find this project useful in your research, please consider citing:

@article{wang2025internvl3,
  title={InternVL3.5: Advancing Open-Source Multimodal Models in Versatility, Reasoning, and Efficiency},
  author={Wang, Weiyun and Gao, Zhangwei and Gu, Lixin and Pu, Hengjun and Cui, Long and Wei, Xingguang and Liu, Zhaoyang and Jing, Linglin and Ye, Shenglong and Shao, Jie and others},
  journal={arXiv preprint arXiv:2508.18265},
  year={2025}
}

@article{zhu2025internvl3,
  title={InternVL3: Exploring Advanced Training and Test-Time Recipes for Open-Source Multimodal Models},
  author={Zhu, Jinguo and Wang, Weiyun and Chen, Zhe and Liu, Zhaoyang and Ye, Shenglong and Gu, Lixin and Duan, Yuchen and Tian, Hao and Su, Weijie and Shao, Jie and others},
  journal={arXiv preprint arXiv:2504.10479},
  year={2025}
}