From 6b962e6e9172e08f87015bbdd906456c53dd74e0 Mon Sep 17 00:00:00 2001 From: Zhihao Lin <36994684+LZHgrla@users.noreply.github.com> Date: Tue, 26 Dec 2023 17:39:01 +0800 Subject: [PATCH] [Feature] Support LLaVA (#196) * v1 * add load_image * update cfg image url * del fig * update * temp * update convert * update chat_mm * add exclude_frozen_parameters for deepspeed * update chat * update xtuner help msg * fix bugs * revert bf16 deepspeed * fix bugs * add visual_select_layer for chat * improve pth_to_hf * rename projecter_pth to pretrained_pth * temp * update requirements * add cfgs * update * fix pre-commit * optim chat * optim chat * Delete xtuner/model/unused.py * move dispatch to a deeper folder * add projector * update * del model/projector * fix bugs * add docs * update * update * update * update * enhance resume for map_fn * update import * add llava_internlm_chat_7b_clip_vit_large_p14 * update dispatch * update dispatch * add link * update max_length * update max_length * update hyp * align * move yi flash attn * fix pre-commit * update deepspeed requirements * add mmbench script * install openpyxl * add entry_point for mmbench * save args * update mmbench * update max_length * add llama2 qlora * update mmbench * fix mmbench bugs * use osp instead of os.path * refactor pth_to_hf * update chat and mmbench to support --llava * align to chat * update entry_point * add vicuna template * add vicuna_7b_v15 * fix pre-commit * add vicuna_7b_v1.5 qlora * skip_special_tokens for decode text * remove do_sample * add warmup * fix pre-commit * Update dataset_prepare.md * Update dataset_prepare.md * Add KEEP_STSTEM for template * remove * fix vicuna template * clean cfgs * add cfgs * fix pre-commit * add --language for mmbench * fix bugs * fix pretrain bug * support visual_encoder lora * fix bugs * add paramwise_cfg * remove print_peft_model_trainable_parameters * fix bugs * add paramwise_cfg for DeepSpeedOptimWrapper * fix engine deepspeed paramwise_cfg bug * fix encode_fn bug * fix * fix pad_image_to_square bugs * Add space for system to avoid mismatch of 'USER' token * revert to adding bos_token at each conv * revert for paramwise_cfg * better cfgs? 
* fix import bug * fix import bug * pretrain align * update prepare_inputs_labels_for_multimodal * 1792 * support length_grouped_samplers * 1792 * remove KEEP_SYSTEM * remove system in cfg * update 336 cfg * add torch_dtype for mmbench and chat * group 50 * quant for pretrain * update cfgs * refactor cfgs * add length for concat dataset * update requirements * fix typo * add template for internlm pretrain * no zh * remove 20b cfgs * fix pre-commit * revert invalid input * rename * Update README.md * Update README_zh-CN.md * fix pre-commit * remove llava_zh from docs * qlora 512 * rename llava map_fn * update cfgs * update model urls * add docs link * add llava docs * update docs * update urls * add citation * fix README * move * update * vicuna pretrain with prompt * rename * add results * fix pre-commit * update * update * update * update * update * update * update * update * update * update * update * update * Update README.md * Update README_zh-CN.md * Update README_zh.md * Update README_zh.md * Update README.md * Update README_zh.md * Update README.md * Update README.md * fix typo * fix * Update README.md * Update README_zh-CN.md * rename * auto cn_string * fix pre-commit * rename * remove language * add VLMEvalKit * rename VLLM to VLM * add the download links of MMBench * update * update readme * update * update * update merge * fix cfg bug * Update README.md * Update README_zh.md * update * fix * update requirements * Update runtime.txt * Update runtime.txt * Update runtime.txt * Update README.md * Update README.md * Update README_zh.md * fix pre-commit * fix * update mmbench prompt * fix bugs * fix bugs * update docs * update * update * Update README.md --- README.md | 15 +- README_zh-CN.md | 13 + docs/en/user_guides/dataset_prepare.md | 76 +++ docs/zh_cn/user_guides/dataset_prepare.md | 76 +++ requirements/runtime.txt | 1 + ...lm_7b_full_intern_repo_dataset_template.py | 4 +- xtuner/configs/llava/README.md | 92 ++++ xtuner/configs/llava/README_zh.md | 90 ++++ ...clip_vit_large_p14_336_e1_gpu8_finetune.py | 194 ++++++++ ...vit_large_p14_336_lora_e1_gpu8_finetune.py | 214 +++++++++ ...clip_vit_large_p14_336_e1_gpu8_pretrain.py | 198 ++++++++ ...clip_vit_large_p14_336_e1_gpu8_finetune.py | 194 ++++++++ ...vit_large_p14_336_lora_e1_gpu8_finetune.py | 214 +++++++++ ...clip_vit_large_p14_336_e1_gpu8_pretrain.py | 198 ++++++++ ...clip_vit_large_p14_336_e1_gpu8_finetune.py | 194 ++++++++ ...vit_large_p14_336_lora_e1_gpu8_finetune.py | 214 +++++++++ ...clip_vit_large_p14_336_e1_gpu8_pretrain.py | 198 ++++++++ xtuner/dataset/__init__.py | 6 +- .../dataset/collate_fns/defalut_collate_fn.py | 8 + xtuner/dataset/concat_dataset.py | 10 +- xtuner/dataset/huggingface.py | 25 +- xtuner/dataset/llava.py | 88 ++++ .../map_fns/dataset_map_fns/__init__.py | 4 +- .../map_fns/dataset_map_fns/llava_map_fn.py | 46 ++ xtuner/dataset/map_fns/template_map_fn.py | 16 +- xtuner/dataset/samplers/__init__.py | 3 + xtuner/dataset/samplers/length_grouped.py | 150 ++++++ xtuner/dataset/utils.py | 57 ++- xtuner/engine/_strategy/deepspeed.py | 1 + xtuner/engine/hooks/dataset_info_hook.py | 30 +- xtuner/engine/hooks/evaluate_chat_hook.py | 105 +++- xtuner/entry_point.py | 24 +- xtuner/model/__init__.py | 3 +- xtuner/model/llava.py | 207 ++++++++ xtuner/model/modules/__init__.py | 5 +- .../{dispatch.py => dispatch/__init__.py} | 3 + .../model/modules/{ => dispatch}/baichuan.py | 0 .../model/modules/{ => dispatch}/internlm.py | 0 xtuner/model/modules/{ => dispatch}/llama.py | 0 xtuner/model/modules/{ => dispatch}/yi.py | 
0 xtuner/model/modules/projector/__init__.py | 10 + .../projector/configuration_projector.py | 23 + .../modules/projector/modeling_projector.py | 51 ++ xtuner/model/sft.py | 82 +--- xtuner/model/utils.py | 235 +++++++++ xtuner/tools/chat.py | 223 ++++++--- xtuner/tools/copy_cfg.py | 8 +- xtuner/tools/mmbench.py | 453 ++++++++++++++++++ xtuner/tools/model_converters/merge.py | 56 ++- xtuner/tools/model_converters/pth_to_hf.py | 95 ++-- xtuner/tools/model_converters/split.py | 13 +- xtuner/tools/test.py | 27 +- xtuner/tools/train.py | 6 +- xtuner/tools/utils.py | 6 + xtuner/utils/__init__.py | 6 +- xtuner/utils/constants.py | 2 + xtuner/utils/templates.py | 14 +- 57 files changed, 4014 insertions(+), 272 deletions(-) create mode 100644 xtuner/configs/llava/README.md create mode 100644 xtuner/configs/llava/README_zh.md create mode 100644 xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_finetune.py create mode 100644 xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py create mode 100644 xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/pretrain/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain.py create mode 100644 xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py create mode 100644 xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py create mode 100644 xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py create mode 100644 xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py create mode 100644 xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py create mode 100644 xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py create mode 100644 xtuner/dataset/llava.py create mode 100644 xtuner/dataset/map_fns/dataset_map_fns/llava_map_fn.py create mode 100644 xtuner/dataset/samplers/__init__.py create mode 100644 xtuner/dataset/samplers/length_grouped.py create mode 100644 xtuner/model/llava.py rename xtuner/model/modules/{dispatch.py => dispatch/__init__.py} (99%) rename xtuner/model/modules/{ => dispatch}/baichuan.py (100%) rename xtuner/model/modules/{ => dispatch}/internlm.py (100%) rename xtuner/model/modules/{ => dispatch}/llama.py (100%) rename xtuner/model/modules/{ => dispatch}/yi.py (100%) create mode 100644 xtuner/model/modules/projector/__init__.py create mode 100644 xtuner/model/modules/projector/configuration_projector.py create mode 100644 xtuner/model/modules/projector/modeling_projector.py create mode 100644 xtuner/tools/mmbench.py diff --git a/README.md b/README.md index a4f6ebda7..07d37cb0c 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,8 @@ English | [简体中文](README_zh-CN.md) ## 🎉 News -- **\[2023/12\]** 🔥 Support [Mixtral 8x7b](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model! To get started, please check out the [docs](xtuner/configs/mixtral/README.md)! 
+- **\[2023/12\]** 🔥 Support multi-modal VLM pretraining and fine-tuning with [LLaVA-v1.5](https://github.com/haotian-liu/LLaVA) architecture! Click [here](xtuner/configs/llava/README.md) for details! +- **\[2023/12\]** 🔥 Support [Mixtral 8x7b](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) model! Click [here](xtuner/configs/mixtral/README.md) for details! - **\[2023/11\]** Support [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b) model! - **\[2023/10\]** Support [MSAgent-Bench](https://modelscope.cn/datasets/damo/MSAgent-Bench) dataset, and the fine-tuned LLMs can be applied by [Lagent](https://github.com/InternLM/lagent)! - **\[2023/10\]** Optimize the data processing to accommodate `system` context. More information can be found on [Docs](docs/en/user_guides/dataset_format.md)! @@ -267,6 +268,18 @@ We appreciate all contributions to XTuner. Please refer to [CONTRIBUTING.md](.gi - [Llama 2](https://github.com/facebookresearch/llama) - [QLoRA](https://github.com/artidoro/qlora) - [LMDeploy](https://github.com/InternLM/lmdeploy) +- [LLaVA](https://github.com/haotian-liu/LLaVA) + +## 🖊️ Citation + +```bibtex +@misc{2023xtuner, + title={XTuner: A Toolkit for Efficiently Fine-tuning LLM}, + author={XTuner Contributors}, + howpublished = {\url{https://github.com/InternLM/xtuner}}, + year={2023} +} +``` ## License diff --git a/README_zh-CN.md b/README_zh-CN.md index 9d2df123d..bf0fb7076 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -23,6 +23,7 @@ ## 🎉 更新 +- **\[2023/12\]** 🔥 支持多模态模型 VLM([LLaVA-v1.5](https://github.com/haotian-liu/LLaVA))预训练和指令微调!快速开始请查阅此[文档](xtuner/configs/llava/README_zh.md)! - **\[2023/12\]** 🔥 支持 [Mixtral 8x7b](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) 模型!快速开始请查阅此[文档](xtuner/configs/mixtral/README.md)! - **\[2023/11\]** 支持 [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b) 模型! - **\[2023/10\]** 支持 [MSAgent-Bench](https://modelscope.cn/datasets/damo/MSAgent-Bench) 数据集,并且微调所得大语言模型可应用至 [Lagent](https://github.com/InternLM/lagent) 框架! @@ -267,6 +268,18 @@ xtuner chat meta-llama/Llama-2-7b-hf --adapter xtuner/Llama-2-7b-qlora-moss-003- - [Llama 2](https://github.com/facebookresearch/llama) - [QLoRA](https://github.com/artidoro/qlora) - [LMDeploy](https://github.com/InternLM/lmdeploy) +- [LLaVA](https://github.com/haotian-liu/LLaVA) + +## 🖊️ 引用 + +```bibtex +@misc{2023xtuner, + title={XTuner: A Toolkit for Efficiently Fine-tuning LLM}, + author={XTuner Contributors}, + howpublished = {\url{https://github.com/InternLM/xtuner}}, + year={2023} +} +``` ## 开源许可证 diff --git a/docs/en/user_guides/dataset_prepare.md b/docs/en/user_guides/dataset_prepare.md index 8e6481f23..b56c749fa 100644 --- a/docs/en/user_guides/dataset_prepare.md +++ b/docs/en/user_guides/dataset_prepare.md @@ -5,6 +5,7 @@ - [Arxiv Gentitle](#arxiv-gentitle) - [MOSS-003-SFT](#moss-003-sft) - [Chinese Lawyer](#chinese-lawyer) + - [LLaVA dataset](#llava-dataset) ## HuggingFace datasets @@ -55,3 +56,78 @@ unzip moss-003-sft-with-tools-no-text2image.zip Chinese Lawyer dataset has two sub-dataset, and can be downloaded form https://github.com/LiuHC0428/LAW-GPT. All lawyer configs assume the dataset path to be `./data/CrimeKgAssitant清洗后_52k.json` and `./data/训练数据_带法律依据_92k.json`. You can move and rename your data, or make changes to these configs. 
+ +### LLaVA dataset + +#### File structure + +``` +./data/llava_data +├── LLaVA-Pretrain +│   ├── blip_laion_cc_sbu_558k.json +│   ├── blip_laion_cc_sbu_558k_meta.json +│   └── images +├── LLaVA-Instruct-150K +│   └── llava_v1_5_mix665k.json +└── llava_images +    ├── coco +    │ └── train2017 +    ├── gqa +    │ └── images +    ├── ocr_vqa +    │ └── images +    ├── textvqa +    │ └── train_images +    └── vg +       ├── VG_100K +    └── VG_100K_2 +``` + +#### Pretrain + +LLaVA-Pretrain + +```shell +# Make sure you have git-lfs installed (https://git-lfs.com) +git lfs install +git clone https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain --depth=1 +``` + +#### Finetune + +1. Text data + + 1. LLaVA-Instruct-150K + + ```shell + # Make sure you have git-lfs installed (https://git-lfs.com) + git lfs install + git clone https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K --depth=1 + ``` + +2. Image data + + 1. COCO (coco): [train2017](http://images.cocodataset.org/zips/train2017.zip) + + 2. GQA (gqa): [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip) + + 3. OCR-VQA (ocr_vqa): [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing) + + 1. ⚠️ Modify the name of OCR-VQA's images to keep the extension as `.jpg`! + + ```shell + #!/bin/bash + ocr_vqa_path="" + + find "$target_dir" -type f | while read file; do + extension="${file##*.}" + if [ "$extension" != "jpg" ] + then + cp -- "$file" "${file%.*}.jpg" + fi + done + ``` + + 4. TextVQA (textvqa): [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) + + 5. VisualGenome (VG): [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip) diff --git a/docs/zh_cn/user_guides/dataset_prepare.md b/docs/zh_cn/user_guides/dataset_prepare.md index cfad38927..9154a299d 100644 --- a/docs/zh_cn/user_guides/dataset_prepare.md +++ b/docs/zh_cn/user_guides/dataset_prepare.md @@ -5,6 +5,7 @@ - [Arxiv Gentitle 生成题目](#arxiv-gentitle-生成题目) - [MOSS-003-SFT](#moss-003-sft) - [Chinese Lawyer](#chinese-lawyer) + - [LLaVA dataset](#llava-dataset) ## HuggingFace 数据集 @@ -55,3 +56,78 @@ unzip moss-003-sft-with-tools-no-text2image.zip Chinese Lawyer 数据集有两个子数据集,它们可以在 https://github.com/LiuHC0428/LAW-GPT 下载。 所有的 Chinese Lawyer 配置文件都假设数据集路径为 `./data/CrimeKgAssitant清洗后_52k.json` 和 `./data/训练数据_带法律依据_92k.json`。用户可以移动并重命名数据,或者在配置文件中重新设置数据路径。 + +### LLaVA dataset + +#### 文件结构 + +``` +./data/llava_data +├── LLaVA-Pretrain +│   ├── blip_laion_cc_sbu_558k.json +│   ├── blip_laion_cc_sbu_558k_meta.json +│   └── images +├── LLaVA-Instruct-150K +│   └── llava_v1_5_mix665k.json +└── llava_images +    ├── coco +    │ └── train2017 +    ├── gqa +    │ └── images +    ├── ocr_vqa +    │ └── images +    ├── textvqa +    │ └── train_images +    └── vg +       ├── VG_100K +    └── VG_100K_2 +``` + +#### 预训练 Pretrain + +LLaVA-Pretrain + +```shell +# Make sure you have git-lfs installed (https://git-lfs.com) +git lfs install +git clone https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain --depth=1 +``` + +#### 微调 Finetune + +1. 文本数据 + + 1. LLaVA-Instruct-150K + + ```shell + # Make sure you have git-lfs installed (https://git-lfs.com) + git lfs install + git clone https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K --depth=1 + ``` + +2. 图片数据 + + 1. COCO (coco): [train2017](http://images.cocodataset.org/zips/train2017.zip) + + 2. 
GQA (gqa): [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip) + + 3. OCR-VQA (ocr_vqa): [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing) + + 1. ⚠️ OCR-VQA 所下载的图片命名需要进行修改,以确保所有图片后缀为 `.jpg`! + + ```shell + #!/bin/bash + ocr_vqa_path="" + + find "$target_dir" -type f | while read file; do + extension="${file##*.}" + if [ "$extension" != "jpg" ] + then + cp -- "$file" "${file%.*}.jpg" + fi + done + ``` + + 4. TextVQA (textvqa): [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip) + + 5. VisualGenome (VG): [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index d1a63785a..9be3701ab 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -8,6 +8,7 @@ lagent>=0.1.2 # Minimum 0.10.1 to support exclude_frozen_parameters for DeepSpeedStrategy, # see https://github.com/open-mmlab/mmengine/pull/1415, https://github.com/open-mmlab/mmengine/pull/1424 mmengine>=0.10.1 +openpyxl # Minimum 0.4.0 to support QLoRA, see https://github.com/huggingface/peft/pull/476 peft>=0.4.0 scipy diff --git a/xtuner/configs/internlm/internlm_7b/internlm_7b_full_intern_repo_dataset_template.py b/xtuner/configs/internlm/internlm_7b/internlm_7b_full_intern_repo_dataset_template.py index 98b46d805..cd000f254 100644 --- a/xtuner/configs/internlm/internlm_7b/internlm_7b_full_intern_repo_dataset_template.py +++ b/xtuner/configs/internlm/internlm_7b/internlm_7b_full_intern_repo_dataset_template.py @@ -100,7 +100,9 @@ ####################################################################### # Log the dialogue periodically during the training process, optional custom_hooks = [ - dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=DatasetInfoHook, tokenizer=tokenizer, + is_intern_repo_dataset=True), dict(type=ThroughputHook) ] diff --git a/xtuner/configs/llava/README.md b/xtuner/configs/llava/README.md new file mode 100644 index 000000000..35990bd8a --- /dev/null +++ b/xtuner/configs/llava/README.md @@ -0,0 +1,92 @@ +# LLaVA Full Pipeline + +## Data Preparation + +Please refer to the [docs](../../../docs/en/user_guides/dataset_prepare.md#llava-dataset). + +## Training + +The training of LLaVA consists of two steps: alignment module (i.e., MLP) pretraining and instruction following fine-tuning + +Note: this guide takes 8-card training LLaVA-InternLM as an example, if there are insufficient GPU resources or memory during actual use, you can reduce the batchsize appropriately to decrease memory consumption. The Pretrained projector is saved and re-loaded by default in `./work_dirs/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth`. + +1. Alignment module pretraining (saved by default in `./work_dirs/`) + +```bash +NPROC_PER_NODE=8 xtuner train llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain --deepspeed deepspeed_zero2 +``` + +2. Instruction following fine-tuning (saved by default in `./work_dirs/`) + +```bash +NPROC_PER_NODE=8 xtuner train llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune --deepspeed deepspeed_zero2 +``` + +## Model Convert (and Merge) + +After training, we will obtain a set of weights (*i.e.*, `epoch_1.pth`), which are not in the universal HuggingFace format. We first need to convert them. 
+
+```bash
+xtuner convert pth_to_hf $FINETUNE_CFG $PTH_PATH $SAVE_PATH
+# e.g., xtuner convert pth_to_hf llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune ./epoch_1.pth ./epoch_1_hf
+```
+
+At this point, we have obtained the relevant model (the LLM or the corresponding LoRA).
+
+Afterwards, if you want to merge the LoRA into the LLM or CLIP-ViT, please use the following commands:
+
+```bash
+(LLM) xtuner convert merge $LLM $LLM_ADAPTER $SAVE_PATH
+(CLIP) xtuner convert merge $CLIP $CLIP_ADAPTER $SAVE_PATH --is-clip
+```
+
+## Chat
+
+You can download the released LLaVA-InternLM-7B model from 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-internlm-7b) and 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-internlm-7b), and achieve image-text question answering with the following command!
+
+```bash
+xtuner chat internlm/internlm-chat-7b \
+  --visual-encoder openai/clip-vit-large-patch14-336 \
+  --llava xtuner/llava-internlm-7b \
+  --prompt-template internlm_chat \
+  --image $IMAGE_PATH
+```
+
+Here, `--llava` is the converted weight from the above step (in our example, it is `./epoch_1_hf`).
+
+## Evaluation
+
+XTuner's LLaVA models can be evaluated using [VLMEvalKit](https://github.com/open-compass/VLMEvalKit).
+
+For convenience, XTuner also integrates the [MMBench](https://mmbench.opencompass.org.cn/home) evaluation.
+
+Users can download the MMBench datasets with
+
+```
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_DEV_EN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_TEST_EN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_DEV_CN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_TEST_CN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/CCBench.tsv
+```
+
+After that, the evaluations can be run with
+
+```bash
+xtuner mmbench internlm/internlm-chat-7b \
+  --visual-encoder openai/clip-vit-large-patch14-336 \
+  --llava xtuner/llava-internlm-7b \
+  --prompt-template internlm_chat \
+  --data-path $DATA_PATH \
+  --work-dir $RESULT_PATH
+```
+
+Here, `$DATA_PATH` refers to one of the datasets downloaded above, such as `MMBench_DEV_EN.tsv`.
+
+After the evaluation is completed, if it is a development set, the results will be printed directly; if it is a test set, you need to submit `mmbench_result.xlsx` to the official MMBench for final evaluation to obtain the accuracy results!
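+
+For reference, the alignment module learned in the first (pretraining) stage, and re-loaded via `pretrained_pth` for fine-tuning, is a small projector that maps CLIP visual features into the LLM embedding space. The actual implementation added by this PR lives in `xtuner/model/modules/projector/modeling_projector.py`; the snippet below is only a minimal sketch, assuming a two-layer GELU MLP and typical hidden sizes (1024 for CLIP ViT-L/14-336, 4096 for a 7B LLM), and may differ from the real module.
+
+```python
+import torch
+from torch import nn
+
+
+class ProjectorSketch(nn.Module):
+    """Illustrative projector: visual features -> LLM embedding space."""
+
+    def __init__(self, visual_hidden_size=1024, llm_hidden_size=4096):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Linear(visual_hidden_size, llm_hidden_size),
+            nn.GELU(),
+            nn.Linear(llm_hidden_size, llm_hidden_size),
+        )
+
+    def forward(self, visual_feats: torch.Tensor) -> torch.Tensor:
+        # visual_feats: (batch, num_patches, visual_hidden_size); a 336x336
+        # image with 14x14 patches yields (336 / 14) ** 2 = 576 patches.
+        return self.layers(visual_feats)
+
+
+projector = ProjectorSketch()
+print(projector(torch.randn(1, 576, 1024)).shape)  # torch.Size([1, 576, 4096])
+```
+
+During pretraining, the LLM and CLIP-ViT are frozen and only this projector is trained; during fine-tuning it is initialized from `epoch_1.pth` and updated together with the LLM (or its LoRA adapters).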
+ +| Model | MMBench Test (EN) | MMBench Dev (EN) | MMBench Test (CN) | MMBench Dev (CN) | CCBench Dev | MME | MMVet | SEEDBench_IMG | Configs | Pretrained Projector Checkpoints | Fine-tuned LLaVA Checkpoints | +| :------------------------- | :---------------: | :--------------: | :---------------: | :--------------: | :---------: | :--: | :---: | :-----------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | +| LLaVA-v1.5-7B (XTuner) | 67.7 | 69.2 | 61.0 | 59.7 | 27.6 | 1702 | 66.4 | 32.3 | [Pretrain](./vicuna_7b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py) / [Fine-tune](./vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-7b-xtuner-pretrain) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-7b-xtuner-pretrain) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-7b-xtuner) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-7b-xtuner) | +| LLaVA-v1.5-13B (XTuner) | 68.9 | 69.5 | 64.7 | 63.1 | 32.2 | 1771 | 68.1 | 35.5 | [Pretrain](./vicuna_13b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py) / [Fine-tune](./vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-13b-xtuner-pretrain) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-13b-xtuner-pretrain) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-13b-xtuner) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-13b-xtuner) | +| LLaVA-InternLM-7B (XTuner) | 69.0 | 68.5 | 66.7 | 63.8 | 35.8 | 1671 | 65.8 | 33.8 | [Pretrain](./internlm_chat_7b_clip_vit_large_p14_336/pretrain/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain.py) / [Fine-tune](./internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-internlm-7b-pretrain) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-internlm-7b-pretrain) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-internlm-7b) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-internlm-7b) | diff --git a/xtuner/configs/llava/README_zh.md b/xtuner/configs/llava/README_zh.md new file mode 100644 index 000000000..c7fbe9b99 --- /dev/null +++ b/xtuner/configs/llava/README_zh.md @@ -0,0 +1,90 @@ +# LLaVA 全流程 + +## 数据准备 + +请参考[文档](../../../docs/zh_cn/user_guides/dataset_prepare.md#llava-dataset)。 + +## 训练流程 + +LLaVA 训练一共分为两步:对齐模块预训练、指令跟随微调(本指南以 8 卡训练 LLaVA-InternLM 为例,实际使用时如遇到显卡数量不足、显存不足等情况可以适当调低 batchsize 来降低显存开销) + +预训练的 Projector 默认保存在 `./work_dirs/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth`,并且指令微调阶段将默认在此路径载入 Projector 权重。 + +1. 
对齐模块训练(默认保存在 `./work_dirs/`)
+
+```bash
+NPROC_PER_NODE=8 xtuner train llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain --deepspeed deepspeed_zero2
+```
+
+2. 指令跟随微调(默认保存在 `./work_dirs/`)
+
+```bash
+NPROC_PER_NODE=8 xtuner train llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune --deepspeed deepspeed_zero2
+```
+
+## 模型转换(和合并)
+
+训练后,我们将获得一组权重(即 `epoch_1.pth`),但它并不是通用的 HuggingFace 格式,我们需要对其进行转换。
+
+```bash
+xtuner convert pth_to_hf $FINETUNE_CFG $PTH_PATH $SAVE_PATH
+# e.g., xtuner convert pth_to_hf llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune ./epoch_1.pth ./epoch_1_hf
+```
+
+此时,我们将获得所需要的模型(LLM 或对应的 LoRA)。
+
+之后,如果想要合并 LoRA 至 LLM 或 CLIP-ViT 中,请使用下列命令:
+
+```bash
+(LLM) xtuner convert merge $LLM $LLM_ADAPTER $SAVE_PATH
+(CLIP) xtuner convert merge $CLIP $CLIP_ADAPTER $SAVE_PATH --is-clip
+```
+
+## 对话测试
+
+开源的 LLaVA-InternLM-7B 模型在 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-internlm-7b) 和 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-internlm-7b) 都可以下载,您可以利用下列命令实现图文问答!
+
+```bash
+xtuner chat internlm/internlm-chat-7b \
+  --visual-encoder openai/clip-vit-large-patch14-336 \
+  --llava xtuner/llava-internlm-7b \
+  --prompt-template internlm_chat \
+  --image $IMAGE_PATH
+```
+
+此处,`--llava` 请传入模型转换阶段所获得的权重(示例中为 `./epoch_1_hf`)。
+
+## 评测
+
+XTuner 的 LLaVA 模型可以利用 [VLMEvalKit](https://github.com/open-compass/VLMEvalKit) 进行评测。
+
+同时,为了方便使用,XTuner 内也集成了 MMBench 评测,您可以通过下列命令下载 MMBench 评测数据集:
+
+```
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_DEV_EN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_TEST_EN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_DEV_CN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/MMBench_TEST_CN.tsv
+wget https://opencompass.openxlab.space/utils/VLMEval/CCBench.tsv
+```
+
+之后,您可以利用下列命令实现评测:
+
+```bash
+xtuner mmbench internlm/internlm-chat-7b \
+  --visual-encoder openai/clip-vit-large-patch14-336 \
+  --llava xtuner/llava-internlm-7b \
+  --prompt-template internlm_chat \
+  --data-path $DATA_PATH \
+  --work-dir $RESULT_PATH
+```
+
+其中,`$DATA_PATH` 指上一步骤所下载的某一个 tsv 文件,如 `MMBench_DEV_EN.tsv`。
+
+评测完成后,若为开发集则会直接打印出结果;若为测试集,则需将 `mmbench_result.xlsx` 提交至 MMBench 官方完成评测取得精度结果!
+ +| 模型 | MMBench Test (EN) | MMBench Dev (EN) | MMBench Test (CN) | MMBench Dev (CN) | CCBench Dev | MME | MMVet | SEEDBench_IMG | 配置文件 | 预训练 Projector 权重 | 微调 LLaVA 权重 | +| :------------------------- | :---------------: | :--------------: | :---------------: | :--------------: | :---------: | :--: | :---: | :-----------: | :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------: | +| LLaVA-v1.5-7B (XTuner) | 67.7 | 69.2 | 61.0 | 59.7 | 27.6 | 1702 | 66.4 | 32.3 | [Pretrain](./vicuna_7b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py) / [Fine-tune](./vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-7b-xtuner-pretrain) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-7b-xtuner-pretrain) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-7b-xtuner) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-7b-xtuner) | +| LLaVA-v1.5-13B (XTuner) | 68.9 | 69.5 | 64.7 | 63.1 | 32.2 | 1771 | 68.1 | 35.5 | [Pretrain](./vicuna_13b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py) / [Fine-tune](./vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-13b-xtuner-pretrain) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-13b-xtuner-pretrain) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-v1.5-13b-xtuner) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-v1.5-13b-xtuner) | +| LLaVA-InternLM-7B (XTuner) | 69.0 | 68.5 | 66.7 | 63.8 | 35.8 | 1671 | 65.8 | 33.8 | [Pretrain](./internlm_chat_7b_clip_vit_large_p14_336/pretrain/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain.py) / [Fine-tune](./internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-internlm-7b-pretrain) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-internlm-7b-pretrain) | 🤗 [HuggingFace](https://huggingface.co/xtuner/llava-internlm-7b) / 🤖 [ModelScope](https://modelscope.cn/models/xtuner/llava-internlm-7b) | diff --git a/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_finetune.py b/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_finetune.py new file mode 100644 index 000000000..7a863a199 --- /dev/null +++ b/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_finetune.py @@ -0,0 +1,194 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
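+# Note: in this config the CLIP-ViT is frozen and the full LLM is fine-tuned
+# on the llava_v1_5_mix665k data. `max_length` below is
+# int(2048 - (336 / 14)**2) = 2048 - 576 = 1472: a 336x336 image with 14x14
+# patches contributes 576 visual tokens, leaving the rest of the 2048-token
+# context for text.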
+import torch +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + CLIPImageProcessor, CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'internlm/internlm-chat-7b' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' +# Specify the pretrained pth +pretrained_pth = './work_dirs/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth' # noqa: E501 + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json' +image_folder = data_root + 'llava_images' +prompt_template = PROMPT_TEMPLATE.internlm_chat +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 16 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 2e-5 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=False, + freeze_visual_encoder=True, + pretrained_pth=pretrained_pth, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float32), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path)) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=True) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * 
accumulative_counts), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. + sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py b/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py new file mode 100644 index 000000000..f80477572 --- /dev/null +++ b/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/finetune/llava_internlm_chat_7b_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py @@ -0,0 +1,214 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
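+# Note: unlike the full fine-tune config, this variant keeps both the LLM and
+# the CLIP-ViT frozen, loads the LLM with 4-bit NF4 quantization (QLoRA) and
+# instead trains LoRA adapters: r=512 / alpha=256 on the LLM and r=64 /
+# alpha=16 on the visual encoder, with a higher learning rate (2e-4).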
+import torch +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from peft import LoraConfig +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'internlm/internlm-chat-7b' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' +# Specify the pretrained pth +pretrained_pth = './work_dirs/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth' # noqa: E501 + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json' +image_folder = data_root + 'llava_images' +prompt_template = PROMPT_TEMPLATE.internlm_chat +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 16 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 2e-4 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=True, + freeze_visual_encoder=True, + pretrained_pth=pretrained_pth, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float16, + quantization_config=dict( + type=BitsAndBytesConfig, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4')), + llm_lora=dict( + type=LoraConfig, + r=512, + lora_alpha=256, + lora_dropout=0.05, + bias='none', + task_type='CAUSAL_LM'), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path), + visual_encoder_lora=dict( + type=LoraConfig, r=64, lora_alpha=16, lora_dropout=0.05, bias='none')) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + 
type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=True) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * accumulative_counts), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/pretrain/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain.py b/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/pretrain/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain.py new file mode 100644 index 000000000..f753acdb7 --- /dev/null +++ b/xtuner/configs/llava/internlm_chat_7b_clip_vit_large_p14_336/pretrain/llava_internlm_chat_7b_clip_vit_large_p14_336_e1_gpu8_pretrain.py @@ -0,0 +1,198 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.dataset import DefaultSampler +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'internlm/internlm-chat-7b' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Pretrain/blip_laion_cc_sbu_558k.json' +image_folder = data_root + 'LLaVA-Pretrain/images' +prompt_template = PROMPT_TEMPLATE.internlm_chat +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 32 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 1e-3 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=True, + freeze_visual_encoder=True, + llm=dict( + 
type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float16, + quantization_config=dict( + type=BitsAndBytesConfig, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4')), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path)) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=False) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py b/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py new file mode 100644 index 000000000..26d5154bc --- /dev/null +++ b/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py @@ -0,0 +1,194 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + CLIPImageProcessor, CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'lmsys/vicuna-13b-v1.5' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' +# Specify the pretrained pth +pretrained_pth = './work_dirs/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth' # noqa: E501 + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json' +image_folder = data_root + 'llava_images' +prompt_template = PROMPT_TEMPLATE.vicuna +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 16 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 2e-5 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + 
trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=False, + freeze_visual_encoder=True, + pretrained_pth=pretrained_pth, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float32), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path)) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=True) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * accumulative_counts), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py b/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py new file mode 100644 index 000000000..86526d35f --- /dev/null +++ b/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_13b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py @@ -0,0 +1,214 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from peft import LoraConfig +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'lmsys/vicuna-13b-v1.5' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' +# Specify the pretrained pth +pretrained_pth = './work_dirs/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth' # noqa: E501 + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json' +image_folder = data_root + 'llava_images' +prompt_template = PROMPT_TEMPLATE.vicuna +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 16 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 2e-4 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, 
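+ # Built lazily by BUILDER at runtime; this processor resizes, center-crops and
+ # normalises images to the 336x336 input expected by CLIP ViT-L/14-336.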
+ pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=True, + freeze_visual_encoder=True, + pretrained_pth=pretrained_pth, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float16, + quantization_config=dict( + type=BitsAndBytesConfig, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4')), + llm_lora=dict( + type=LoraConfig, + r=512, + lora_alpha=256, + lora_dropout=0.05, + bias='none', + task_type='CAUSAL_LM'), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path), + visual_encoder_lora=dict( + type=LoraConfig, r=64, lora_alpha=16, lora_dropout=0.05, bias='none')) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=True) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * accumulative_counts), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. 
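+ # interval=10 below, i.e. a log record is emitted every 10 iterations.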
+ logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. + sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py b/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py new file mode 100644 index 000000000..a359d1ef1 --- /dev/null +++ b/xtuner/configs/llava/vicuna_13b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_13b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py @@ -0,0 +1,198 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.dataset import DefaultSampler +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'lmsys/vicuna-13b-v1.5' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Pretrain/blip_laion_cc_sbu_558k.json' +image_folder = data_root + 'LLaVA-Pretrain/images' +prompt_template = PROMPT_TEMPLATE.vicuna +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 32 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 1e-3 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = 
dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=True, + freeze_visual_encoder=True, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float16, + quantization_config=dict( + type=BitsAndBytesConfig, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4')), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path)) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=False) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py b/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py new file mode 100644 index 000000000..174797c92 --- /dev/null +++ b/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_finetune.py @@ -0,0 +1,194 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + CLIPImageProcessor, CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'lmsys/vicuna-7b-v1.5' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' +# Specify the pretrained pth +pretrained_pth = './work_dirs/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth' # noqa: E501 + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json' +image_folder = data_root + 'llava_images' +prompt_template = PROMPT_TEMPLATE.vicuna +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 16 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 2e-5 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + 
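The max_length value above is the text budget left after reserving one sequence position per visual patch: a 336-pixel CLIP input split into 14x14 patches yields (336 / 14)**2 = 576 patch embeddings, leaving 2048 - 576 = 1472 positions for text. A quick sanity check, using only the numbers that appear in PART 1:

image_size, patch_size, context_len = 336, 14, 2048
num_visual_tokens = (image_size // patch_size) ** 2   # 24 * 24 = 576
max_length = context_len - num_visual_tokens          # 2048 - 576 = 1472
assert max_length == int(2048 - (336 / 14) ** 2) == 1472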
+model = dict( + type=LLaVAModel, + freeze_llm=False, + freeze_visual_encoder=True, + pretrained_pth=pretrained_pth, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float32), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path)) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=True) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * accumulative_counts), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py b/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py new file mode 100644 index 000000000..5758f4d5f --- /dev/null +++ b/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/finetune/llava_vicuna_7b_v15_qlora_clip_vit_large_p14_336_lora_e1_gpu8_finetune.py @@ -0,0 +1,214 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from peft import LoraConfig +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.dataset.samplers import LengthGroupedSampler +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'lmsys/vicuna-7b-v1.5' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' +# Specify the pretrained pth +pretrained_pth = './work_dirs/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain/epoch_1.pth' # noqa: E501 + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json' +image_folder = data_root + 'llava_images' +prompt_template = PROMPT_TEMPLATE.vicuna +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 16 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 2e-4 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + type=CLIPImageProcessor.from_pretrained, + 
pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=True, + freeze_visual_encoder=True, + pretrained_pth=pretrained_pth, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float16, + quantization_config=dict( + type=BitsAndBytesConfig, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4')), + llm_lora=dict( + type=LoraConfig, + r=512, + lora_alpha=256, + lora_dropout=0.05, + bias='none', + task_type='CAUSAL_LM'), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path), + visual_encoder_lora=dict( + type=LoraConfig, r=64, lora_alpha=16, lora_dropout=0.05, bias='none')) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=True) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict( + type=LengthGroupedSampler, + length_property='modality_length', + per_device_batch_size=batch_size * accumulative_counts), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. 
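+ # interval=10 below, i.e. a log record is emitted every 10 iterations.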
+ logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. + sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py b/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py new file mode 100644 index 000000000..94a046c52 --- /dev/null +++ b/xtuner/configs/llava/vicuna_7b_v15_clip_vit_large_p14_336/pretrain/llava_vicuna_7b_v15_clip_vit_large_p14_336_e1_gpu8_pretrain.py @@ -0,0 +1,198 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.dataset import DefaultSampler +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from torch.optim import AdamW +from transformers import (AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel) + +from xtuner.dataset import LLaVADataset +from xtuner.dataset.collate_fns import default_collate_fn +from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory +from xtuner.engine import DatasetInfoHook, EvaluateChatHook +from xtuner.model import LLaVAModel +from xtuner.utils import PROMPT_TEMPLATE + +####################################################################### +# PART 1 Settings # +####################################################################### +# Model +llm_name_or_path = 'lmsys/vicuna-7b-v1.5' +visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336' + +# Data +data_root = './data/llava_data/' +data_path = data_root + 'LLaVA-Pretrain/blip_laion_cc_sbu_558k.json' +image_folder = data_root + 'LLaVA-Pretrain/images' +prompt_template = PROMPT_TEMPLATE.vicuna +max_length = int(2048 - (336 / 14)**2) + +# Scheduler & Optimizer +batch_size = 32 # per_device +accumulative_counts = 1 +dataloader_num_workers = 0 +max_epochs = 1 +optim_type = AdamW +lr = 1e-3 +betas = (0.9, 0.999) +weight_decay = 0 +max_norm = 1 # grad clip +warmup_ratio = 0.03 + +# Evaluate the generation performance during the training +evaluation_freq = 500 +SYSTEM = '' +evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg' +evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture'] + +####################################################################### +# PART 2 Model & Tokenizer & Image Processor # +####################################################################### +tokenizer = dict( + type=AutoTokenizer.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + padding_side='right') + +image_processor = dict( + 
type=CLIPImageProcessor.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path, + trust_remote_code=True) + +model = dict( + type=LLaVAModel, + freeze_llm=True, + freeze_visual_encoder=True, + llm=dict( + type=AutoModelForCausalLM.from_pretrained, + pretrained_model_name_or_path=llm_name_or_path, + trust_remote_code=True, + torch_dtype=torch.float16, + quantization_config=dict( + type=BitsAndBytesConfig, + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4')), + visual_encoder=dict( + type=CLIPVisionModel.from_pretrained, + pretrained_model_name_or_path=visual_encoder_name_or_path)) + +####################################################################### +# PART 3 Dataset & Dataloader # +####################################################################### +llava_dataset = dict( + type=LLaVADataset, + data_path=data_path, + image_folder=image_folder, + tokenizer=tokenizer, + image_processor=image_processor, + dataset_map_fn=llava_map_fn, + template_map_fn=dict( + type=template_map_fn_factory, template=prompt_template), + max_length=max_length, + pad_image_to_square=False) + +train_dataloader = dict( + batch_size=batch_size, + num_workers=dataloader_num_workers, + dataset=llava_dataset, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type=default_collate_fn)) + +####################################################################### +# PART 4 Scheduler & Optimizer # +####################################################################### +# optimizer +optim_wrapper = dict( + type=AmpOptimWrapper, + optimizer=dict( + type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay), + clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False), + accumulative_counts=accumulative_counts, + loss_scale='dynamic', + dtype='float16') + +# learning policy +# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501 +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-5, + by_epoch=True, + begin=0, + end=warmup_ratio * max_epochs, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=0.0, + by_epoch=True, + begin=warmup_ratio * max_epochs, + T_max=max_epochs, + convert_to_iter_based=True) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1) + +####################################################################### +# PART 5 Runtime # +####################################################################### +# Log the dialogue periodically during the training process, optional +custom_hooks = [ + dict(type=DatasetInfoHook, tokenizer=tokenizer), + dict( + type=EvaluateChatHook, + tokenizer=tokenizer, + image_processor=image_processor, + every_n_iters=evaluation_freq, + evaluation_inputs=evaluation_inputs, + evaluation_images=evaluation_images, + system=SYSTEM, + prompt_template=prompt_template) +] + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=10), + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + # set sampler seed in distributed evrionment. 
+ sampler_seed=dict(type=DistSamplerSeedHook), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +visualizer = None + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/xtuner/dataset/__init__.py b/xtuner/dataset/__init__.py index 556cb005d..c58cf44cb 100644 --- a/xtuner/dataset/__init__.py +++ b/xtuner/dataset/__init__.py @@ -2,10 +2,14 @@ from .concat_dataset import ConcatDataset from .huggingface import process_hf_dataset from .intern_repo import process_intern_repo_dataset +from .llava import LLaVADataset from .modelscope import process_ms_dataset from .moss_sft import MOSSSFTDataset +from .utils import decode_base64_to_image, expand2square, load_image __all__ = [ 'process_hf_dataset', 'ConcatDataset', 'MOSSSFTDataset', - 'process_ms_dataset', 'process_intern_repo_dataset' + 'process_ms_dataset', 'LLaVADataset', 'expand2square', + 'decode_base64_to_image', 'load_image', 'process_ms_dataset', + 'process_intern_repo_dataset' ] diff --git a/xtuner/dataset/collate_fns/defalut_collate_fn.py b/xtuner/dataset/collate_fns/defalut_collate_fn.py index bd34910cf..f400dabb2 100644 --- a/xtuner/dataset/collate_fns/defalut_collate_fn.py +++ b/xtuner/dataset/collate_fns/defalut_collate_fn.py @@ -13,9 +13,14 @@ def default_collate_fn( return_hf_format: bool = False) -> Dict[str, torch.Tensor]: input_ids = [] labels = [] + has_image = any(inst.get('pixel_values') is not None for inst in instances) + if has_image: + pixel_values = [] for example in instances: input_ids.append(torch.tensor(example['input_ids'])) labels.append(torch.tensor(example['labels'])) + if has_image: + pixel_values.append(example['pixel_values']) if len(instances) > 1: input_ids = pad_sequence( input_ids, batch_first=True, padding_value=pad_index) @@ -30,6 +35,9 @@ def default_collate_fn( 'attention_mask': input_ids.ne(pad_index), 'labels': labels } + if has_image: + pixel_values = torch.stack(pixel_values) + data_dict['pixel_values'] = pixel_values if return_hf_format: return data_dict diff --git a/xtuner/dataset/concat_dataset.py b/xtuner/dataset/concat_dataset.py index ee25533f6..ed581d204 100644 --- a/xtuner/dataset/concat_dataset.py +++ b/xtuner/dataset/concat_dataset.py @@ -1,10 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. 
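The __getattr__ override added below forwards any unknown attribute to every sub-dataset and returns the per-dataset values as a list, which lets a ConcatDataset expose attributes such as modality_length or length; LengthGroupedSampler flattens such nested lists before grouping. A minimal, self-contained sketch of the same forwarding pattern, using toy datasets rather than xtuner's config-built ones:

from torch.utils.data import ConcatDataset as TorchConcatDataset, Dataset

class ToyDataset(Dataset):
    def __init__(self, lengths):
        self.modality_length = lengths  # positive: has image, negative: text-only
    def __len__(self):
        return len(self.modality_length)
    def __getitem__(self, idx):
        return self.modality_length[idx]

class ForwardingConcat(TorchConcatDataset):
    def __getattr__(self, name):
        # same idea as the override below: gather the attribute from each sub-dataset
        return [getattr(d, name) for d in self.datasets]

concat = ForwardingConcat([ToyDataset([5, -3]), ToyDataset([-7, 9])])
print(concat.modality_length)  # [[5, -3], [-7, 9]]; the sampler flattens this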
-from torch.utils.data import ConcatDataset as _ConcatDataset +from torch.utils.data import ConcatDataset as TorchConcatDataset from xtuner.registry import BUILDER -class ConcatDataset(_ConcatDataset): +class ConcatDataset(TorchConcatDataset): def __init__(self, datasets_cfg, datasets_kwargs=None): datasets = [] @@ -24,3 +24,9 @@ def __repr__(self): for name, dataset in zip(self.names, self.datasets) ]) return main_str + + def __getattr__(self, name: str): + try: + return super().__getattr__(name) + except AttributeError: + return [getattr(dataset, name) for dataset in self.datasets] diff --git a/xtuner/dataset/huggingface.py b/xtuner/dataset/huggingface.py index c6049669e..fde23d811 100644 --- a/xtuner/dataset/huggingface.py +++ b/xtuner/dataset/huggingface.py @@ -6,6 +6,7 @@ from datasets import DatasetDict from mmengine import print_log from mmengine.config import Config, ConfigDict +from mmengine.utils.misc import get_object_from_string from torch import distributed as dist from xtuner.registry import BUILDER, MAP_FUNC @@ -24,6 +25,7 @@ def process(dataset, shuffle_before_pack=True, pack_to_max_length=True, input_ids_with_output=True, + with_image_token=False, map_num_proc=32): """Post-process the dataset loaded from the Hugging Face Hub, or a local dataset. @@ -53,6 +55,9 @@ def process(dataset, input_ids_with_output: Whether to put the groundtruth output corresponding to the question into the dataset. Typically set it to True during training and False during testing. + with_image_token: Whether to convert DEFAULT_IMAGE_TOKEN to + IMAGE_TOKEN_INDEX. Typically set it to True during the training + of VLM. map_num_proc: Max number of processes when mapping the dataset. """ @@ -75,7 +80,14 @@ def process(dataset, # Extract the useful data for training from the original dataset. if dataset_map_fn is not None: if isinstance(dataset_map_fn, str): - dataset_map_fn = MAP_FUNC.get(dataset_map_fn) + map_fn_obj = MAP_FUNC.get( + dataset_map_fn) or get_object_from_string(dataset_map_fn) + if map_fn_obj is not None: + dataset_map_fn = map_fn_obj + else: + raise TypeError('dataset_map_fn must be a function or a ' + "registered function's string in MAP_FUNC, " + f"but got a string of '{dataset_map_fn}'") dataset = dataset.map(dataset_map_fn, num_proc=map_num_proc) @@ -100,7 +112,9 @@ def process(dataset, remove_unused_columns = True # remove invalid data - dataset = dataset.filter(lambda example: len(example['conversation']) > 0) + dataset = dataset.filter( + lambda example: len(example['conversation']) > 0, + num_proc=map_num_proc) # tokenize if isinstance(tokenizer, dict) or isinstance( @@ -111,6 +125,7 @@ def process(dataset, encode_fn, tokenizer=tokenizer, max_length=max_length, + with_image_token=with_image_token, input_ids_with_output=input_ids_with_output), remove_columns=list(dataset.column_names) if remove_unused_columns else None, @@ -118,7 +133,8 @@ def process(dataset, # remove data that does not have the valid labels. 
dataset = dataset.filter( - lambda example: any(label >= 0 for label in example['labels'])) + lambda example: any(label >= 0 for label in example['labels']), + num_proc=map_num_proc) # pack to max length if pack_to_max_length and split == 'train': @@ -128,6 +144,9 @@ def process(dataset, dataset = dataset.map( Packer(max_length), batched=True, num_proc=map_num_proc) + # add 'length' + setattr(dataset, 'length', [len(i['input_ids']) for i in dataset]) + return dataset diff --git a/xtuner/dataset/llava.py b/xtuner/dataset/llava.py new file mode 100644 index 000000000..a668697f7 --- /dev/null +++ b/xtuner/dataset/llava.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os + +import torch +from datasets import Dataset as HFDataset +from datasets import DatasetDict +from mmengine.config import Config, ConfigDict +from PIL import Image +from torch.utils.data import Dataset + +from xtuner.registry import BUILDER +from .huggingface import process_hf_dataset +from .utils import expand2square + + +class LLaVADataset(Dataset): + + def __init__(self, + data_path, + image_folder, + tokenizer, + image_processor, + max_dataset_length=None, + dataset_map_fn=None, + template_map_fn=None, + max_length=2048, + pad_image_to_square=False): + super().__init__() + + json_data = json.load(open(data_path)) + for idx in range(len(json_data)): + if isinstance(json_data[idx]['id'], int): + json_data[idx]['id'] = str(json_data[idx]['id']) + json_data = DatasetDict({'train': HFDataset.from_list(json_data)}) + self.text_data = process_hf_dataset( + dataset=json_data, + tokenizer=tokenizer, + max_length=max_length, + dataset_map_fn=dataset_map_fn, + template_map_fn=template_map_fn, + split='train', + max_dataset_length=max_dataset_length, + remove_unused_columns=False, + pack_to_max_length=False, + with_image_token=True) + + self.image_folder = image_folder + if isinstance(image_processor, dict) or isinstance( + image_processor, Config) or isinstance(image_processor, + ConfigDict): + self.image_processor = BUILDER.build(image_processor) + else: + self.image_processor = image_processor + self.pad_image_to_square = pad_image_to_square + + @property + def modality_length(self): + length_list = [] + for data_dict in self.text_data: + cur_len = len(data_dict['input_ids']) + if data_dict.get('image', None) is None: + cur_len = -cur_len + length_list.append(cur_len) + return length_list + + def __len__(self): + return len(self.text_data) + + def __getitem__(self, index): + data_dict = self.text_data[index] + if data_dict.get('image', None) is not None: + image_file = data_dict['image'] + image = Image.open(os.path.join(self.image_folder, + image_file)).convert('RGB') + if self.pad_image_to_square: + image = expand2square( + image, + tuple( + int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + data_dict['pixel_values'] = image + else: + crop_size = self.image_processor.crop_size + data_dict['pixel_values'] = torch.zeros(3, crop_size['height'], + crop_size['width']) + return data_dict diff --git a/xtuner/dataset/map_fns/dataset_map_fns/__init__.py b/xtuner/dataset/map_fns/dataset_map_fns/__init__.py index 6a106925f..67eb2aa5c 100644 --- a/xtuner/dataset/map_fns/dataset_map_fns/__init__.py +++ b/xtuner/dataset/map_fns/dataset_map_fns/__init__.py @@ -7,6 +7,7 @@ from .crime_kg_assitant_map_fn import crime_kg_assitant_map_fn from .default_map_fn import default_map_fn from .law_reference_map_fn 
import law_reference_map_fn +from .llava_map_fn import llava_image_only_map_fn, llava_map_fn from .medical_map_fn import medical_map_fn from .msagent_map_fn import msagent_react_map_fn from .oasst1_map_fn import oasst1_map_fn @@ -24,5 +25,6 @@ 'tiny_codes_map_fn', 'colors_map_fn', 'law_reference_map_fn', 'crime_kg_assitant_map_fn', 'sql_map_fn', 'openai_map_fn', 'wizardlm_map_fn', 'stack_exchange_map_fn', 'msagent_react_map_fn', - 'default_map_fn', 'pretrain_map_fn' + 'pretrain_map_fn', 'default_map_fn', 'llava_image_only_map_fn', + 'llava_map_fn' ] diff --git a/xtuner/dataset/map_fns/dataset_map_fns/llava_map_fn.py b/xtuner/dataset/map_fns/dataset_map_fns/llava_map_fn.py new file mode 100644 index 000000000..a08ca395b --- /dev/null +++ b/xtuner/dataset/map_fns/dataset_map_fns/llava_map_fn.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from xtuner.utils import DEFAULT_IMAGE_TOKEN + + +def llava_image_only_map_fn(example): + # input contains the DEFAULT_IMAGE_TOKEN only + messages = example['conversations'] + input = '' + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + assert DEFAULT_IMAGE_TOKEN in msg['value'] + input += DEFAULT_IMAGE_TOKEN + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + return {'conversation': conversation} + + +def llava_map_fn(example): + messages = example['conversations'] + input = '' + conversation = [] + while messages and messages[0]['from'] == 'gpt': + # Skip the first one if it is from gpt + messages = messages[1:] + for msg in messages: + if msg['from'] == 'human': + if DEFAULT_IMAGE_TOKEN in msg['value']: + msg['value'] = msg['value'].replace(DEFAULT_IMAGE_TOKEN, + '').strip() + msg['value'] = DEFAULT_IMAGE_TOKEN + '\n' + msg['value'] + msg['value'] = msg['value'].strip() + input += msg['value'] + + elif msg['from'] == 'gpt': + conversation.append({'input': input, 'output': msg['value']}) + input = '' + else: + raise NotImplementedError + return {'conversation': conversation} diff --git a/xtuner/dataset/map_fns/template_map_fn.py b/xtuner/dataset/map_fns/template_map_fn.py index 5aed21b77..48658a290 100644 --- a/xtuner/dataset/map_fns/template_map_fn.py +++ b/xtuner/dataset/map_fns/template_map_fn.py @@ -1,25 +1,25 @@ # Copyright (c) OpenMMLab. All rights reserved. 
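The llava_map_fn above rewrites LLaVA's 'conversations' records into xtuner's 'conversation' format, moving the image placeholder to the front of each human turn. A small worked example (expected output shown as a comment; assumes xtuner is importable):

from xtuner.dataset.map_fns import llava_map_fn
from xtuner.utils import DEFAULT_IMAGE_TOKEN

example = {
    'conversations': [
        {'from': 'human', 'value': f'What is shown here?\n{DEFAULT_IMAGE_TOKEN}'},
        {'from': 'gpt', 'value': 'A wooden pier over a calm lake.'},
    ]
}
print(llava_map_fn(example))
# {'conversation': [{'input': DEFAULT_IMAGE_TOKEN + '\nWhat is shown here?',
#                    'output': 'A wooden pier over a calm lake.'}]}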
from functools import partial +from mmengine.utils.misc import get_object_from_string + def template_map_fn(example, template): conversation = example.get('conversation', []) for i, single_turn_conversation in enumerate(conversation): - input_text = '' input = single_turn_conversation.get('input', '') - if input != '' and input is not None: - input = template.INSTRUCTION.format(input=input, round=i + 1) - input_text += input - instruction_postfix = '' - else: - instruction_postfix = template.INSTRUCTION.split('{input}')[-1] + if input is None: + input = '' + input_text = template.INSTRUCTION.format(input=input, round=i + 1) system = single_turn_conversation.get('system', '') if system != '' and system is not None: system = template.SYSTEM.format(system=system) input_text = system + input_text - single_turn_conversation['input'] = input_text + instruction_postfix + single_turn_conversation['input'] = input_text return {'conversation': conversation} def template_map_fn_factory(template): + if isinstance(template, str): # for resume + template = get_object_from_string(template) return partial(template_map_fn, template=template) diff --git a/xtuner/dataset/samplers/__init__.py b/xtuner/dataset/samplers/__init__.py new file mode 100644 index 000000000..a162e73c9 --- /dev/null +++ b/xtuner/dataset/samplers/__init__.py @@ -0,0 +1,3 @@ +from .length_grouped import LengthGroupedSampler + +__all__ = ['LengthGroupedSampler'] diff --git a/xtuner/dataset/samplers/length_grouped.py b/xtuner/dataset/samplers/length_grouped.py new file mode 100644 index 000000000..642021b9f --- /dev/null +++ b/xtuner/dataset/samplers/length_grouped.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Iterator, Optional, Sized + +import torch +from mmengine.dist import get_dist_info, sync_random_seed +from torch.utils.data import Sampler + + +def get_length_grouped_indices(lengths, group_batch_size, generator=None): + + def process(lengths, group_batch_size, generator=None): + indices = torch.randperm(len(lengths), generator=generator) + megabatches = [ + indices[i:i + group_batch_size].tolist() + for i in range(0, len(lengths), group_batch_size) + ] + megabatches = [ + sorted(megabatch, key=lambda i: lengths[i], reverse=True) + for megabatch in megabatches + ] + return megabatches + + assert all(leng != 0 for leng in lengths), 'Should not have zero length.' 
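+ # lengths follows the sign convention of LLaVADataset.modality_length:
+ #   > 0 -> the sample contains an image (multimodal)
+ #   < 0 -> text-only sample (the magnitude is still the token length)
+ # The branch below therefore builds separate megabatches per modality (only the
+ # merged leftover megabatch may mix them), so each device batch stays roughly
+ # homogeneous in modality and length.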
+ if all(leng > 0 for leng in lengths) or all(leng < 0 for leng in lengths): + # all samples are in the same modality + megabatches = process(lengths, group_batch_size, generator=generator) + else: + mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) + if l > 0]) + lang_indices, lang_lengths = zip(*[(i, -l) + for i, l in enumerate(lengths) + if l < 0]) + mm_megabatches = [] + for mm_megabatch in process( + mm_lengths, group_batch_size, generator=generator): + mm_megabatches.append([mm_indices[i] for i in mm_megabatch]) + lang_megabatches = [] + for lang_megabatch in process( + lang_lengths, group_batch_size, generator=generator): + lang_megabatches.append([lang_indices[i] for i in lang_megabatch]) + + last_mm = mm_megabatches[-1] + last_lang = lang_megabatches[-1] + last_batch = last_mm + last_lang + megabatches = mm_megabatches[:-1] + lang_megabatches[:-1] + + megabatch_indices = torch.randperm( + len(megabatches), generator=generator) + megabatches = [megabatches[i] for i in megabatch_indices] + + if len(last_batch) > 0: + megabatches.append( + sorted( + last_batch, key=lambda i: abs(lengths[i]), reverse=True)) + + # The rest is to get the biggest batch first. + # Since each megabatch is sorted by descending length, + # the longest element is the first + megabatch_maximums = [ + abs(lengths[megabatch[0]]) for megabatch in megabatches + ] + max_idx = torch.argmax(torch.tensor(megabatch_maximums)).item() + # Switch to put the longest element in first position + megabatches[0][0], megabatches[max_idx][0] = megabatches[max_idx][ + 0], megabatches[0][0] + + return [i for megabatch in megabatches for i in megabatch] + + +class LengthGroupedSampler(Sampler): + + def __init__(self, + dataset: Sized, + per_device_batch_size: int, + length_property='length', + mega_batch_mult: Optional[int] = None, + seed: Optional[int] = None, + round_up: bool = True) -> None: + rank, world_size = get_dist_info() + self.rank = rank + self.world_size = world_size + + self.dataset = dataset + if seed is None: + seed = sync_random_seed() + self.seed = seed + self.epoch = 0 + self.round_up = round_up + + if self.round_up: + self.num_samples = math.ceil(len(self.dataset) / world_size) + self.total_size = self.num_samples * self.world_size + else: + self.num_samples = math.ceil( + (len(self.dataset) - rank) / world_size) + self.total_size = len(self.dataset) + + total_batch_size = per_device_batch_size * self.world_size + if mega_batch_mult is None: + # Default for mega_batch_mult: 50 or the number to get 4 + # megabatches, whichever is smaller. 
+ mega_batch_mult = min( + len(self.dataset) // (total_batch_size * 4), 50) + # Just in case, for tiny datasets + if mega_batch_mult == 0: + mega_batch_mult = 1 + self.group_batch_size = mega_batch_mult * total_batch_size + self.length = getattr(self.dataset, length_property) + assert isinstance(self.length, (list, tuple)) + if isinstance(self.length[0], (list, tuple)): + self.length = [i for one_length in self.length for i in one_length] + + self.total_batch_size = total_batch_size + + def __iter__(self) -> Iterator[int]: + """Iterate the indices.""" + generator = torch.Generator() + generator.manual_seed(self.seed + self.epoch) + indices = get_length_grouped_indices( + lengths=self.length, + group_batch_size=self.group_batch_size, + generator=generator) + assert len(set(indices)) == len(indices) + # add extra samples to make it evenly divisible + if self.round_up: + indices = ( + indices * + int(self.total_size / len(indices) + 1))[:self.total_size] + # subsample + assert len(indices) == self.total_size + indices = indices[self.rank:self.total_size:self.world_size] + assert len(indices) == self.num_samples + return iter(indices) + + def __len__(self) -> int: + """The number of samples in this rank.""" + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + """Sets the epoch for this sampler. + + When :attr:`shuffle=True`, this ensures all replicas use a different + random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + + Args: + epoch (int): Epoch number. + """ + self.epoch = epoch diff --git a/xtuner/dataset/utils.py b/xtuner/dataset/utils.py index 7f4428ade..d89b32bf6 100644 --- a/xtuner/dataset/utils.py +++ b/xtuner/dataset/utils.py @@ -1,11 +1,21 @@ # Copyright (c) OpenMMLab. All rights reserved. +import base64 import copy +import io +from io import BytesIO from itertools import chain -from xtuner.utils import IGNORE_INDEX +import requests +from PIL import Image +from xtuner.utils import DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX -def encode_fn(example, tokenizer, max_length, input_ids_with_output=True): + +def encode_fn(example, + tokenizer, + max_length, + input_ids_with_output=True, + with_image_token=False): """We only support the following three scenarios: 1. Incremental pretraining dataset. 
@@ -56,7 +66,19 @@ def encode_fn(example, tokenizer, max_length, input_ids_with_output=True): input_ids, labels = [], [] for single_turn_conversation in example['conversation']: input = single_turn_conversation['input'] - input_encode = tokenizer(f'{input}', add_special_tokens=False) + if DEFAULT_IMAGE_TOKEN in input and with_image_token: + chunk_encode = [ + tokenizer(chunk, add_special_tokens=False) + for chunk in input.split('') + ] + assert len(chunk_encode) == 2 + input_encode = {'input_ids': []} + for idx, cur_chunk_encode in enumerate(chunk_encode): + input_encode['input_ids'].extend(cur_chunk_encode['input_ids']) + if idx != len(chunk_encode) - 1: + input_encode['input_ids'].append(IMAGE_TOKEN_INDEX) + else: + input_encode = tokenizer(f'{input}', add_special_tokens=False) input_ids += bos_token_id + input_encode['input_ids'] labels += [IGNORE_INDEX] * ( len(bos_token_id + input_encode['input_ids'])) @@ -138,3 +160,32 @@ def __call__(self, batch): self.residual = [] return result + + +def expand2square(pil_img, background_color): + width, height = pil_img.size + if width == height: + return pil_img + elif width > height: + result = Image.new(pil_img.mode, (width, width), background_color) + result.paste(pil_img, (0, (width - height) // 2)) + return result + else: + result = Image.new(pil_img.mode, (height, height), background_color) + result.paste(pil_img, ((height - width) // 2, 0)) + return result + + +def load_image(image_file): + if image_file.startswith('http://') or image_file.startswith('https://'): + response = requests.get(image_file) + image = Image.open(BytesIO(response.content)).convert('RGB') + else: + image = Image.open(image_file).convert('RGB') + return image + + +def decode_base64_to_image(base64_string): + image_data = base64.b64decode(base64_string) + image = Image.open(io.BytesIO(image_data)) + return image diff --git a/xtuner/engine/_strategy/deepspeed.py b/xtuner/engine/_strategy/deepspeed.py index 74a66e962..f326f2caf 100644 --- a/xtuner/engine/_strategy/deepspeed.py +++ b/xtuner/engine/_strategy/deepspeed.py @@ -1,3 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
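With with_image_token=True, encode_fn splits the prompt text on DEFAULT_IMAGE_TOKEN and splices IMAGE_TOKEN_INDEX between the tokenized chunks, so the projected visual features can later be substituted at that position; the assert on chunk_encode limits each sample to a single image. A minimal sketch of the same splice pattern; fake_tokenize is a stand-in for the real tokenizer call and is not part of xtuner:

from xtuner.utils import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX

def fake_tokenize(text):
    # stand-in for tokenizer(chunk, add_special_tokens=False)['input_ids']
    return [len(word) for word in text.split()]

prompt = DEFAULT_IMAGE_TOKEN + '\nDescribe this picture'
chunks = prompt.split(DEFAULT_IMAGE_TOKEN)  # ['', '\nDescribe this picture']
input_ids = []
for idx, chunk in enumerate(chunks):
    input_ids.extend(fake_tokenize(chunk))
    if idx != len(chunks) - 1:
        input_ids.append(IMAGE_TOKEN_INDEX)  # placeholder later replaced by image features
print(input_ids)  # [IMAGE_TOKEN_INDEX, 8, 4, 7]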
from mmengine._strategy import DeepSpeedStrategy as MMEngineDeepSpeedStrategy diff --git a/xtuner/engine/hooks/dataset_info_hook.py b/xtuner/engine/hooks/dataset_info_hook.py index 49cc7821e..d835311dc 100644 --- a/xtuner/engine/hooks/dataset_info_hook.py +++ b/xtuner/engine/hooks/dataset_info_hook.py @@ -2,18 +2,42 @@ from mmengine.hooks import Hook from xtuner.registry import BUILDER +from xtuner.utils import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX + + +def split_list(lst, value): + res = [] + tmp_res = [] + for i in lst: + if tmp_res and i == value: + res.append(tmp_res) + tmp_res = [] + else: + tmp_res.append(i) + res.append(tmp_res) + return res class DatasetInfoHook(Hook): - def __init__(self, tokenizer): + def __init__(self, tokenizer, is_intern_repo_dataset=False): self.tokenizer = BUILDER.build(tokenizer) + self.is_intern_repo_dataset = is_intern_repo_dataset def log(self, runner, dataset, mode='train'): runner.logger.info(f'Num {mode} samples {len(dataset)}') runner.logger.info(f'{mode} example:') - input_ids = [abs(x) for x in dataset[0]['input_ids']] - runner.logger.info(self.tokenizer.decode(input_ids)) + input_ids = dataset[0]['input_ids'] + if self.is_intern_repo_dataset: + input_ids = [abs(x) for x in input_ids] + # Try to split list to be compatible with IMAGE token + input_ids = split_list(input_ids, IMAGE_TOKEN_INDEX) + text = '' + for idx, ids in enumerate(input_ids): + text += self.tokenizer.decode(ids) + if idx != len(input_ids) - 1: + text += DEFAULT_IMAGE_TOKEN + runner.logger.info(text) def before_train(self, runner) -> None: do_train = runner.train_loop is not None diff --git a/xtuner/engine/hooks/evaluate_chat_hook.py b/xtuner/engine/hooks/evaluate_chat_hook.py index 78caf2197..44c458c11 100644 --- a/xtuner/engine/hooks/evaluate_chat_hook.py +++ b/xtuner/engine/hooks/evaluate_chat_hook.py @@ -1,10 +1,15 @@ # Copyright (c) OpenMMLab. All rights reserved. 
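DatasetInfoHook above logs multimodal samples by splitting input_ids at IMAGE_TOKEN_INDEX, a placeholder id that is not part of the tokenizer vocabulary, decoding each chunk and re-inserting DEFAULT_IMAGE_TOKEN between them. A small example of the helper with arbitrary token ids, assuming the module path from this patch:

from xtuner.engine.hooks.dataset_info_hook import split_list
from xtuner.utils import IMAGE_TOKEN_INDEX

ids = [101, 2054, IMAGE_TOKEN_INDEX, 2003, 102]
print(split_list(ids, IMAGE_TOKEN_INDEX))  # [[101, 2054], [2003, 102]]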
+import torch from mmengine.hooks import Hook from mmengine.model import is_model_wrapper +from mmengine.utils.misc import get_object_from_string from transformers import GenerationConfig, StoppingCriteriaList +from xtuner.dataset.utils import expand2square, load_image +from xtuner.model.utils import prepare_inputs_labels_for_multimodal from xtuner.registry import BUILDER -from xtuner.utils import StopWordStoppingCriteria +from xtuner.utils import (DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX, + StopWordStoppingCriteria) class EvaluateChatHook(Hook): @@ -12,6 +17,8 @@ class EvaluateChatHook(Hook): def __init__(self, tokenizer, evaluation_inputs, + evaluation_images=None, + image_processor=None, system='', prompt_template=None, every_n_iters=None, @@ -20,9 +27,23 @@ def __init__(self, self.evaluation_inputs = evaluation_inputs if isinstance(self.evaluation_inputs, str): self.evaluation_inputs = [self.evaluation_inputs] + self.evaluation_images = evaluation_images + if isinstance(self.evaluation_images, str): + self.evaluation_images = [self.evaluation_images] + if self.evaluation_images is not None: + assert len( + self.evaluation_images) in [1, len(self.evaluation_inputs)] + if len(self.evaluation_images) == 1: + self.evaluation_images = [self.evaluation_images[0]] * len( + self.evaluation_inputs) + self.evaluation_images = [ + load_image(img) for img in self.evaluation_images + ] if prompt_template is None: instruction = '{input}' else: + if isinstance(prompt_template, str): # for resume + prompt_template = get_object_from_string(prompt_template) instruction = prompt_template.get('INSTRUCTION', '{input}') if system != '': system = prompt_template.get( @@ -32,6 +53,8 @@ def __init__(self, self.every_n_iters = every_n_iters self.max_new_tokens = max_new_tokens self.tokenizer = BUILDER.build(tokenizer) + if image_processor is not None: + self.image_processor = BUILDER.build(image_processor) self.stop_criteria = StoppingCriteriaList() # default generation config self.gen_config = GenerationConfig( @@ -62,28 +85,76 @@ def _generate_samples(self, runner, max_new_tokens=None): use_cache = model.llm.config.use_cache # Cast to inference mode - model.llm.gradient_checkpointing_disable() + model.activation_checkpointing_disable() model.llm.config.use_cache = True model.eval() + if self.evaluation_images is not None: + for sample_image, sample_input in zip(self.evaluation_images, + self.evaluation_inputs): + image = expand2square( + sample_image, + tuple( + int(x * 255) for x in self.image_processor.image_mean)) + image = self.image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + image = image.to(device) + sample_input = DEFAULT_IMAGE_TOKEN + '\n' + sample_input + inputs = (self.system + self.instruction).format( + input=sample_input, round=1, **runner.cfg) + chunk_encode = [] + for idx, chunk in enumerate(inputs.split(DEFAULT_IMAGE_TOKEN)): + if idx == 0: + cur_encode = self.tokenizer(chunk) + else: + cur_encode = self.tokenizer( + chunk, add_special_tokens=False) + chunk_encode.append(cur_encode) + assert len(chunk_encode) == 2 + input_ids = [] + for idx, cur_chunk_encode in enumerate(chunk_encode): + input_ids.extend(cur_chunk_encode['input_ids']) + if idx != len(chunk_encode) - 1: + input_ids.append(IMAGE_TOKEN_INDEX) + input_ids = torch.tensor(input_ids).to(device) + visual_outputs = model.visual_encoder( + image.unsqueeze(0), output_hidden_states=True) + pixel_values = model.projector(visual_outputs.hidden_states[ + model.visual_select_layer][:, 1:]) - for sample_input in 
self.evaluation_inputs: - inputs = (self.system + self.instruction).format( - input=sample_input, round=1, **runner.cfg) - input_ids = self.tokenizer( - inputs, return_tensors='pt')['input_ids'] - input_ids = input_ids.to(device) - generation_output = model.generate( - input_ids=input_ids, - max_new_tokens=max_new_tokens, - generation_config=self.gen_config, - stopping_criteria=self.stop_criteria) - runner.logger.info( - f'Sample output:\n' - f'{self.tokenizer.decode(generation_output[0])}\n') + mm_inputs = prepare_inputs_labels_for_multimodal( + llm=model.llm, + input_ids=input_ids.unsqueeze(0), + pixel_values=pixel_values) + + generation_output = model.generate( + **mm_inputs, + max_new_tokens=max_new_tokens, + generation_config=self.gen_config, + bos_token_id=self.tokenizer.bos_token_id, + stopping_criteria=self.stop_criteria) + runner.logger.info( + f'Sample output:\n' + f'{inputs + self.tokenizer.decode(generation_output[0])}\n' + ) + else: + for sample_input in self.evaluation_inputs: + inputs = (self.system + self.instruction).format( + input=sample_input, round=1, **runner.cfg) + input_ids = self.tokenizer( + inputs, return_tensors='pt')['input_ids'] + input_ids = input_ids.to(device) + generation_output = model.generate( + input_ids=input_ids, + max_new_tokens=max_new_tokens, + generation_config=self.gen_config, + stopping_criteria=self.stop_criteria) + runner.logger.info( + f'Sample output:\n' + f'{self.tokenizer.decode(generation_output[0])}\n') # Cast to training mode if is_checkpointing: - model.llm.gradient_checkpointing_enable() + model.activation_checkpointing_enable() model.llm.config.use_cache = use_cache model.train() diff --git a/xtuner/entry_point.py b/xtuner/entry_point.py index 27360d945..bf87bb1b8 100644 --- a/xtuner/entry_point.py +++ b/xtuner/entry_point.py @@ -9,13 +9,13 @@ import xtuner from xtuner.tools import (chat, check_custom_dataset, copy_cfg, list_cfg, - log_dataset, test, train) + log_dataset, mmbench, test, train) from xtuner.tools.data_preprocess import arxiv as arxiv_preprocess from xtuner.tools.model_converters import merge, pth_to_hf, split # Define valid modes MODES = ('list-cfg', 'copy-cfg', 'log-dataset', 'check-custom-dataset', - 'train', 'test', 'chat', 'convert', 'preprocess') + 'train', 'test', 'chat', 'convert', 'preprocess', 'mmbench') CLI_HELP_MSG = \ f""" @@ -39,18 +39,23 @@ NPROC_PER_NODE=$NGPUS NNODES=$NNODES NODE_RANK=$NODE_RANK PORT=$PORT ADDR=$ADDR xtuner dist_train $CONFIG $GPUS 4-1. Convert the pth model to HuggingFace's model: xtuner convert pth_to_hf $CONFIG $PATH_TO_PTH_MODEL $SAVE_PATH_TO_HF_MODEL - 4-2. Merge the HuggingFace's adapter to the pretrained LLM: - xtuner convert merge $NAME_OR_PATH_TO_LLM $NAME_OR_PATH_TO_ADAPTER $SAVE_PATH + 4-2. Merge the HuggingFace's adapter to the pretrained base model: + xtuner convert merge $LLM $ADAPTER $SAVE_PATH + xtuner convert merge $CLIP $ADAPTER $SAVE_PATH --is-clip 4-3. Split HuggingFace's LLM to the smallest sharded one: - xtuner convert split $NAME_OR_PATH_TO_LLM $SAVE_PATH - 5. Chat with LLMs with HuggingFace's model and adapter: - xtuner chat $NAME_OR_PATH_TO_LLM --adapter $NAME_OR_PATH_TO_ADAPTER --prompt-template $PROMPT_TEMPLATE --system-template $SYSTEM_TEMPLATE + xtuner convert split $LLM $SAVE_PATH + 5-1. Chat with LLMs with HuggingFace's model and adapter: + xtuner chat $LLM --adapter $ADAPTER --prompt-template $PROMPT_TEMPLATE --system-template $SYSTEM_TEMPLATE + 5-2. 
Chat with VLMs with HuggingFace's model and LLaVA: + xtuner chat $LLM --llava $LLAVA --visual-encoder $VISUAL_ENCODER --image $IMAGE --prompt-template $PROMPT_TEMPLATE --system-template $SYSTEM_TEMPLATE 6-1. Preprocess arxiv dataset: xtuner preprocess arxiv $SRC_FILE $DST_FILE --start-date $START_DATE --categories $CATEGORIES 7-1. Log processed dataset: xtuner log-dataset $CONFIG 7-2. Verify the correctness of the config file for the custom dataset. xtuner check-custom-dataset + 8. MMBench evaluation + xtuner mmbench $LLM --llava $LLAVA --visual-encoder $VISUAL_ENCODER --prompt-template $PROMPT_TEMPLATE --data-path $MMBENCH_DATA_PATH Run special commands: @@ -76,9 +81,9 @@ 1. Convert the pth model to HuggingFace's model: xtuner convert pth_to_hf $CONFIG $PATH_TO_PTH_MODEL $SAVE_PATH_TO_HF_MODEL 2. Merge the HuggingFace's adapter to the pretrained LLM: - xtuner convert merge $NAME_OR_PATH_TO_LLM $NAME_OR_PATH_TO_ADAPTER $SAVE_PATH + xtuner convert merge $LLM $ADAPTER $SAVE_PATH 3. Split HuggingFace's LLM to the smallest sharded one: - xtuner convert split $NAME_OR_PATH_TO_LLM $SAVE_PATH + xtuner convert split $LLM $SAVE_PATH GitHub: https://github.com/InternLM/xtuner """ # noqa: E501 @@ -122,6 +127,7 @@ 'train': train.__file__, 'test': test.__file__, 'chat': chat.__file__, + 'mmbench': mmbench.__file__, 'convert': { 'pth_to_hf': pth_to_hf.__file__, 'merge': merge.__file__, diff --git a/xtuner/model/__init__.py b/xtuner/model/__init__.py index 9ac3ff300..39547b2d7 100644 --- a/xtuner/model/__init__.py +++ b/xtuner/model/__init__.py @@ -1,4 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. +from .llava import LLaVAModel from .sft import SupervisedFinetune -__all__ = ['SupervisedFinetune'] +__all__ = ['SupervisedFinetune', 'LLaVAModel'] diff --git a/xtuner/model/llava.py b/xtuner/model/llava.py new file mode 100644 index 000000000..955ad7c92 --- /dev/null +++ b/xtuner/model/llava.py @@ -0,0 +1,207 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
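The evaluation hook above, and the chat and MMBench tools later in this patch, all tokenize a multimodal prompt the same way: split the text at DEFAULT_IMAGE_TOKEN, encode each chunk separately (special tokens only on the first one), and splice IMAGE_TOKEN_INDEX between the chunks so the placeholder never reaches the embedding layer as a real token. A stand-alone sketch of that pattern; the tokenizer checkpoint is an illustrative placeholder:

from transformers import AutoTokenizer

from xtuner.utils import DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX

tokenizer = AutoTokenizer.from_pretrained(
    'internlm/internlm-chat-7b', trust_remote_code=True)
prompt = DEFAULT_IMAGE_TOKEN + '\nWhat is shown in this image?'

chunk_encode = []
for idx, chunk in enumerate(prompt.split(DEFAULT_IMAGE_TOKEN)):
    # only the first chunk keeps the special (bos) tokens
    chunk_encode.append(tokenizer(chunk, add_special_tokens=(idx == 0)))
assert len(chunk_encode) == 2
input_ids = []
for idx, cur in enumerate(chunk_encode):
    input_ids.extend(cur['input_ids'])
    if idx != len(chunk_encode) - 1:
        input_ids.append(IMAGE_TOKEN_INDEX)
# input_ids now carries a single IMAGE_TOKEN_INDEX placeholder that
# prepare_inputs_labels_for_multimodal later swaps for projected image features.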
+from collections import OrderedDict + +import torch.nn as nn +from mmengine.config import Config, ConfigDict +from mmengine.model import BaseModel +from peft import get_peft_model, prepare_model_for_kbit_training + +from xtuner.registry import BUILDER +from .modules import ProjectorConfig, ProjectorModel, dispatch_modules +from .utils import (LoadWoInit, find_all_linear_names, + get_peft_model_state_dict, guess_load_checkpoint, + make_inputs_require_grad, + prepare_inputs_labels_for_multimodal, traverse_dict) + + +class LLaVAModel(BaseModel): + + def __init__(self, + llm, + visual_encoder, + freeze_llm=False, + freeze_visual_encoder=False, + visual_select_layer=-2, + pretrained_pth=None, + projector_depth=2, + llm_lora=None, + visual_encoder_lora=None, + use_activation_checkpointing=True): + super().__init__() + self.freeze_llm = freeze_llm + self.freeze_visual_encoder = freeze_visual_encoder + with LoadWoInit(): + self.llm = self._build_from_cfg_or_module(llm) + self.visual_encoder = self._build_from_cfg_or_module( + visual_encoder) + self.llm.config.use_cache = False + dispatch_modules(self.llm) + + projector_config = ProjectorConfig( + visual_hidden_size=self.visual_encoder.config.hidden_size, + llm_hidden_size=self.llm.config.hidden_size, + depth=projector_depth) + self.projector = ProjectorModel(projector_config).to( + self.visual_encoder.dtype) + + if self.freeze_llm: + self.llm.requires_grad_(False) + if self.freeze_visual_encoder: + self.visual_encoder.requires_grad_(False) + + if use_activation_checkpointing: + # For backward compatibility + if hasattr(self.llm, 'enable_input_require_grads'): + self.llm.enable_input_require_grads() + else: + self.llm.get_input_embeddings().register_forward_hook( + make_inputs_require_grad) + if hasattr(self.visual_encoder, 'enable_input_require_grads'): + self.visual_encoder.enable_input_require_grads() + else: + self.visual_encoder.get_input_embeddings( + ).register_forward_hook(make_inputs_require_grad) + self.projector.enable_input_require_grads() + + # enable gradient (activation) checkpointing for memory efficiency + self.gradient_checkpointing_enable() + + self.use_llm_lora = llm_lora is not None + self.use_visual_encoder_lora = visual_encoder_lora is not None + + if self.use_llm_lora: + self._prepare_llm_for_lora(llm_lora, use_activation_checkpointing) + if self.use_visual_encoder_lora: + self._prepare_visual_encoder_for_lora( + visual_encoder_lora, use_activation_checkpointing) + + if pretrained_pth is not None: + pretrained_state_dict = guess_load_checkpoint(pretrained_pth) + + self.load_state_dict(pretrained_state_dict, strict=False) + print(f'Load pretrained weight from {pretrained_pth}') + + self.visual_select_layer = visual_select_layer + + self._is_init = True + + def _parse_lora_config(self, lora_config): + if isinstance(lora_config, dict) or isinstance( + lora_config, Config) or isinstance(lora_config, ConfigDict): + lora_config = BUILDER.build(lora_config) + return lora_config + + def _prepare_llm_for_lora(self, + lora_config, + use_activation_checkpointing=True): + lora_config = self._parse_lora_config(lora_config) + self.llm = prepare_model_for_kbit_training( + self.llm, use_activation_checkpointing) + if lora_config.target_modules is None: + modules = find_all_linear_names(self.llm) + lora_config.target_modules = modules + self.llm = get_peft_model(self.llm, lora_config) + + def _prepare_visual_encoder_for_lora(self, + lora_config, + use_activation_checkpointing=True): + lora_config = self._parse_lora_config(lora_config) + if 
lora_config.target_modules is None: + modules = find_all_linear_names(self.visual_encoder) + lora_config.target_modules = modules + self.visual_encoder = get_peft_model(self.visual_encoder, lora_config) + + def gradient_checkpointing_enable(self): + self.activation_checkpointing_enable() + + def activation_checkpointing_enable(self): + self.llm.gradient_checkpointing_enable() + self.visual_encoder.gradient_checkpointing_enable() + self.projector.gradient_checkpointing_enable() + + def gradient_checkpointing_disable(self): + self.activation_checkpointing_disable() + + def activation_checkpointing_disable(self): + self.llm.gradient_checkpointing_disable() + self.visual_encoder.gradient_checkpointing_disable() + self.projector.gradient_checkpointing_disable() + + def init_weights(self): + pass + + def state_dict(self, *args, **kwargs): + state_dict = super().state_dict(*args, **kwargs) + to_return = OrderedDict() + # Step 1. visual_encoder + if self.use_visual_encoder_lora: + to_return.update( + get_peft_model_state_dict( + self.visual_encoder, state_dict=state_dict)) + elif not self.freeze_visual_encoder: + to_return.update({ + k: v + for k, v in state_dict.items() if 'visual_encoder.' in k + }) + # Step 2. LLM + if self.use_llm_lora: + to_return.update( + get_peft_model_state_dict(self.llm, state_dict=state_dict)) + elif not self.freeze_llm: + to_return.update( + {k: v + for k, v in state_dict.items() if 'llm.' in k}) + # Step 3. Projector + to_return.update( + {k: v + for k, v in state_dict.items() if 'projector.' in k}) + return to_return + + def _build_from_cfg_or_module(self, cfg_or_mod): + if isinstance(cfg_or_mod, nn.Module): + return cfg_or_mod + elif isinstance(cfg_or_mod, dict): + traverse_dict(cfg_or_mod) + return BUILDER.build(cfg_or_mod) + else: + raise NotImplementedError + + def forward(self, data, data_samples=None, mode='loss'): + if 'pixel_values' in data: + visual_outputs = self.visual_encoder( + data['pixel_values'], output_hidden_states=True) + pixel_values = self.projector( + visual_outputs.hidden_states[self.visual_select_layer][:, 1:]) + data['pixel_values'] = pixel_values + data = prepare_inputs_labels_for_multimodal(llm=self.llm, **data) + + if mode == 'loss': + return self.compute_loss(data, data_samples) + elif mode == 'predict': + return self.predict(data, data_samples) + elif mode == 'tensor': + return self._forward(data, data_samples) + else: + raise NotImplementedError + + def _forward(self, data, data_samples=None): + + outputs = self.llm(**data) + + return outputs + + def predict(self, data, data_samples=None): + outputs = self.llm(**data) + logits_dict = [{'logits': logits} for logits in outputs.logits] + return logits_dict + + def compute_loss(self, data, data_samples=None): + outputs = self.llm(**data) + loss_dict = {'loss': outputs.loss} + return loss_dict + + def __getattr__(self, name: str): + try: + return super().__getattr__(name) + except AttributeError: + return getattr(self.llm, name) diff --git a/xtuner/model/modules/__init__.py b/xtuner/model/modules/__init__.py index 0c41bc597..1207a9249 100644 --- a/xtuner/model/modules/__init__.py +++ b/xtuner/model/modules/__init__.py @@ -1,5 +1,4 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
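LLaVAModel composes an LLM, a CLIP-style visual encoder and the MLP projector; `_build_from_cfg_or_module` accepts either BUILDER config dicts (as in the shipped configs) or already-instantiated nn.Modules, and the overridden `state_dict()` keeps only what is actually trained (LoRA weights, unfrozen towers, the projector). A hedged sketch that passes HuggingFace modules in directly; the checkpoint names are illustrative and the setting mirrors the pretrain stage where only the projector learns:

import torch
from transformers import AutoModelForCausalLM, CLIPVisionModel

from xtuner.model import LLaVAModel

llm = AutoModelForCausalLM.from_pretrained(
    'internlm/internlm-chat-7b',
    torch_dtype=torch.float16,
    trust_remote_code=True)
visual_encoder = CLIPVisionModel.from_pretrained(
    'openai/clip-vit-large-patch14-336', torch_dtype=torch.float16)

model = LLaVAModel(
    llm=llm,
    visual_encoder=visual_encoder,
    freeze_llm=True,              # pretrain stage: only the projector learns
    freeze_visual_encoder=True,
    visual_select_layer=-2,       # penultimate hidden states, as in the configs
    pretrained_pth=None)          # or a pretrained projector checkpoint

# With both towers frozen and no LoRA, only 'projector.*' keys are saved.
print({k.split('.')[0] for k in model.state_dict()})  # {'projector'}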
- from .dispatch import dispatch_modules +from .projector import ProjectorConfig, ProjectorModel -__all__ = ['dispatch_modules'] +__all__ = ['dispatch_modules', 'ProjectorConfig', 'ProjectorModel'] diff --git a/xtuner/model/modules/dispatch.py b/xtuner/model/modules/dispatch/__init__.py similarity index 99% rename from xtuner/model/modules/dispatch.py rename to xtuner/model/modules/dispatch/__init__.py index 181ead803..b378e3ca5 100644 --- a/xtuner/model/modules/dispatch.py +++ b/xtuner/model/modules/dispatch/__init__.py @@ -94,3 +94,6 @@ def dispatch_modules(model): dispath_baichuan_13b_attn_forward(model) if 'yi' in model_name: dispatch_yi_attn_forward(model) + + +__all__ = ['dispatch_modules'] diff --git a/xtuner/model/modules/baichuan.py b/xtuner/model/modules/dispatch/baichuan.py similarity index 100% rename from xtuner/model/modules/baichuan.py rename to xtuner/model/modules/dispatch/baichuan.py diff --git a/xtuner/model/modules/internlm.py b/xtuner/model/modules/dispatch/internlm.py similarity index 100% rename from xtuner/model/modules/internlm.py rename to xtuner/model/modules/dispatch/internlm.py diff --git a/xtuner/model/modules/llama.py b/xtuner/model/modules/dispatch/llama.py similarity index 100% rename from xtuner/model/modules/llama.py rename to xtuner/model/modules/dispatch/llama.py diff --git a/xtuner/model/modules/yi.py b/xtuner/model/modules/dispatch/yi.py similarity index 100% rename from xtuner/model/modules/yi.py rename to xtuner/model/modules/dispatch/yi.py diff --git a/xtuner/model/modules/projector/__init__.py b/xtuner/model/modules/projector/__init__.py new file mode 100644 index 000000000..6196093dd --- /dev/null +++ b/xtuner/model/modules/projector/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from transformers import AutoConfig, AutoModel + +from .configuration_projector import ProjectorConfig +from .modeling_projector import ProjectorModel + +AutoConfig.register('projector', ProjectorConfig) +AutoModel.register(ProjectorConfig, ProjectorModel) + +__all__ = ['ProjectorConfig', 'ProjectorModel'] diff --git a/xtuner/model/modules/projector/configuration_projector.py b/xtuner/model/modules/projector/configuration_projector.py new file mode 100644 index 000000000..f63ffdc46 --- /dev/null +++ b/xtuner/model/modules/projector/configuration_projector.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from transformers import PretrainedConfig + + +class ProjectorConfig(PretrainedConfig): + model_type = 'projector' + _auto_class = 'AutoConfig' + + def __init__( + self, + visual_hidden_size=4096, + llm_hidden_size=4096, + depth=2, + hidden_act='gelu', + bias=True, + **kwargs, + ): + self.visual_hidden_size = visual_hidden_size + self.llm_hidden_size = llm_hidden_size + self.depth = depth + self.hidden_act = hidden_act + self.bias = bias + super().__init__(**kwargs) diff --git a/xtuner/model/modules/projector/modeling_projector.py b/xtuner/model/modules/projector/modeling_projector.py new file mode 100644 index 000000000..d55e7588c --- /dev/null +++ b/xtuner/model/modules/projector/modeling_projector.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
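The projector is registered with AutoConfig/AutoModel above, so the standalone `projector` folder exported by `pth_to_hf` can later be reloaded with a plain `AutoModel.from_pretrained` call, exactly as the chat and MMBench tools do. A minimal sketch of what the module computes, assuming CLIP-L/14 patch features (hidden size 1024) feeding a 7B LLM (hidden size 4096); the sizes are illustrative, the training configs take them from the actual models:

import torch

from xtuner.model.modules import ProjectorConfig, ProjectorModel

config = ProjectorConfig(
    visual_hidden_size=1024, llm_hidden_size=4096, depth=2)
projector = ProjectorModel(config)  # Linear -> GELU -> Linear

# 576 patch tokens from a 336x336 image with 14x14 patches (CLS already dropped)
visual_feats = torch.randn(1, 576, 1024)
pixel_values = projector(visual_feats)
print(pixel_values.shape)  # torch.Size([1, 576, 4096])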
+import torch +import torch.nn as nn +from transformers import PreTrainedModel +from transformers.activations import ACT2FN + +from .configuration_projector import ProjectorConfig + + +class ProjectorModel(PreTrainedModel): + _auto_class = 'AutoModel' + config_class = ProjectorConfig + base_model_prefix = 'model' + supports_gradient_checkpointing = True + + def __init__(self, config: ProjectorConfig) -> None: + super().__init__(config) + self.gradient_checkpointing = False + + modules = [ + nn.Linear( + config.visual_hidden_size, + config.llm_hidden_size, + bias=config.bias) + ] + for _ in range(1, config.depth): + modules.append(ACT2FN[config.hidden_act]) + modules.append( + nn.Linear( + config.llm_hidden_size, + config.llm_hidden_size, + bias=config.bias)) + self.model = nn.Sequential(*modules) + + def enable_input_require_grads(self): + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + self.model.register_forward_hook(make_inputs_require_grad) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, ProjectorModel): + module.gradient_checkpointing = value + + def forward(self, x): + if self.gradient_checkpointing and self.training: + layer_outputs = torch.utils.checkpoint.checkpoint(self.model, x) + else: + layer_outputs = self.model(x) + return layer_outputs diff --git a/xtuner/model/sft.py b/xtuner/model/sft.py index 66ba0340c..f48b196c8 100644 --- a/xtuner/model/sft.py +++ b/xtuner/model/sft.py @@ -4,12 +4,14 @@ from mmengine.config import Config, ConfigDict from mmengine.model import BaseModel from mmengine.runner import load_checkpoint -from peft import PeftType, get_peft_model, prepare_model_for_kbit_training +from peft import get_peft_model, prepare_model_for_kbit_training from torch import nn from xtuner.registry import BUILDER from .modules import dispatch_modules -from .utils import LoadWoInit, find_all_linear_names, traverse_dict +from .utils import (LoadWoInit, find_all_linear_names, + get_peft_model_state_dict, make_inputs_require_grad, + traverse_dict) class SupervisedFinetune(BaseModel): @@ -30,15 +32,11 @@ def __init__(self, if hasattr(self.llm, 'enable_input_require_grads'): self.llm.enable_input_require_grads() else: - - def make_inputs_require_grad(module, input, output): - output.requires_grad_(True) - self.llm.get_input_embeddings().register_forward_hook( make_inputs_require_grad) # enable gradient checkpointing for memory efficiency - self.llm.gradient_checkpointing_enable() + self.gradient_checkpointing_enable() if isinstance(lora, dict) or isinstance(lora, Config) or isinstance( lora, ConfigDict): @@ -52,6 +50,18 @@ def make_inputs_require_grad(module, input, output): self._is_init = True + def gradient_checkpointing_enable(self): + self.activation_checkpointing_enable() + + def activation_checkpointing_enable(self): + self.llm.gradient_checkpointing_enable() + + def gradient_checkpointing_disable(self): + self.activation_checkpointing_disable() + + def activation_checkpointing_disable(self): + self.llm.gradient_checkpointing_disable() + def _prepare_for_lora(self, peft_model=None, use_activation_checkpointing=True): @@ -104,62 +114,10 @@ def compute_loss(self, data, data_samples=None): loss_dict = {'loss': outputs.loss} return loss_dict - def state_dict(self, destination=None, prefix='', keep_vars=False): - - def get_peft_model_state_dict(model, - state_dict=None, - adapter_name='default'): - # Modified from `https://github.com/huggingface/peft/blob/main/src - # /peft/utils/save_and_load.py` - 
- config = model.peft_config[adapter_name] - if state_dict is None: - state_dict = model.state_dict() - if config.peft_type == PeftType.LORA: - # adapted from `https://github.com/microsoft/LoRA/blob/main/ - # loralib/utils.py` - # to be used directly with the state dict which is necessary - # when using DeepSpeed or FSDP - bias = config.bias - if bias == 'none': - to_return = { - k: state_dict[k] - for k in state_dict if 'lora_' in k - } - elif bias == 'all': - to_return = { - k: state_dict[k] - for k in state_dict if 'lora_' in k or 'bias' in k - } - elif bias == 'lora_only': - to_return = {} - for k in state_dict: - if 'lora_' in k: - to_return[k] = state_dict[k] - bias_name = k.split('lora_')[0] + 'bias' - if bias_name in state_dict: - to_return[bias_name] = state_dict[bias_name] - else: - raise NotImplementedError - to_return = { - k: v - for k, v in to_return.items() - if (('lora_' in k and adapter_name in k) or ('bias' in k)) - } - else: - # Currently we only support lora - raise NotImplementedError - if model.modules_to_save is not None: - for key, value in state_dict.items(): - if any(f'{module_name}.modules_to_save.{adapter_name}' in - key for module_name in model.modules_to_save): - to_return[key] = value - - return to_return - + def state_dict(self, *args, **kwargs): + state_dict = super().state_dict(*args, **kwargs) if not self.use_lora: - return super().state_dict() - state_dict = super().state_dict() + return state_dict to_return = get_peft_model_state_dict(self.llm, state_dict=state_dict) return OrderedDict(to_return) diff --git a/xtuner/model/utils.py b/xtuner/model/utils.py index bdc32a487..dce86315d 100644 --- a/xtuner/model/utils.py +++ b/xtuner/model/utils.py @@ -1,8 +1,15 @@ # Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import List, Optional + import torch from mmengine import print_log from mmengine.utils.misc import get_object_from_string +from peft import PeftType from torch import nn +from transformers import PreTrainedModel + +from xtuner.utils import IGNORE_INDEX, IMAGE_TOKEN_INDEX def set_obj_dtype(d): @@ -72,3 +79,231 @@ def __exit__(self, *args, **kwargs): torch.nn.init.normal_ = self.normal_ torch.nn.init.kaiming_uniform_ = self.kaiming_uniform_ torch.nn.init.kaiming_normal_ = self.kaiming_normal_ + + +def get_peft_model_state_dict(model, state_dict=None, adapter_name='default'): + # Modified from `https://github.com/huggingface/peft/blob/main/src/peft/utils/save_and_load.py` # noqa: E501 + + config = model.peft_config[adapter_name] + if state_dict is None: + state_dict = model.state_dict() + if config.peft_type == PeftType.LORA: + # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # noqa: E501 + # to be used directly with the state dict which is necessary + # when using DeepSpeed or FSDP + bias = config.bias + if bias == 'none': + to_return = {k: state_dict[k] for k in state_dict if 'lora_' in k} + elif bias == 'all': + to_return = { + k: state_dict[k] + for k in state_dict if 'lora_' in k or 'bias' in k + } + elif bias == 'lora_only': + to_return = {} + for k in state_dict: + if 'lora_' in k: + to_return[k] = state_dict[k] + bias_name = k.split('lora_')[0] + 'bias' + if bias_name in state_dict: + to_return[bias_name] = state_dict[bias_name] + else: + raise NotImplementedError + to_return = { + k: v + for k, v in to_return.items() + if (('lora_' in k and adapter_name in k) or ('bias' in k)) + } + else: + # Currently we only support lora + raise NotImplementedError + if model.modules_to_save is 
not None: + for key, value in state_dict.items(): + if any(f'{module_name}.modules_to_save.{adapter_name}' in key + for module_name in model.modules_to_save): + to_return[key] = value + + return to_return + + +# Modified from https://github.com/haotian-liu/LLaVA/blob/82fc5e0e5f4393a4c26851fa32c69ab37ea3b146/llava/model/llava_arch.py#L99 # noqa: E501 +def prepare_inputs_labels_for_multimodal( + llm: PreTrainedModel, + input_ids: torch.LongTensor = None, + position_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + labels: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None): + if pixel_values is None: + return { + 'input_ids': input_ids, + 'position_ids': position_ids, + 'attention_mask': attention_mask, + 'past_key_values': past_key_values, + 'inputs_embeds': None, + 'labels': labels + } + + _labels = labels + _position_ids = position_ids + _attention_mask = attention_mask + if attention_mask is None: + attention_mask = torch.ones_like(input_ids, dtype=torch.bool) + else: + attention_mask = attention_mask.bool() + if position_ids is None: + position_ids = torch.arange( + 0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) + if labels is None: + labels = torch.full_like(input_ids, IGNORE_INDEX) + + # remove the padding using attention_mask -- TODO: double check + input_ids = [ + cur_input_ids[cur_attention_mask] + for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask) + ] + labels = [ + cur_labels[cur_attention_mask] + for cur_labels, cur_attention_mask in zip(labels, attention_mask) + ] + + new_inputs_embeds = [] + new_labels = [] + cur_image_idx = 0 + for batch_idx, cur_input_ids in enumerate(input_ids): + num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() + if num_images == 0: + cur_pixel_values = pixel_values[cur_image_idx] + cur_inputs_embeds_1 = llm.get_input_embeddings()(cur_input_ids) + cur_inputs_embeds = torch.cat( + [cur_inputs_embeds_1, cur_pixel_values[0:0]], dim=0) + new_inputs_embeds.append(cur_inputs_embeds) + new_labels.append(labels[batch_idx]) + cur_image_idx += 1 + continue + + image_token_indices = [-1] + torch.where( + cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [ + cur_input_ids.shape[0] + ] + cur_input_ids_noim = [] + cur_labels = labels[batch_idx] + cur_labels_noim = [] + for i in range(len(image_token_indices) - 1): + cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + + 1:image_token_indices[i + + 1]]) + cur_labels_noim.append(cur_labels[image_token_indices[i] + + 1:image_token_indices[i + 1]]) + split_sizes = [x.shape[0] for x in cur_labels_noim] + cur_inputs_embeds = llm.get_input_embeddings()( + torch.cat(cur_input_ids_noim)) + cur_inputs_embeds_no_im = torch.split( + cur_inputs_embeds, split_sizes, dim=0) + cur_new_inputs_embeds = [] + cur_new_labels = [] + + for i in range(num_images + 1): + cur_new_inputs_embeds.append(cur_inputs_embeds_no_im[i]) + cur_new_labels.append(cur_labels_noim[i]) + if i < num_images: + cur_pixel_values = pixel_values[cur_image_idx] + cur_image_idx += 1 + cur_new_inputs_embeds.append(cur_pixel_values) + cur_new_labels.append( + torch.full((cur_pixel_values.shape[0], ), + IGNORE_INDEX, + device=cur_labels.device, + dtype=cur_labels.dtype)) + + cur_new_inputs_embeds = torch.cat(cur_new_inputs_embeds) + cur_new_labels = torch.cat(cur_new_labels) + + new_inputs_embeds.append(cur_new_inputs_embeds) + new_labels.append(cur_new_labels) + + # Combine 
them + max_len = max(x.shape[0] for x in new_inputs_embeds) + batch_size = len(new_inputs_embeds) + + new_inputs_embeds_padded = [] + new_labels_padded = torch.full((batch_size, max_len), + IGNORE_INDEX, + dtype=new_labels[0].dtype, + device=new_labels[0].device) + attention_mask = torch.zeros((batch_size, max_len), + dtype=attention_mask.dtype, + device=attention_mask.device) + position_ids = torch.zeros((batch_size, max_len), + dtype=position_ids.dtype, + device=position_ids.device) + + for i, (cur_new_embed, + cur_new_labels) in enumerate(zip(new_inputs_embeds, new_labels)): + cur_len = cur_new_embed.shape[0] + new_inputs_embeds_padded.append( + torch.cat((cur_new_embed, + torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), + dtype=cur_new_embed.dtype, + device=cur_new_embed.device)), + dim=0)) + if cur_len > 0: + new_labels_padded[i, :cur_len] = cur_new_labels + attention_mask[i, :cur_len] = True + position_ids[i, :cur_len] = torch.arange( + 0, + cur_len, + dtype=position_ids.dtype, + device=position_ids.device) + + new_inputs_embeds = torch.stack(new_inputs_embeds_padded, dim=0) + + if _labels is None: + new_labels = None + else: + new_labels = new_labels_padded + + if _attention_mask is None: + attention_mask = None + else: + attention_mask = attention_mask.to(dtype=_attention_mask.dtype) + + if _position_ids is None: + position_ids = None + + return { + 'input_ids': None, + 'position_ids': position_ids, + 'attention_mask': attention_mask, + 'past_key_values': past_key_values, + 'inputs_embeds': new_inputs_embeds, + 'labels': new_labels + } + + +def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + +def guess_load_checkpoint(pth_model): + if osp.isfile(pth_model): + state_dict = torch.load(pth_model, map_location='cpu') + if 'state_dict' in state_dict: + state_dict = state_dict['state_dict'] + elif osp.isdir(pth_model): + try: + from deepspeed.utils.zero_to_fp32 import \ + get_fp32_state_dict_from_zero_checkpoint + except ImportError: + raise ImportError( + 'The provided PTH model appears to be a DeepSpeed checkpoint. ' + 'However, DeepSpeed library is not detected in current ' + 'environment. This suggests that DeepSpeed may not be ' + 'installed or is incorrectly configured. Please verify your ' + 'setup.') + state_dict = get_fp32_state_dict_from_zero_checkpoint( + osp.dirname(pth_model), osp.basename(pth_model)) + else: + raise FileNotFoundError(f'Cannot find {pth_model}') + return state_dict diff --git a/xtuner/tools/chat.py b/xtuner/tools/chat.py index bc03e0c09..4aed0c12c 100644 --- a/xtuner/tools/chat.py +++ b/xtuner/tools/chat.py @@ -1,16 +1,22 @@ # Copyright (c) OpenMMLab. All rights reserved. 
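Every multimodal path in this patch funnels through `prepare_inputs_labels_for_multimodal`: it strips the IMAGE_TOKEN_INDEX placeholders, splices the projected image features into the token embeddings, and rebuilds labels, attention masks and position ids around them, so the LLM runs purely on `inputs_embeds`. A self-contained sketch of that contract, using a tiny randomly initialized GPT-2 as a stand-in LLM (no checkpoint download; GPT-2 is not part of this patch):

import torch
from transformers import GPT2Config, GPT2LMHeadModel

from xtuner.model.utils import prepare_inputs_labels_for_multimodal
from xtuner.utils import IGNORE_INDEX, IMAGE_TOKEN_INDEX

llm = GPT2LMHeadModel(GPT2Config(n_embd=64, n_layer=1, n_head=1))
input_ids = torch.tensor([[1, 2, IMAGE_TOKEN_INDEX, 3, 4]])
labels = torch.tensor([[IGNORE_INDEX, IGNORE_INDEX, IGNORE_INDEX, 3, 4]])
pixel_values = torch.randn(1, 576, 64)  # projected features for one image

mm_inputs = prepare_inputs_labels_for_multimodal(
    llm=llm, input_ids=input_ids, labels=labels, pixel_values=pixel_values)
print(mm_inputs['input_ids'])            # None: ids became embeddings
print(mm_inputs['inputs_embeds'].shape)  # torch.Size([1, 580, 64]), 4 text + 576 image
print((mm_inputs['labels'] == IGNORE_INDEX).sum())  # image positions are label-masked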
import argparse import os +import os.path as osp import re import sys import torch +from huggingface_hub import snapshot_download from peft import PeftModel -from transformers import (AutoModelForCausalLM, AutoTokenizer, - BitsAndBytesConfig, GenerationConfig) +from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel, GenerationConfig) +from xtuner.dataset.utils import expand2square, load_image +from xtuner.model.utils import prepare_inputs_labels_for_multimodal from xtuner.tools.utils import get_chat_utils, update_stop_criteria -from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE +from xtuner.utils import (DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX, + PROMPT_TEMPLATE, SYSTEM_TEMPLATE) TORCH_DTYPE_MAP = dict( fp16=torch.float16, bf16=torch.bfloat16, fp32=torch.float32, auto='auto') @@ -31,7 +37,16 @@ def parse_args(): parser = argparse.ArgumentParser(description='Chat with a HF model') parser.add_argument( 'model_name_or_path', help='Hugging Face model name or path') - parser.add_argument('--adapter', default=None, help='adapter name or path') + adapter_group = parser.add_mutually_exclusive_group() + adapter_group.add_argument( + '--adapter', default=None, help='adapter name or path') + adapter_group.add_argument( + '--llava', default=None, help='llava name or path') + parser.add_argument( + '--visual-encoder', default=None, help='visual encoder name or path') + parser.add_argument( + '--visual-select-layer', default=-2, help='visual select layer') + parser.add_argument('--image', default=None, help='image') parser.add_argument( '--torch-dtype', default='fp16', @@ -126,7 +141,7 @@ def main(): args = parse_args() torch.manual_seed(args.seed) - # model_kwargs + # build llm quantization_config = None load_in_8bit = False if args.bits == 4: @@ -165,9 +180,9 @@ def main(): llm = HFTransformerCasualLM( args.model_name_or_path, model_kwargs=model_kwargs) if args.adapter is not None: + print(f'Loading adapter from {args.adapter}...') llm.model = PeftModel.from_pretrained( llm.model, args.adapter, offload_folder=args.offload_folder) - print(f'Load adapter from {args.adapter}') search_tool = GoogleSearch(api_key=SERPER_API_KEY) chatbot = ReAct( llm=llm, @@ -207,20 +222,79 @@ def main(): from plugins import solve # noqa: F401 if search_open: from plugins import search # noqa: F401 - # build model - model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, - **model_kwargs) + # build llm + llm = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, + **model_kwargs) tokenizer = AutoTokenizer.from_pretrained( args.model_name_or_path, trust_remote_code=True, encode_special_tokens=True) + print(f'Load LLM from {args.model_name_or_path}') if args.adapter is not None: - model = PeftModel.from_pretrained( - model, args.adapter, offload_folder=args.offload_folder) + llm = PeftModel.from_pretrained( + llm, args.adapter, offload_folder=args.offload_folder) print(f'Load adapter from {args.adapter}') - model.eval() + if args.llava is not None: + llava_path = snapshot_download( + repo_id=args.llava) if not osp.isdir( + args.llava) else args.llava + + # build visual_encoder + if 'visual_encoder' in os.listdir(llava_path): + assert args.visual_encoder is None, ( + "Please don't specify the `--visual-encoder` since passed " + '`--llava` contains a visual encoder!') + visual_encoder_path = osp.join(llava_path, 'visual_encoder') + else: + assert args.visual_encoder is not None, ( + 'Please specify the 
`--visual-encoder`!') + visual_encoder_path = args.visual_encoder + visual_encoder = CLIPVisionModel.from_pretrained( + visual_encoder_path, + torch_dtype=TORCH_DTYPE_MAP[args.torch_dtype]) + image_processor = CLIPImageProcessor.from_pretrained( + visual_encoder_path) + print(f'Load visual_encoder from {visual_encoder_path}') + + # load adapter + if 'llm_adapter' in os.listdir(llava_path): + adapter_path = osp.join(llava_path, 'llm_adapter') + llm = PeftModel.from_pretrained( + llm, adapter_path, offload_folder=args.offload_folder) + print(f'Load LLM adapter from {args.llava}') + if 'visual_encoder_adapter' in os.listdir(llava_path): + adapter_path = osp.join(llava_path, 'visual_encoder_adapter') + visual_encoder = PeftModel.from_pretrained( + visual_encoder, + adapter_path, + offload_folder=args.offload_folder) + print(f'Load visual_encoder adapter from {args.llava}') - Streamer, stop_criteria = get_chat_utils(model) + # build projector + projector_path = osp.join(llava_path, 'projector') + projector = AutoModel.from_pretrained( + projector_path, torch_dtype=TORCH_DTYPE_MAP[args.torch_dtype]) + print(f'Load projector from {args.llava}') + + projector.cuda() + projector.eval() + visual_encoder.cuda() + visual_encoder.eval() + + llm.eval() + + if args.image is not None: + image = load_image(args.image) + image = expand2square( + image, tuple(int(x * 255) for x in image_processor.image_mean)) + image = image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + image = image.cuda().unsqueeze(0) + visual_outputs = visual_encoder(image, output_hidden_states=True) + pixel_values = projector( + visual_outputs.hidden_states[args.visual_select_layer][:, 1:]) + + Streamer, stop_criteria = get_chat_utils(llm) if args.no_streamer: Streamer = None @@ -254,6 +328,9 @@ def main(): print('Log: Exit!') exit(0) + if args.image is not None and n_turn == 0: + text = DEFAULT_IMAGE_TOKEN + '\n' + text + if args.prompt_template: prompt_text = '' template = PROMPT_TEMPLATE[args.prompt_template] @@ -291,56 +368,92 @@ def main(): else: prompt_text = text inputs += prompt_text - ids = tokenizer.encode(inputs, return_tensors='pt') - streamer = Streamer(tokenizer) if Streamer is not None else None - if args.with_plugins is not None: - generate_output = model.generate( - inputs=ids.cuda(), - generation_config=gen_config, - streamer=streamer, - stopping_criteria=command_stop_cr).cpu() - generate_output_text = tokenizer.decode( - generate_output[0][len(ids[0]):]) - if streamer is None: - end = '' if generate_output_text[-1] == '\n' else '\n' - print(generate_output_text, end=end) - pattern = r'<\|Commands\|>:(.*?)' - command_text = ', '.join( - re.findall(pattern, generate_output_text)) - extent_text = plugins_api( - command_text, - calculate_open=calculate_open, - solve_open=solve_open, - search_open=search_open) - end = '' if extent_text[-1] == '\n' else '\n' - print(extent_text, end=end) - extent_text_ids = tokenizer.encode( - extent_text, return_tensors='pt', add_special_tokens=False) - new_ids = torch.cat((generate_output, extent_text_ids), dim=1) - new_streamer = Streamer( + if args.image is None: + ids = tokenizer.encode(inputs, return_tensors='pt') + streamer = Streamer( tokenizer) if Streamer is not None else None - generate_output = model.generate( - inputs=new_ids.cuda(), - generation_config=gen_config, - streamer=new_streamer, - stopping_criteria=answer_stop_cr) - if streamer is None: - output_text = tokenizer.decode( - generate_output[0][len(new_ids[0]):]) - end = '' if output_text[-1] == 
'\n' else '\n' - print(output_text, end=end) + if args.with_plugins is not None: + generate_output = llm.generate( + inputs=ids.cuda(), + generation_config=gen_config, + streamer=streamer, + stopping_criteria=command_stop_cr).cpu() + generate_output_text = tokenizer.decode( + generate_output[0][len(ids[0]):]) + if streamer is None: + end = '' if generate_output_text[-1] == '\n' else '\n' + print(generate_output_text, end=end) + pattern = r'<\|Commands\|>:(.*?)' + command_text = ', '.join( + re.findall(pattern, generate_output_text)) + extent_text = plugins_api( + command_text, + calculate_open=calculate_open, + solve_open=solve_open, + search_open=search_open) + end = '' if extent_text[-1] == '\n' else '\n' + print(extent_text, end=end) + extent_text_ids = tokenizer.encode( + extent_text, + return_tensors='pt', + add_special_tokens=False) + new_ids = torch.cat((generate_output, extent_text_ids), + dim=1) + new_streamer = Streamer( + tokenizer) if Streamer is not None else None + generate_output = llm.generate( + inputs=new_ids.cuda(), + generation_config=gen_config, + streamer=new_streamer, + stopping_criteria=answer_stop_cr) + if streamer is None: + output_text = tokenizer.decode( + generate_output[0][len(new_ids[0]):]) + end = '' if output_text[-1] == '\n' else '\n' + print(output_text, end=end) + else: + generate_output = llm.generate( + inputs=ids.cuda(), + generation_config=gen_config, + streamer=streamer, + stopping_criteria=answer_stop_cr) + if streamer is None: + output_text = tokenizer.decode( + generate_output[0][len(ids[0]):]) + end = '' if output_text[-1] == '\n' else '\n' + print(output_text, end=end) + inputs = tokenizer.decode(generate_output[0]) else: - generate_output = model.generate( - inputs=ids.cuda(), + chunk_encode = [] + for idx, chunk in enumerate(inputs.split(DEFAULT_IMAGE_TOKEN)): + if idx == 0: + cur_encode = tokenizer(chunk) + else: + cur_encode = tokenizer(chunk, add_special_tokens=False) + chunk_encode.append(cur_encode) + assert len(chunk_encode) == 2 + ids = [] + for idx, cur_chunk_encode in enumerate(chunk_encode): + ids.extend(cur_chunk_encode['input_ids']) + if idx != len(chunk_encode) - 1: + ids.append(IMAGE_TOKEN_INDEX) + ids = torch.tensor(ids).cuda().unsqueeze(0) + mm_inputs = prepare_inputs_labels_for_multimodal( + llm=llm, input_ids=ids, pixel_values=pixel_values) + + streamer = Streamer( + tokenizer) if Streamer is not None else None + generate_output = llm.generate( + **mm_inputs, generation_config=gen_config, streamer=streamer, + bos_token_id=tokenizer.bos_token_id, stopping_criteria=answer_stop_cr) if streamer is None: - output_text = tokenizer.decode( - generate_output[0][len(ids[0]):]) + output_text = tokenizer.decode(generate_output[0]) end = '' if output_text[-1] == '\n' else '\n' print(output_text, end=end) - inputs = tokenizer.decode(generate_output[0]) + inputs += tokenizer.decode(generate_output[0]) n_turn += 1 if len(generate_output[0]) >= args.max_new_tokens: print( diff --git a/xtuner/tools/copy_cfg.py b/xtuner/tools/copy_cfg.py index 2a08a4fef..9c3ff69c1 100644 --- a/xtuner/tools/copy_cfg.py +++ b/xtuner/tools/copy_cfg.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. 
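Both the chat tool above and the MMBench tool further down expect `--llava` to point at a directory with the layout produced by `xtuner convert pth_to_hf` (see the converter changes at the end of this patch). A hedged sketch of that layout and of reloading the pieces outside the CLI; the directory and checkpoint names are illustrative:

# Expected layout (written by `xtuner convert pth_to_hf`):
#   llava_dir/
#       projector/                # always exported for LLaVA models
#       llm_adapter/              # present when the LLM was LoRA-tuned
#       visual_encoder_adapter/   # present when the visual encoder was LoRA-tuned
#       visual_encoder/           # present when the visual encoder was fully tuned
#       xtuner_config.py
# A fully fine-tuned LLM is saved at the top level of llava_dir instead.
import os
import os.path as osp

import torch
from peft import PeftModel
from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer,
                          CLIPImageProcessor, CLIPVisionModel)

import xtuner.model  # noqa: F401  (registers the projector with AutoConfig/AutoModel)

llava_dir = 'work_dirs/llava_hf'            # illustrative path
llm_name = 'internlm/internlm-chat-7b'      # illustrative checkpoints
clip_name = 'openai/clip-vit-large-patch14-336'

llm = AutoModelForCausalLM.from_pretrained(
    llm_name, torch_dtype=torch.float16, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(llm_name, trust_remote_code=True)
if 'llm_adapter' in os.listdir(llava_dir):
    llm = PeftModel.from_pretrained(llm, osp.join(llava_dir, 'llm_adapter'))

if 'visual_encoder' in os.listdir(llava_dir):
    clip_name = osp.join(llava_dir, 'visual_encoder')
visual_encoder = CLIPVisionModel.from_pretrained(
    clip_name, torch_dtype=torch.float16)
image_processor = CLIPImageProcessor.from_pretrained(clip_name)
if 'visual_encoder_adapter' in os.listdir(llava_dir):
    visual_encoder = PeftModel.from_pretrained(
        visual_encoder, osp.join(llava_dir, 'visual_encoder_adapter'))

projector = AutoModel.from_pretrained(
    osp.join(llava_dir, 'projector'), torch_dtype=torch.float16)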
import argparse -import os +import os.path as osp import shutil from mmengine.utils import mkdir_or_exist @@ -17,7 +17,7 @@ def parse_args(): def add_copy_suffix(string): - file_name, ext = os.path.splitext(string) + file_name, ext = osp.splitext(string) return f'{file_name}_copy{ext}' @@ -25,8 +25,8 @@ def main(): args = parse_args() mkdir_or_exist(args.save_dir) config_path = cfgs_name_path[args.config_name] - save_path = os.path.join(args.save_dir, - add_copy_suffix(os.path.basename(config_path))) + save_path = osp.join(args.save_dir, + add_copy_suffix(osp.basename(config_path))) shutil.copyfile(config_path, save_path) print(f'Copy to {save_path}') diff --git a/xtuner/tools/mmbench.py b/xtuner/tools/mmbench.py new file mode 100644 index 000000000..223b6102f --- /dev/null +++ b/xtuner/tools/mmbench.py @@ -0,0 +1,453 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import json +import os +import os.path as osp +import re +import string +import time + +import numpy as np +import pandas as pd +import torch +import tqdm +from huggingface_hub import snapshot_download +from mmengine import mkdir_or_exist +from peft import PeftModel +from rich.console import Console +from rich.table import Table +from torch.utils.data import Dataset +from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer, + BitsAndBytesConfig, CLIPImageProcessor, + CLIPVisionModel, GenerationConfig) + +from xtuner.dataset.utils import decode_base64_to_image, expand2square +from xtuner.model.utils import prepare_inputs_labels_for_multimodal +from xtuner.tools.utils import get_chat_utils, is_cn_string +from xtuner.utils import (DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX, + PROMPT_TEMPLATE) + +TORCH_DTYPE_MAP = dict( + fp16=torch.float16, bf16=torch.bfloat16, fp32=torch.float32, auto='auto') + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMBench') + parser.add_argument( + 'model_name_or_path', help='Hugging Face model name or path') + parser.add_argument('--data-path', default=None, help='data path') + parser.add_argument('--work-dir', help='the dir to save results') + parser.add_argument('--llava', default=None, help='llava name or path') + parser.add_argument( + '--visual-encoder', default=None, help='visual encoder name or path') + parser.add_argument( + '--visual-select-layer', default=-2, help='visual select layer') + parser.add_argument( + '--prompt-template', + choices=PROMPT_TEMPLATE.keys(), + default=None, + help='Specify a prompt template') + parser.add_argument( + '--torch-dtype', + default='fp16', + choices=TORCH_DTYPE_MAP.keys(), + help='Override the default `torch.dtype` and load the model under ' + 'a specific `dtype`.') + parser.add_argument( + '--bits', + type=int, + choices=[4, 8, None], + default=None, + help='LLM bits') + parser.add_argument( + '--bot-name', type=str, default='BOT', help='Name for Bot') + parser.add_argument( + '--offload-folder', + default=None, + help='The folder in which to offload the model weights (or where the ' + 'model weights are already offloaded).') + parser.add_argument( + '--max-new-tokens', + type=int, + default=100, + help='Maximum number of new tokens allowed in generated text') + parser.add_argument( + '--seed', + type=int, + default=0, + help='Random seed for reproducible text generation') + args = parser.parse_args() + return args + + +class MMBenchDataset(Dataset): + ABBRS = { + 'coarse_perception': 'CP', + 'finegrained_perception (instance-level)': 'FP-S', + 'finegrained_perception (cross-instance)': 'FP-C', + 
'logic_reasoning': 'LR', + 'relation_reasoning': 'RR', + 'attribute_reasoning': 'AR', + 'sketch_reasoning': 'Sketch Reasoning', + 'scenery_building': 'Scenery & Building', + 'food_clothes': 'Food & Clothes', + 'historical_figure': 'Historical Figure', + 'traditional_show': 'Traditional Show', + 'calligraphy_painting': 'Calligraphy Painting', + 'cultural_relic': 'Cultural Relic' + } + + def __init__(self, data_file): + self.data_file = data_file + self.df = pd.read_csv(data_file, sep='\t') + self.split = 'dev' if 'answer' in self.df.iloc[0].keys() else 'test' + self.has_l2_category = 'l2-category' in self.df.columns.to_list() + + def get_image(self, image): + while len(image) < 16: + image = self.df[self.df['index'] == int(image)]['image'].values + assert len(image) == 1 + image = image[0] + image = decode_base64_to_image(image) + return image + + def __len__(self): + return len(self.df) + + def __getitem__(self, idx): + index = self.df.iloc[idx]['index'] + image = self.df.iloc[idx]['image'] + image = self.get_image(image) + question = self.df.iloc[idx]['question'] + answer = self.df.iloc[idx]['answer'] if 'answer' in self.df.iloc[ + 0].keys() else None + category = self.df.iloc[idx]['category'] + + options = { + cand: self.load_from_df(idx, cand) + for cand in string.ascii_uppercase + if self.load_from_df(idx, cand) is not None + } + options_prompt = '' + for key, item in options.items(): + options_prompt += f'{key}. {item}\n' + + hint = self.load_from_df(idx, 'hint') + data = { + 'img': image, + 'question': question, + 'answer': answer, + 'options': options_prompt, + 'category': category, + 'options_dict': options, + 'index': index, + 'context': hint, + } + if self.has_l2_category: + data.update({'l2-category': self.df.iloc[idx]['l2-category']}) + return data + + def load_from_df(self, idx, key): + if key in self.df.iloc[idx] and not pd.isna(self.df.iloc[idx][key]): + return self.df.iloc[idx][key] + else: + return None + + def eval_result(self, result_df, show=True): + + def calc_acc(df, group='category'): + assert group in ['overall', 'category', 'l2-category'] + if group == 'overall': + res = {'Average': np.mean(df['hit'])} + else: + res = {} + abilities = list(set(df[group])) + abilities.sort() + for ab in abilities: + sub_df = df[df[group] == ab] + ab = self.ABBRS[ab] if ab in self.ABBRS else ab + res[ab] = np.mean(sub_df['hit']) + return res + + def eval_sub_data(sub_data, answer_map): + lt = len(sub_data) + for i in range(lt): + item = sub_data.iloc[i] + match = re.search(r'([A-D]+)', item['prediction']) + pred = match.group(1) if match else '' + gt = answer_map[item['index']] + if gt != pred: + return 0 + return 1 + + def show_result(ret_json): + show_dict = ret_json.copy() + table = Table(title=f' MMBench ({self.data_file}) ') + console = Console() + table.add_column('Category', justify='left') + table.add_column('Accuracy (%)', justify='right') + average = show_dict.pop('Average') * 100 + table.add_row('Average', f'{average:.1f}') + table.add_section() + for cat_name, cat_acc in show_dict.items(): + table.add_row(cat_name, f'{cat_acc * 100:.1f}') + with console.capture() as capture: + console.print(table, end='') + print('\n' + capture.get()) + print('Note: Please be cautious if you use the results in papers, ' + "since we don't use ChatGPT as a helper for choice " + 'extraction') + + data = result_df.sort_values(by='index') + data['prediction'] = [str(x) for x in data['prediction']] + for k in data.keys(): + data[k.lower() if k not in 'ABCD' else k] = data.pop(k) + + data_main = 
data[data['index'] < int(1e6)] + cate_map = { + i: c + for i, c in zip(self.df['index'], self.df['category']) + } + if self.has_l2_category: + l2_cate_map = { + i: c + for i, c in zip(self.df['index'], self.df['l2-category']) + } + answer_map = { + i: c + for i, c in zip(self.df['index'], self.df['answer']) + } + + lt = len(data_main) + hit, tot = 0, 0 + result = {} + for i in range(lt): + item_main = data_main.iloc[i] + idx = item_main['index'] + assert idx not in result + sub_data = data[data['index'] % int(1e6) == idx] + ret = eval_sub_data(sub_data, answer_map) + result[idx] = ret + hit += ret + tot += 1 + + indices = data_main['index'] + data_main = data_main.copy() + data_main['hit'] = [result[i] for i in indices] + main_idx = data_main['index'] + data_main['category'] = [cate_map[i] for i in main_idx] + + ret_json = calc_acc(data_main, 'overall') + + if self.has_l2_category: + data_main['l2-category'] = [l2_cate_map[i] for i in main_idx] + l2 = calc_acc(data_main, 'l2-category') + ret_json.update(l2) + else: + leaf = calc_acc(data_main, 'category') + ret_json.update(leaf) + if show: + show_result(ret_json) + return ret_json + + +def main(): + args = parse_args() + torch.manual_seed(args.seed) + + # work_dir + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + save_dir = args.work_dir + else: + # use config filename as default work_dir + save_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.data_path))[0]) + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time())) + save_dir = osp.join(save_dir, timestamp) + mkdir_or_exist(osp.abspath(save_dir)) + print('=======================================================') + print(f'Dataset path: {osp.abspath(args.data_path)}\n' + f'Results will be saved to {osp.abspath(save_dir)}') + print('=======================================================') + results_xlsx_path = osp.join(save_dir, 'mmbench_result.xlsx') + results_json_path = osp.join(save_dir, 'mmbench_result.json') + args_path = osp.join(save_dir, 'args.json') + with open(args_path, 'w') as f: + json.dump(args.__dict__, f, indent=2) + + # build llm + quantization_config = None + load_in_8bit = False + if args.bits == 4: + quantization_config = BitsAndBytesConfig( + load_in_4bit=True, + load_in_8bit=False, + llm_int8_threshold=6.0, + llm_int8_has_fp16_weight=False, + bnb_4bit_compute_dtype=torch.float16, + bnb_4bit_use_double_quant=True, + bnb_4bit_quant_type='nf4') + elif args.bits == 8: + load_in_8bit = True + model_kwargs = { + 'quantization_config': quantization_config, + 'load_in_8bit': load_in_8bit, + 'device_map': 'auto', + 'offload_folder': args.offload_folder, + 'trust_remote_code': True, + 'torch_dtype': TORCH_DTYPE_MAP[args.torch_dtype] + } + + # build llm + llm = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, + **model_kwargs) + tokenizer = AutoTokenizer.from_pretrained( + args.model_name_or_path, + trust_remote_code=True, + encode_special_tokens=True) + print(f'Load LLM from {args.model_name_or_path}') + + llava_path = snapshot_download( + repo_id=args.llava) if not osp.isdir(args.llava) else args.llava + + # build visual_encoder + if 'visual_encoder' in os.listdir(llava_path): + assert args.visual_encoder is None, ( + "Please don't specify the `--visual-encoder` since passed " + '`--llava` contains a visual encoder!') + visual_encoder_path = osp.join(llava_path, 'visual_encoder') + else: + assert args.visual_encoder is not None, ( + 'Please specify the `--visual-encoder`!') + 
visual_encoder_path = args.visual_encoder + visual_encoder = CLIPVisionModel.from_pretrained( + visual_encoder_path, torch_dtype=TORCH_DTYPE_MAP[args.torch_dtype]) + image_processor = CLIPImageProcessor.from_pretrained(visual_encoder_path) + print(f'Load visual_encoder from {visual_encoder_path}') + + # load adapter + if 'llm_adapter' in os.listdir(llava_path): + adapter_path = osp.join(llava_path, 'llm_adapter') + llm = PeftModel.from_pretrained( + llm, adapter_path, offload_folder=args.offload_folder) + print(f'Load LLM adapter from {args.llava}') + if 'visual_encoder_adapter' in os.listdir(llava_path): + adapter_path = osp.join(llava_path, 'visual_encoder_adapter') + visual_encoder = PeftModel.from_pretrained( + visual_encoder, adapter_path, offload_folder=args.offload_folder) + print(f'Load visual_encoder adapter from {args.llava}') + + # build projector + projector_path = osp.join(llava_path, 'projector') + projector = AutoModel.from_pretrained( + projector_path, torch_dtype=TORCH_DTYPE_MAP[args.torch_dtype]) + print(f'Load projector from {args.llava}') + + projector.cuda() + projector.eval() + visual_encoder.cuda() + visual_encoder.eval() + llm.eval() + + _, stop_criteria = get_chat_utils(llm) + + gen_config = GenerationConfig( + max_new_tokens=args.max_new_tokens, + do_sample=False, + eos_token_id=tokenizer.eos_token_id, + pad_token_id=tokenizer.pad_token_id + if tokenizer.pad_token_id is not None else tokenizer.eos_token_id, + ) + + dataset = MMBenchDataset(args.data_path) + results = [] + n_samples = len(dataset) + for i in tqdm.tqdm(range(n_samples)): + data_sample = dataset[i] + if data_sample['context'] is not None: + text = data_sample['context'] + '\n' + data_sample[ + 'question'] + '\n' + data_sample['options'] + else: + text = data_sample['question'] + '\n' + data_sample['options'] + + text = DEFAULT_IMAGE_TOKEN + '\n' + text + + if is_cn_string(text): + text = text + '请直接回答选项字母。' + else: + text = text + ("Answer with the option's letter from the " + 'given choices directly.') + + if args.prompt_template: + prompt_text = '' + template = PROMPT_TEMPLATE[args.prompt_template] + prompt_text += template['INSTRUCTION'].format( + input=text, round=1, bot_name=args.bot_name) + else: + prompt_text = text + inputs = prompt_text + + image = data_sample['img'].convert('RGB') + image = expand2square( + image, tuple(int(x * 255) for x in image_processor.image_mean)) + image = image_processor.preprocess( + image, return_tensors='pt')['pixel_values'][0] + image = image.cuda().unsqueeze(0) + visual_outputs = visual_encoder(image, output_hidden_states=True) + pixel_values = projector( + visual_outputs.hidden_states[args.visual_select_layer][:, 1:]) + + chunk_encode = [] + for idx, chunk in enumerate(inputs.split(DEFAULT_IMAGE_TOKEN)): + if idx == 0: + cur_encode = tokenizer(chunk) + else: + cur_encode = tokenizer(chunk, add_special_tokens=False) + chunk_encode.append(cur_encode) + assert len(chunk_encode) == 2 + ids = [] + for idx, cur_chunk_encode in enumerate(chunk_encode): + ids.extend(cur_chunk_encode['input_ids']) + if idx != len(chunk_encode) - 1: + ids.append(IMAGE_TOKEN_INDEX) + ids = torch.tensor(ids).cuda().unsqueeze(0) + mm_inputs = prepare_inputs_labels_for_multimodal( + llm=llm, input_ids=ids, pixel_values=pixel_values) + + generate_output = llm.generate( + **mm_inputs, + generation_config=gen_config, + streamer=None, + bos_token_id=tokenizer.bos_token_id, + stopping_criteria=stop_criteria) + + predict = tokenizer.decode( + generate_output[0], skip_special_tokens=True).strip() 
+ cur_result = {} + cur_result['question'] = data_sample.get('question') + cur_result.update(data_sample.get('options_dict')) + cur_result['prediction'] = predict + if data_sample.get('category') is not None: + cur_result['category'] = data_sample.get('category') + if data_sample.get('l2-category') is not None: + cur_result['l2-category'] = data_sample.get('l2-category') + cur_result['index'] = data_sample.get('index') + cur_result['split'] = data_sample.get('split') + cur_result['answer'] = data_sample.get('answer') + results.append(cur_result) + + results_df = pd.DataFrame(results) + with pd.ExcelWriter(results_xlsx_path, engine='openpyxl') as writer: + results_df.to_excel(writer, index=False) + + if dataset.split == 'dev': + results_dict = dataset.eval_result(results_df, show=True) + with open(results_json_path, 'w') as f: + json.dump(results_dict, f, indent=2) + else: + print('All done!') + + +if __name__ == '__main__': + main() diff --git a/xtuner/tools/model_converters/merge.py b/xtuner/tools/model_converters/merge.py index 4b4503eb0..c6e4cddb7 100644 --- a/xtuner/tools/model_converters/merge.py +++ b/xtuner/tools/model_converters/merge.py @@ -3,12 +3,15 @@ import torch from peft import PeftModel -from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers import (AutoModelForCausalLM, AutoTokenizer, + CLIPImageProcessor, CLIPVisionModel) + +from xtuner.model.utils import LoadWoInit def parse_args(): parser = argparse.ArgumentParser( - description='Merge a HuggingFace adapter to LLM') + description='Merge a HuggingFace adapter to base model') parser.add_argument('model_name_or_path', help='model name or path') parser.add_argument('adapter_name_or_path', help='adapter name or path') parser.add_argument( @@ -20,39 +23,48 @@ def parse_args(): help='Only applicable for LLM. 
The maximum size for ' 'each sharded checkpoint.') parser.add_argument( - '--offload-folder', - default=None, - help='The folder in which to offload the model weights (or where ' - 'the model weights are already offloaded).') + '--is-clip', + action='store_true', + help='Indicate if the model is a clip model') + parser.add_argument( + '--device', + default='cuda', + choices=('cuda', 'cpu', 'auto'), + help='Indicate the device') + args = parser.parse_args() return args def main(): args = parse_args() - model = AutoModelForCausalLM.from_pretrained( - args.model_name_or_path, - torch_dtype=torch.float16, - low_cpu_mem_usage=True, - device_map='auto', - offload_folder=args.offload_folder, - trust_remote_code=True) - tokenizer = AutoTokenizer.from_pretrained( - args.model_name_or_path, - trust_remote_code=True, - encode_special_tokens=True) + if args.is_clip: + with LoadWoInit(): + model = CLIPVisionModel.from_pretrained( + args.model_name_or_path, device_map=args.device) + processor = CLIPImageProcessor.from_pretrained(args.model_name_or_path) + else: + with LoadWoInit(): + model = AutoModelForCausalLM.from_pretrained( + args.model_name_or_path, + torch_dtype=torch.float16, + low_cpu_mem_usage=True, + device_map=args.device, + trust_remote_code=True) + processor = AutoTokenizer.from_pretrained( + args.model_name_or_path, trust_remote_code=True) model_unmerged = PeftModel.from_pretrained( model, args.adapter_name_or_path, - device_map='auto', - torch_dtype=torch.float16, - offload_folder=args.offload_folder, + device_map=args.device, is_trainable=False) model_merged = model_unmerged.merge_and_unload() print(f'Saving to {args.save_dir}...') model_merged.save_pretrained( - args.save_dir, max_shard_size=args.max_shard_size) - tokenizer.save_pretrained(args.save_dir) + args.save_dir, + safe_serialization=False, + max_shard_size=args.max_shard_size) + processor.save_pretrained(args.save_dir) print('All done!') diff --git a/xtuner/tools/model_converters/pth_to_hf.py b/xtuner/tools/model_converters/pth_to_hf.py index d4a38e68b..ddb1d09ec 100644 --- a/xtuner/tools/model_converters/pth_to_hf.py +++ b/xtuner/tools/model_converters/pth_to_hf.py @@ -1,12 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse -import os +import os.path as osp import shutil -import torch from mmengine.config import Config, DictAction from xtuner.configs import cfgs_name_path +from xtuner.model.utils import guess_load_checkpoint from xtuner.registry import BUILDER @@ -41,34 +41,11 @@ def parse_args(): return args -def guess_load_checkpoint(pth_model): - if os.path.isfile(pth_model): - state_dict = torch.load(pth_model, map_location='cpu') - if 'state_dict' in state_dict: - state_dict = state_dict['state_dict'] - elif os.path.isdir(pth_model): - try: - from deepspeed.utils.zero_to_fp32 import \ - get_fp32_state_dict_from_zero_checkpoint - except ImportError: - raise ImportError( - 'The provided PTH model appears to be a DeepSpeed checkpoint. ' - 'However, DeepSpeed library is not detected in current ' - 'environment. This suggests that DeepSpeed may not be ' - 'installed or is incorrectly configured. 
Please verify your ' - 'setup.') - state_dict = get_fp32_state_dict_from_zero_checkpoint( - os.path.dirname(pth_model), os.path.basename(pth_model)) - else: - raise FileNotFoundError(f'Cannot find {pth_model}') - return state_dict - - def main(): args = parse_args() # parse config - if not os.path.isfile(args.config): + if not osp.isfile(args.config): try: args.config = cfgs_name_path[args.config] except KeyError: @@ -79,6 +56,10 @@ def main(): if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) + if (isinstance(cfg.model.type, str) and 'LLaVAModel' + in cfg.model.type) or 'LLaVAModel' == cfg.model.type.__name__: + cfg.model.pretrained_pth = None + model = BUILDER.build(cfg.model) state_dict = guess_load_checkpoint(args.pth_model) @@ -86,18 +67,60 @@ def main(): print(f'Load PTH model from {args.pth_model}') if not args.fp32: - print('Convert weights to float16') + print('Convert LLM to float16') model.llm.half() - print(f'Saving HuggingFace model to {args.save_dir}') - model.llm.save_pretrained( - args.save_dir, max_shard_size=args.max_shard_size) - if 'PeftModel' not in model.llm.__class__.__name__: - print(f'Saving HuggingFace tokenizer to {args.save_dir}') - tokenizer = BUILDER.build(cfg.tokenizer) - tokenizer.save_pretrained(args.save_dir) - shutil.copyfile(args.config, os.path.join(args.save_dir, - 'xtuner_config.py')) + if (isinstance(cfg.model.type, str) and 'LLaVAModel' + in cfg.model.type) or 'LLaVAModel' == cfg.model.type.__name__: + if cfg.model.get('llm') and (not cfg.model.get('freeze_llm', False) + or cfg.model.get('llm_lora')): + if 'PeftModel' in model.llm.__class__.__name__: + llm_path = osp.join(args.save_dir, 'llm_adapter') + print(f'Saving LLM adapter to {llm_path}') + else: + llm_path = args.save_dir + print(f'Saving LLM tokenizer to {llm_path}') + tokenizer = BUILDER.build(cfg.tokenizer) + tokenizer.save_pretrained(llm_path) + print(f'Saving LLM to {llm_path}') + model.llm.save_pretrained( + llm_path, max_shard_size=args.max_shard_size) + + if cfg.model.get('visual_encoder') and ( + not cfg.model.get('freeze_visual_encoder', False) + or cfg.model.get('visual_encoder_lora')): + if 'PeftModel' in model.visual_encoder.__class__.__name__: + visual_encoder_path = osp.join(args.save_dir, + 'visual_encoder_adapter') + print( + f'Saving visual_encoder adapter to {visual_encoder_path}') + else: + visual_encoder_path = osp.join(args.save_dir, 'visual_encoder') + print('Saving visual_encoder image_processor to ' + f'{visual_encoder_path}') + image_processor = BUILDER.build(cfg.image_processor) + image_processor.save_pretrained(visual_encoder_path) + print(f'Saving visual_encoder to {visual_encoder_path}') + model.visual_encoder.save_pretrained( + visual_encoder_path, max_shard_size=args.max_shard_size) + + if hasattr(model, 'projector'): + projector_path = osp.join(args.save_dir, 'projector') + print(f'Saving projector to {projector_path}') + model.projector.save_pretrained( + projector_path, max_shard_size=args.max_shard_size) + else: + llm_path = args.save_dir + if 'PeftModel' in model.llm.__class__.__name__: + print(f'Saving adapter to {llm_path}') + else: + print(f'Saving LLM tokenizer to {llm_path}') + tokenizer = BUILDER.build(cfg.tokenizer) + tokenizer.save_pretrained(llm_path) + print(f'Saving LLM to {llm_path}') + model.llm.save_pretrained(llm_path, max_shard_size=args.max_shard_size) + + shutil.copyfile(args.config, osp.join(args.save_dir, 'xtuner_config.py')) print('All done!') diff --git a/xtuner/tools/model_converters/split.py
b/xtuner/tools/model_converters/split.py index adf82c06e..da0e4d7b7 100644 --- a/xtuner/tools/model_converters/split.py +++ b/xtuner/tools/model_converters/split.py @@ -3,6 +3,7 @@ import copy import json import os +import os.path as osp import shutil import torch @@ -25,11 +26,11 @@ def main(): all_files = os.listdir(args.src_dir) for name in all_files: if not name.startswith(('pytorch_model', '.')): - src_path = os.path.join(args.src_dir, name) - dst_path = os.path.join(args.dst_dir, name) + src_path = osp.join(args.src_dir, name) + dst_path = osp.join(args.dst_dir, name) shutil.copy(src_path, dst_path) - with open(os.path.join(args.src_dir, 'pytorch_model.bin.index.json')) as f: + with open(osp.join(args.src_dir, 'pytorch_model.bin.index.json')) as f: index = json.load(f) n_shard = len(index['weight_map']) @@ -40,7 +41,7 @@ def main(): checkpoints = set(index['weight_map'].values()) for ckpt in checkpoints: state_dict = torch.load( - os.path.join(args.src_dir, ckpt), map_location='cuda') + osp.join(args.src_dir, ckpt), map_location='cuda') keys = sorted(list(state_dict.keys())) for k in keys: new_state_dict_name = 'pytorch_model-{:05d}-of-{:05d}.bin'.format( @@ -48,11 +49,11 @@ def main(): new_index['weight_map'][k] = new_state_dict_name new_state_dict = {k: state_dict[k]} torch.save(new_state_dict, - os.path.join(args.dst_dir, new_state_dict_name)) + osp.join(args.dst_dir, new_state_dict_name)) cnt += 1 del state_dict torch.cuda.empty_cache() - with open(os.path.join(args.dst_dir, 'pytorch_model.bin.index.json'), + with open(osp.join(args.dst_dir, 'pytorch_model.bin.index.json'), 'w') as f: json.dump(new_index, f) assert new_index['weight_map'].keys() == index['weight_map'].keys( diff --git a/xtuner/tools/test.py b/xtuner/tools/test.py index 41be08411..5eb3f6d9d 100644 --- a/xtuner/tools/test.py +++ b/xtuner/tools/test.py @@ -4,38 +4,15 @@ import os.path as osp from types import FunctionType -import torch from mmengine.config import Config, DictAction from mmengine.registry import RUNNERS from mmengine.runner import Runner from xtuner.configs import cfgs_name_path +from xtuner.model.utils import guess_load_checkpoint from xtuner.registry import MAP_FUNC -def guess_load_checkpoint(pth_model): - if os.path.isfile(pth_model): - state_dict = torch.load(pth_model, map_location='cpu') - if 'state_dict' in state_dict: - state_dict = state_dict['state_dict'] - elif os.path.isdir(pth_model): - try: - from deepspeed.utils.zero_to_fp32 import \ - get_fp32_state_dict_from_zero_checkpoint - except ImportError: - raise ImportError( - 'The provided PTH model appears to be a DeepSpeed checkpoint. ' - 'However, DeepSpeed library is not detected in current ' - 'environment. This suggests that DeepSpeed may not be ' - 'installed or is incorrectly configured. 
Please verify your ' - 'setup.') - state_dict = get_fp32_state_dict_from_zero_checkpoint( - os.path.dirname(pth_model), os.path.basename(pth_model)) - else: - raise FileNotFoundError(f'Cannot find {pth_model}') - return state_dict - - def parse_args(): parser = argparse.ArgumentParser(description='Test model') parser.add_argument('config', help='config file name or path.') @@ -84,7 +61,7 @@ def main(): args = parse_args() # parse config - if not os.path.isfile(args.config): + if not osp.isfile(args.config): try: args.config = cfgs_name_path[args.config] except KeyError: diff --git a/xtuner/tools/train.py b/xtuner/tools/train.py index 4f13d2484..dba400673 100644 --- a/xtuner/tools/train.py +++ b/xtuner/tools/train.py @@ -79,7 +79,7 @@ def main(): args = parse_args() # parse config - if not os.path.isfile(args.config): + if not osp.isfile(args.config): try: args.config = cfgs_name_path[args.config] except KeyError: @@ -117,7 +117,7 @@ def main(): osp.splitext(osp.basename(args.config))[0]) # enable deepspeed if args.deepspeed: - if not os.path.isfile(args.deepspeed): + if not osp.isfile(args.deepspeed): try: args.deepspeed = cfgs_name_path[args.deepspeed] except KeyError: @@ -191,7 +191,7 @@ def main(): logger='current', level=logging.WARNING) else: - if not os.path.isfile(args.deepspeed): + if not osp.isfile(args.deepspeed): try: args.deepspeed = cfgs_name_path[args.deepspeed] except KeyError: diff --git a/xtuner/tools/utils.py b/xtuner/tools/utils.py index 46e76a2b7..1d01ab32c 100644 --- a/xtuner/tools/utils.py +++ b/xtuner/tools/utils.py @@ -158,3 +158,9 @@ def auto_dtype_of_deepspeed_config(ds_config): ds_config['fp16']['enabled'] = False ds_config['bf16']['enabled'] = True return ds_config + + +def is_cn_string(s): + if re.search('[\u4e00-\u9fff]', s): + return True + return False diff --git a/xtuner/utils/__init__.py b/xtuner/utils/__init__.py index bb3e1528f..6bc9a1173 100644 --- a/xtuner/utils/__init__.py +++ b/xtuner/utils/__init__.py @@ -1,9 +1,11 @@ # Copyright (c) OpenMMLab. All rights reserved. -from .constants import DEFAULT_PAD_TOKEN_INDEX, IGNORE_INDEX +from .constants import (DEFAULT_IMAGE_TOKEN, DEFAULT_PAD_TOKEN_INDEX, + IGNORE_INDEX, IMAGE_TOKEN_INDEX) from .stop_criteria import StopWordStoppingCriteria from .templates import PROMPT_TEMPLATE, SYSTEM_TEMPLATE __all__ = [ 'IGNORE_INDEX', 'DEFAULT_PAD_TOKEN_INDEX', 'PROMPT_TEMPLATE', - 'SYSTEM_TEMPLATE', 'StopWordStoppingCriteria' + 'DEFAULT_IMAGE_TOKEN', 'SYSTEM_TEMPLATE', 'StopWordStoppingCriteria', + 'IMAGE_TOKEN_INDEX' ] diff --git a/xtuner/utils/constants.py b/xtuner/utils/constants.py index b05a12a44..2862c8ab5 100644 --- a/xtuner/utils/constants.py +++ b/xtuner/utils/constants.py @@ -1,3 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. IGNORE_INDEX = -100 DEFAULT_PAD_TOKEN_INDEX = 0 +IMAGE_TOKEN_INDEX = -200 +DEFAULT_IMAGE_TOKEN = '<image>' diff --git a/xtuner/utils/templates.py b/xtuner/utils/templates.py index 8bbd854d2..7e85e0090 100644 --- a/xtuner/utils/templates.py +++ b/xtuner/utils/templates.py @@ -19,7 +19,7 @@ 'safe. Your answers should not include any harmful, unethical, ' 'racist, sexist, toxic, dangerous, or illegal content. Please ' 'ensure that your responses are socially unbiased and positive in ' - 'nature.\n{system}\n<</SYS>>\n [/INST]'), + 'nature.\n{system}\n<</SYS>>\n [/INST] '), INSTRUCTION='[INST] {input} [/INST]'), code_llama_chat=dict( SYSTEM='{system}\n', INSTRUCTION='[INST] {input} [/INST]'), @@ -43,14 +43,20 @@ SYSTEM=('A chat between a curious user and an artificial ' 'intelligence assistant.
The assistant gives ' 'helpful, detailed, and polite answers to the ' - 'user\'s questions. {system}\n'), - INSTRUCTION=('USER: {input} ASSISTANT: ')), + 'user\'s questions. {system}\n '), + INSTRUCTION=('USER: {input} ASSISTANT:')), wizardcoder=dict( SYSTEM=( 'Below is an instruction that describes a task. ' 'Write a response that appropriately completes the request.\n\n' - '{system}\n'), + '{system}\n '), INSTRUCTION=('### Instruction:\n{input}\n\n### Response:')), + vicuna=dict( + SYSTEM=('A chat between a curious user and an artificial ' + 'intelligence assistant. The assistant gives ' + 'helpful, detailed, and polite answers to the ' + 'user\'s questions. {system}\n '), + INSTRUCTION=('USER: {input} ASSISTANT:')), deepseekcoder=dict( SYSTEM=('You are an AI programming assistant, utilizing ' 'the DeepSeek Coder model, developed by DeepSeek'