model:
  arch: minigpt_v2

  # vit encoder
  image_size: 448
  drop_path_rate: 0
  use_grad_checkpoint: False
  vit_precision: "fp16"
  freeze_vit: True

  # generation configs
  prompt: ""

  llama_model: "ZebangCheng/Emotion-LLaMA"
  # llama_model: "/home/user/project/Emotion-LLaMA/checkpoints/Llama-2-7b-chat-hf"
  lora_r: 64
  lora_alpha: 16

preprocess:
  vis_processor:
    train:
      name: "blip2_image_train"
      image_size: 448
    eval:
      name: "blip2_image_eval"
      image_size: 448
  text_processor:
    train:
      name: "blip_caption"
    eval:
      name: "blip_caption"