1 """
2 configuration_prismatic.py
3
4 HuggingFace-style configuration definition for Prismatic VLMs, inheriting from `transformers.PretrainedConfig`.
5 Default configuration specifies `siglip-224px+7b`.
6 """
7
8 from typing import Any, Dict, List, Optional
9
10 from transformers import PretrainedConfig
11 from transformers.models.auto import CONFIG_MAPPING
12
13 # === Utilities for Mapping Prismatic names to HF names ===
14 # fmt: off
15 VISION_BACKBONE_TO_RESOLUTION: Dict[str, List[int]] = {
16 "clip-vit-l": [224], "siglip-vit-so400m": [224], "dinov2-vit-l": [224], "in1k-vit-l": [224],
17
18 "clip-vit-l-336px": [336],
19 "siglip-vit-so400m-384px": [384],
20
21 "dinoclip-vit-l-336px": [336, 336],
22 "dinosiglip-vit-so-224px": [224, 224],
23 "dinosiglip-vit-so-384px": [384, 384],
24 }
25 VISION_BACKBONE_TO_TIMM_ID: Dict[str, List[str]] = {
26 "clip-vit-l": ["vit_large_patch14_clip_224.openai"],
27 "clip-vit-l-336px": ["vit_large_patch14_clip_336.openai"],
28
29 "dinov2-vit-l": ["vit_large_patch14_reg4_dinov2.lvd142m"],
30 "in1k-vit-l": ["vit_large_patch16_224.augreg_in21k_ft_in1k"],
31
32 "siglip-vit-so400m": ["vit_so400m_patch14_siglip_224"],
33 "siglip-vit-so400m-384px": ["vit_so400m_patch14_siglip_384"],
34
35 "dinoclip-vit-l-336px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_large_patch14_clip_336.openai"],
36 "dinosiglip-vit-so-224px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_224"],
37 "dinosiglip-vit-so-384px": ["vit_large_patch14_reg4_dinov2.lvd142m", "vit_so400m_patch14_siglip_384"],
38 }
39 TIMM_OVERRIDE_ACT_LAYER: Dict[str, List[Optional[str]]] = {
40 "clip-vit-l": ["quick_gelu"], "clip-vit-l-336px": ["quick_gelu"],
41 "dinov2-vit-l": [None], "in1k-vit-l": [None],
42 "siglip-vit-so400m": [None], "siglip-vit-so400m-384px": [None],
43 "dinoclip-vit-l-336px": [None, "quick_gelu"],
44 "dinosiglip-vit-so-224px": [None, None], "dinosiglip-vit-so-384px": [None, None]
45 }
46
47 LLM_BACKBONE_TO_HF_PATH = {
48 "llama2-7b-pure": "meta-llama/Llama-2-7b-hf", "llama2-13b-pure": "meta-llama/Llama-2-13b-hf",
49 "llama2-7b-chat": "meta-llama/Llama-2-7b-chat-hf", "llama2-13b-chat": "meta-llama/Llama-2-13b-chat-hf",
50
51 "vicuna-v15-7b": "lmsys/vicuna-7b-v1.5", "vicuna-v15-13b": "lmsys/vicuna-13b-v1.5",
52
53 "mistral-v0.1-7b-pure": "mistralai/Mistral-7B-v0.1",
54 "mistral-v0.1-7b-instruct": "mistralai/Mistral-7B-Instruct-v0.1",
55
56 "phi-2-3b": "microsoft/phi-2",
57 }
58 LLM_BACKBONE_TO_HF_METACLASS = {
59 "llama2-7b-pure": "llama", "llama2-13b-pure": "llama", "llama2-7b-chat": "llama", "llama2-13b-chat": "llama",
60 "vicuna-v15-7b": "llama", "vicuna-v15-13b": "llama",
61
62 "mistral-v0.1-7b-pure": "mistral", "mistral-v0.1-7b-instruct": "mistral",
63
64 "phi-2-3b": "phi",
65 }
66
67 VALID_VISION_BACKBONES = set(VISION_BACKBONE_TO_RESOLUTION.keys())
68 VALID_LLM_BACKBONES = set(LLM_BACKBONE_TO_HF_PATH)
69 # fmt: on
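
# NOTE :: Illustrative consistency check (an addition for this write-up, not part of the upstream file).
#         Every vision backbone ID should index all three tables above with matching list lengths
#         (one entry per ViT; the fused `dinoclip` / `dinosiglip` variants carry two).
assert all(
    len(VISION_BACKBONE_TO_RESOLUTION[k]) == len(VISION_BACKBONE_TO_TIMM_ID[k]) == len(TIMM_OVERRIDE_ACT_LAYER[k])
    for k in VALID_VISION_BACKBONES
), "Vision backbone mapping tables are out of sync!"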


class PrismaticConfig(PretrainedConfig):
    model_type: str = "prismatic"
    is_composition: bool = False

    def __init__(
        self,
        vision_backbone_id: str = "siglip-vit-so400m",
        llm_backbone_id: str = "vicuna-v15-7b",
        arch_specifier: str = "no-align+gelu-mlp",
        use_fused_vision_backbone: Optional[bool] = None,
        image_resize_strategy: str = "letterbox",
        text_config: Optional[Dict[str, Any]] = None,
        llm_max_length: int = 2048,
        pad_token_id: int = 32000,
        pad_to_multiple_of: int = 64,
        output_projector_states: bool = False,
        **kwargs: Any,
    ) -> None:
        if vision_backbone_id not in VALID_VISION_BACKBONES:
            raise ValueError(f"Vision backbone `{vision_backbone_id}` not in {VALID_VISION_BACKBONES = }")

        if llm_backbone_id not in VALID_LLM_BACKBONES:
            raise ValueError(f"LLM backbone `{llm_backbone_id}` not in {VALID_LLM_BACKBONES = }")

        # Set Prismatic Configuration Fields
        self.vision_backbone_id = vision_backbone_id
        self.llm_backbone_id = llm_backbone_id
        self.arch_specifier = arch_specifier
        self.output_projector_states = output_projector_states

        # [Contract] All vision backbone parameters are lists =>> supports fused backbones with different preprocessing
        self.use_fused_vision_backbone = (
            use_fused_vision_backbone
            if use_fused_vision_backbone is not None
            else any(self.vision_backbone_id.startswith(v) for v in ["dinoclip", "dinosiglip"])
        )
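        # (e.g., `dinosiglip-vit-so-224px` fuses two ViTs, so `timm_model_ids`, `timm_override_act_layers`,
        #  and `image_sizes` below each carry two entries, one per stacked backbone)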

        self.timm_model_ids = VISION_BACKBONE_TO_TIMM_ID[self.vision_backbone_id]
        self.timm_override_act_layers = TIMM_OVERRIDE_ACT_LAYER[self.vision_backbone_id]
        self.image_sizes = VISION_BACKBONE_TO_RESOLUTION[self.vision_backbone_id]
        self.image_resize_strategy = image_resize_strategy

        self.hf_llm_id = LLM_BACKBONE_TO_HF_PATH[self.llm_backbone_id]
        self.llm_max_length = llm_max_length
        self.pad_token_id, self.pad_to_multiple_of = pad_token_id, pad_to_multiple_of
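        # (the default `pad_token_id=32000` is one past Llama-2's 32,000-token vocabulary, i.e., a newly
        #  added <PAD> token; `pad_to_multiple_of=64` keeps resized embedding matrices GPU-friendly)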

        # [IMPORTANT] HF Utilities actually look for a `text_config` field... we need to use that specific naming!
        self.text_config = (
            CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]](**text_config)
            if text_config is not None
            else CONFIG_MAPPING[LLM_BACKBONE_TO_HF_METACLASS[self.llm_backbone_id]]()
        )

        # Dispatch **kwargs to super() =>> note that `pad_token_id` collides, so we pass it in here as well...
        super().__init__(pad_token_id=pad_token_id, **kwargs)


class OpenVLAConfig(PrismaticConfig):
    model_type: str = "openvla"

    def __init__(
        self,
        norm_stats: Optional[Dict[str, Dict[str, Dict[str, Dict[str, List[float]]]]]] = None,
        n_action_bins: int = 256,
        **kwargs: Any,
    ) -> None:
        self.norm_stats, self.n_action_bins = norm_stats, n_action_bins

        super().__init__(**kwargs)
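

# NOTE :: Minimal smoke test (an illustrative sketch added here, not part of the upstream file).
#         Exercises default construction, the fused-backbone heuristic, and `to_dict()` serialization.
if __name__ == "__main__":
    cfg = PrismaticConfig()
    assert cfg.image_sizes == [224] and not cfg.use_fused_vision_backbone

    fused = OpenVLAConfig(vision_backbone_id="dinosiglip-vit-so-224px", llm_backbone_id="llama2-7b-pure")
    assert fused.use_fused_vision_backbone and len(fused.timm_model_ids) == 2

    # `to_dict()` backs HF's `save_pretrained` =>> all Prismatic fields land in `config.json`
    assert fused.to_dict()["vision_backbone_id"] == "dinosiglip-vit-so-224px"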