{
  "model_cfg": {
    "embed_dim": 512,
    "vision_cfg": {
      "image_size": 224,
      "layers": 12,
      "width": 768,
      "patch_size": 32
    },
    "text_cfg": {
      "context_length": 77,
      "vocab_size": 49408,
      "width": 512,
      "heads": 8,
      "layers": 12
    }
  },
  "preprocess_cfg": {
    "mean": [
      0.48145466,
      0.4578275,
      0.40821073
    ],
    "std": [
      0.26862954,
      0.26130258,
      0.27577711
    ]
  }
}