{
  "_name_or_path": "google/vit-base-patch16-224-in21k",
  "architectures": [
    "ViTForImageClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "0-2",
    "1": "3-9",
    "2": "10-19",
    "3": "20-29",
    "4": "30-39",
    "5": "40-49",
    "6": "50-59",
    "7": "60-69",
    "8": "more than 70"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "0-2": 0,
    "10-19": 2,
    "20-29": 3,
    "3-9": 1,
    "30-39": 4,
    "40-49": 5,
    "50-59": 6,
    "60-69": 7,
    "more than 70": 8
  },
  "layer_norm_eps": 1e-12,
  "model_type": "vit",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.47.0"
}