config.json
{
  "_name_or_path": "/home/notebook/data/group/wth/QualityReasoning/Models/Qwen2.5-VL-7B-Instruct",
  "architectures": [
    "Qwen2_5_VLForConditionalGeneration"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 151643,
  "eos_token_id": 151645,
  "hidden_act": "silu",
  "hidden_size": 3584,
  "image_token_id": 151655,
  "initializer_range": 0.02,
  "intermediate_size": 18944,
  "max_position_embeddings": 128000,
  "max_window_layers": 28,
  "model_type": "qwen2_5_vl",
  "num_attention_heads": 28,
  "num_hidden_layers": 28,
  "num_key_value_heads": 4,
  "rms_norm_eps": 1e-06,
  "rope_scaling": {
    "mrope_section": [
      16,
      24,
      24
    ],
    "rope_type": "default",
    "type": "default"
  },
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.49.0",
  "use_cache": false,
  "use_sliding_window": false,
  "video_token_id": 151656,
  "vision_config": {
    "hidden_size": 1280,
    "in_chans": 3,
    "model_type": "qwen2_5_vl",
    "spatial_patch_size": 14,
    "tokens_per_second": 2,
    "torch_dtype": "bfloat16"
  },
  "vision_end_token_id": 151653,
  "vision_start_token_id": 151652,
  "vision_token_id": 151654,
  "vocab_size": 152064
}
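For reference, a minimal sketch of how this file is consumed: transformers' AutoConfig parses it into a Qwen2_5_VLConfig whose attributes mirror the JSON keys above. This assumes transformers 4.49.0 (the "transformers_version" recorded in the file); the hub id "Qwen/Qwen2.5-VL-7B-Instruct" below is a stand-in for the local "_name_or_path" directory that actually holds this config.json.

# Minimal sketch: load the config above and read back a few of its fields.
# Assumes transformers == 4.49.0; swap the hub id for the local directory
# containing config.json if you are loading from disk.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")

print(config.model_type)                 # qwen2_5_vl
print(config.hidden_size)                # 3584
print(config.num_attention_heads)        # 28
print(config.num_key_value_heads)        # 4 -> grouped-query attention,
                                         #      28 / 4 = 7 query heads per KV head
print(config.rope_scaling)               # {'mrope_section': [16, 24, 24], ...}
print(config.vision_config.hidden_size)  # 1280 (vision tower width)

Note the rope_scaling block: mrope_section [16, 24, 24] splits the rotary dimensions across temporal, height, and width axes, the multimodal RoPE scheme Qwen2.5-VL uses for image and video positions.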