config.json
```json
{
  "architectures": [
    "VoxtralRealtimeForConditionalGeneration"
  ],
  "audio_config": {
    "activation_function": "gelu",
    "attention_dropout": 0.0,
    "head_dim": 64,
    "hidden_act": "silu",
    "hidden_size": 1280,
    "initializer_range": 0.02,
    "intermediate_size": 5120,
    "max_position_embeddings": 1500,
    "model_type": "voxtral_realtime_encoder",
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "num_key_value_heads": 32,
    "num_mel_bins": 128,
    "rms_norm_eps": 1e-05,
    "rope_parameters": {
      "rope_theta": 1000000.0,
      "rope_type": "default"
    },
    "sliding_window": 750,
    "vocab_size": 131072
  },
  "audio_length_per_tok": 8,
  "default_num_delay_tokens": 6,
  "downsample_factor": 4,
  "dtype": "bfloat16",
  "hidden_size": 3072,
  "model_type": "voxtral_realtime",
  "projector_hidden_act": "gelu",
  "text_config": {
    "attention_dropout": 0.0,
    "bos_token_id": 1,
    "eos_token_id": 2,
    "head_dim": 128,
    "hidden_act": "silu",
    "hidden_size": 3072,
    "initializer_range": 0.02,
    "intermediate_size": 9216,
    "max_position_embeddings": 131072,
    "model_type": "voxtral_realtime_text",
    "num_attention_heads": 32,
    "num_hidden_layers": 26,
    "num_key_value_heads": 8,
    "pad_token_id": null,
    "rms_norm_eps": 1e-05,
    "rope_parameters": {
      "rope_theta": 1000000.0,
      "rope_type": "default"
    },
    "sliding_window": 8192,
    "tie_word_embeddings": true,
    "use_cache": true,
    "vocab_size": 131072
  },
  "transformers_version": "5.2.0.dev0"
}
```
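A minimal sketch of loading and inspecting this config with Hugging Face Transformers. It assumes a Transformers version that registers the `voxtral_realtime` model type (the file was written by `5.2.0.dev0`, a development build); the checkpoint path below is a placeholder, not a confirmed repo id. Nested sub-configs such as `audio_config` and `text_config` are exposed as attribute-accessible config objects on composite models like this one.

```python
from transformers import AutoConfig

# Placeholder path: substitute the actual repo id or a local directory
# containing this config.json.
config = AutoConfig.from_pretrained("path/to/voxtral-realtime-checkpoint")

print(config.model_type)                       # "voxtral_realtime"
print(config.hidden_size)                      # 3072 (decoder width)
print(config.audio_config.num_hidden_layers)   # 32-layer audio encoder
print(config.text_config.num_key_value_heads)  # 8 KV heads (grouped-query attention)
```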
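Two details in `text_config` bound the decoder's memory at inference time: grouped-query attention (32 query heads sharing 8 key/value heads) and a `sliding_window` of 8192, which caps how many positions the KV cache retains. A back-of-the-envelope sketch of the cache footprint, computed only from the numbers in this file (an estimate, not a vendor-provided formula):

```python
# All values taken from text_config above; dtype is bfloat16 (2 bytes/element).
layers = 26            # num_hidden_layers
kv_heads = 8           # num_key_value_heads
head_dim = 128         # head_dim
bytes_per_elem = 2     # bfloat16
window = 8192          # sliding_window caps cached positions

# Keys and values each store kv_heads * head_dim elements per layer per token.
per_token = layers * 2 * kv_heads * head_dim * bytes_per_elem
print(per_token)                   # 106496 bytes, i.e. 104 KiB per cached token
print(per_token * window / 2**20)  # 832.0 MiB at the full sliding window
```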