config.json
{
  "activation_dropout": 0.0,
  "activation_function": "relu",
  "architectures": [
    "M2M100ForConditionalGeneration"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 0,
  "d_model": 1024,
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 4096,
  "decoder_layerdrop": 0,
  "decoder_layers": 12,
  "decoder_start_token_id": 2,
  "dropout": 0.1,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 4096,
  "encoder_layerdrop": 0,
  "encoder_layers": 12,
  "eos_token_id": 2,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "max_position_embeddings": 1024,
  "model_type": "m2m_100",
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "scale_embedding": true,
  "torch_dtype": "float32",
  "transformers_version": "4.21.0.dev0",
  "use_cache": true,
  "vocab_size": 256206,
  "tokenizer_class": "NllbTokenizer",
  "max_length": 200
}
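
This configuration describes an M2M-100-style encoder-decoder translation model (12 encoder and 12 decoder layers, `d_model` 1024, and the 256,206-token NLLB vocabulary handled by `NllbTokenizer`). As a minimal sketch, the Python snippet below loads a checkpoint that ships with this config via the `transformers` library and runs one translation. The model ID `facebook/nllb-200-distilled-600M` is an assumption inferred from the config values; substitute the actual repository this file belongs to.

```python
# Minimal sketch: load a checkpoint carrying this config and translate one
# sentence. The model ID below is an assumption; swap in the real repo ID.
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "facebook/nllb-200-distilled-600M"  # assumed checkpoint ID

config = AutoConfig.from_pretrained(model_id)
assert config.model_type == "m2m_100"   # matches "model_type" above
assert config.is_encoder_decoder        # seq2seq: encoder + decoder stacks

# "tokenizer_class": "NllbTokenizer" means AutoTokenizer resolves to NLLB's
# tokenizer, whose language codes follow the FLORES-200 scheme.
tokenizer = AutoTokenizer.from_pretrained(model_id, src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)  # M2M100ForConditionalGeneration

inputs = tokenizer("Hello, world!", return_tensors="pt")
generated = model.generate(
    **inputs,
    # NLLB selects the target language by forcing its code as the first
    # generated token.
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
    max_length=config.max_length,  # 200, per "max_length" in the config
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```

Note that generation begins from `decoder_start_token_id` (2, the EOS token, as is conventional for M2M-100 models); `generate()` handles this automatically from the config.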