{
  "_name_or_path": "torch",
  "activation_dropout": 0.1,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForSpeechClassification"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 1,
  "classifier_proj_size": 256,
  "codevector_dim": 768,
  "contrastive_logits_temperature": 0.1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "sum",
  "ctc_zero_infinity": false,
  "diversity_loss_weight": 0.1,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.1,
  "feat_quantizer_dropout": 0.0,
  "final_dropout": 0.1,
  "finetuning_task": "wav2vec2_reg",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout": 0.1,
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
  "id2label": {
    "0": "arousal",
    "1": "dominance",
    "2": "valence"
  },
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "label2id": {
    "arousal": 0,
    "dominance": 1,
    "valence": 2
  },
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.1,
  "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_prob": 0.05,
  "model_type": "wav2vec2",
  "num_adapter_layers": 3,
  "num_attention_heads": 16,
  "num_codevector_groups": 2,
  "num_codevectors_per_group": 320,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 12,
  "num_negatives": 100,
  "output_hidden_size": 1024,
  "pad_token_id": 0,
  "pooling_mode": "mean",
  "problem_type": "regression",
  "proj_codevector_dim": 768,
  "tdnn_dilation": [
    1,
    2,
    3,
    1,
    1
  ],
  "tdnn_dim": [
    512,
    512,
    512,
    512,
    1500
  ],
  "tdnn_kernel": [
    5,
    3,
    3,
    1,
    1
  ],
  "torch_dtype": "float32",
  "transformers_version": "4.17.0.dev0",
  "use_weighted_layer_sum": false,
  "vocab_size": null,
  "xvector_output_dim": 512
}