text_encoder/config.json
```json
{
  "architectures": [
    "CLIPTextModel"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "dropout": 0.0,
  "eos_token_id": 2,
  "hidden_act": "quick_gelu",
  "hidden_size": 768,
  "initializer_factor": 1.0,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 77,
  "model_type": "clip_text_model",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "projection_dim": 768,
  "torch_dtype": "float16",
  "transformers_version": "4.44.0",
  "vocab_size": 49408
}
```
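This config describes a standard CLIP text encoder (12 layers, 12 heads, hidden size 768, 77-token context) stored in half precision. As a minimal sketch of how such a subfolder is typically consumed with the `transformers` library, assuming a hypothetical checkpoint path (`your-org/your-model` is a placeholder, not from this document):

```python
import torch
from transformers import CLIPTextModel

# Load the text encoder from the "text_encoder" subfolder of a checkpoint.
# "your-org/your-model" is an assumed placeholder repo id; replace with the
# actual repository or local path. torch.float16 matches the config's
# "torch_dtype": "float16".
text_encoder = CLIPTextModel.from_pretrained(
    "your-org/your-model",
    subfolder="text_encoder",
    torch_dtype=torch.float16,
)

print(text_encoder.config.hidden_size)              # 768
print(text_encoder.config.max_position_embeddings)  # 77
```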