{
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": false,
  "model_max_length": 2048,
  "pad_token": "</s>",
  "padding_side": "right",
  "sp_model_kwargs": {},
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}