{
  "added_tokens_decoder": {
    "0": {
      "content": "[g_start]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "[g_end]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "[unknown]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "[pad]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": false,
  "extra_special_tokens": {},
  "model_max_length": 1000000000000000019884624838656,
  "tokenizer_class": "PreTrainedTokenizerFast"
}