Mirror from microsoft/trocr-small-handwritten

Files changed:
- .gitattributes +3 -11
- README.md +61 -0
- config.json +159 -0
- generation_config.json +9 -0
- preprocessor_config.json +19 -0
- pytorch_model.bin +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
.gitattributes
CHANGED
@@ -1,35 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,61 @@
---
tags:
- trocr
- image-to-text
widget:
- src: https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg
  example_title: Note 1
- src: https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSoolxi9yWGAT5SLZShv8vVd0bz47UWRzQC19fDTeE8GmGv_Rn-PCF1pP1rrUx8kOjA4gg&usqp=CAU
  example_title: Note 2
- src: https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRNYtTuSBpZPV_nkBYPMFwVVD9asZOPgHww4epu9EqWgDmXW--sE2o8og40ZfDGo87j5w&usqp=CAU
  example_title: Note 3
---

# TrOCR (small-sized model, fine-tuned on IAM)

TrOCR model fine-tuned on the [IAM dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database). It was introduced in the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Li et al. and first released in [this repository](https://github.com/microsoft/unilm/tree/master/trocr).

## Model description

The TrOCR model is an encoder-decoder model, consisting of an image Transformer as encoder and a text Transformer as decoder. The image encoder was initialized from the weights of DeiT, while the text decoder was initialized from the weights of UniLM.

Images are presented to the model as a sequence of fixed-size patches (resolution 16x16), which are linearly embedded. Absolute position embeddings are also added before feeding the sequence to the layers of the Transformer encoder. The Transformer text decoder then autoregressively generates tokens.
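The image size and patch size fix the encoder's sequence length. As a quick illustration (a sketch using the `image_size` and `patch_size` values from the config.json added in this commit, not code from the model card):

```python
# Sketch: patch count implied by this checkpoint's encoder config
# (image_size=384 and patch_size=16 in config.json below).
image_size, patch_size = 384, 16
num_patches = (image_size // patch_size) ** 2
print(num_patches)  # 576 patches of 16x16 pixels per image
# DeiT also prepends special tokens (class + distillation), so the
# actual encoder sequence is slightly longer than num_patches.
```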

## Intended uses & limitations

You can use the raw model for optical character recognition (OCR) on single text-line images. See the [model hub](https://huggingface.co/models?search=microsoft/trocr) to look for fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model in PyTorch:

```python
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
from PIL import Image
import requests

# load image from the IAM database
url = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg'
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

processor = TrOCRProcessor.from_pretrained('microsoft/trocr-small-handwritten')
model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-small-handwritten')

# resize + normalize the image into a pixel_values tensor
pixel_values = processor(images=image, return_tensors="pt").pixel_values

# autoregressively generate token ids, then decode them into a string
generated_ids = model.generate(pixel_values)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
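`generated_text` then holds the transcription as a plain string. Equivalently, recent versions of transformers expose the same checkpoint through the `image-to-text` pipeline; a minimal sketch, assuming such a version is installed:

```python
from transformers import pipeline

# Sketch: the same checkpoint via the high-level pipeline API.
ocr = pipeline("image-to-text", model="microsoft/trocr-small-handwritten")
print(ocr("https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"))
# -> [{'generated_text': '...'}]
```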

### BibTeX entry and citation info

```bibtex
@misc{li2021trocr,
      title={TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models},
      author={Minghao Li and Tengchao Lv and Lei Cui and Yijuan Lu and Dinei Florencio and Cha Zhang and Zhoujun Li and Furu Wei},
      year={2021},
      eprint={2109.10282},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
config.json
ADDED
@@ -0,0 +1,159 @@
{
  "architectures": [
    "VisionEncoderDecoderModel"
  ],
  "decoder": {
    "_name_or_path": "",
    "activation_dropout": 0.0,
    "activation_function": "relu",
    "add_cross_attention": true,
    "architectures": null,
    "attention_dropout": 0.0,
    "bad_words_ids": null,
    "bos_token_id": 0,
    "chunk_size_feed_forward": 0,
    "classifier_dropout": 0.0,
    "cross_attention_hidden_size": 384,
    "d_model": 256,
    "decoder_attention_heads": 8,
    "decoder_ffn_dim": 1024,
    "decoder_layerdrop": 0.0,
    "decoder_layers": 6,
    "decoder_start_token_id": 2,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "dropout": 0.1,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": 2,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "init_std": 0.02,
    "is_decoder": true,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layernorm_embedding": true,
    "length_penalty": 1.0,
    "max_length": 20,
    "max_position_embeddings": 512,
    "min_length": 0,
    "model_type": "trocr",
    "no_repeat_ngram_size": 0,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": 1,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "scale_embedding": true,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": false,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.14.1",
    "use_bfloat16": false,
    "use_cache": false,
    "use_learned_position_embeddings": true,
    "vocab_size": 64044
  },
  "encoder": {
    "_name_or_path": "",
    "add_cross_attention": false,
    "architectures": null,
    "attention_probs_dropout_prob": 0.0,
    "bad_words_ids": null,
    "bos_token_id": null,
    "chunk_size_feed_forward": 0,
    "cross_attention_hidden_size": null,
    "decoder_start_token_id": null,
    "diversity_penalty": 0.0,
    "do_sample": false,
    "early_stopping": false,
    "encoder_no_repeat_ngram_size": 0,
    "eos_token_id": null,
    "finetuning_task": null,
    "forced_bos_token_id": null,
    "forced_eos_token_id": null,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.0,
    "hidden_size": 384,
    "id2label": {
      "0": "LABEL_0",
      "1": "LABEL_1"
    },
    "image_size": 384,
    "initializer_range": 0.02,
    "intermediate_size": 1536,
    "is_decoder": false,
    "is_encoder_decoder": false,
    "label2id": {
      "LABEL_0": 0,
      "LABEL_1": 1
    },
    "layer_norm_eps": 1e-12,
    "length_penalty": 1.0,
    "max_length": 20,
    "min_length": 0,
    "model_type": "deit",
    "no_repeat_ngram_size": 0,
    "num_attention_heads": 6,
    "num_beam_groups": 1,
    "num_beams": 1,
    "num_channels": 3,
    "num_hidden_layers": 12,
    "num_return_sequences": 1,
    "output_attentions": false,
    "output_hidden_states": false,
    "output_scores": false,
    "pad_token_id": null,
    "patch_size": 16,
    "prefix": null,
    "problem_type": null,
    "pruned_heads": {},
    "qkv_bias": true,
    "remove_invalid_values": false,
    "repetition_penalty": 1.0,
    "return_dict": true,
    "return_dict_in_generate": false,
    "sep_token_id": null,
    "task_specific_params": null,
    "temperature": 1.0,
    "tie_encoder_decoder": false,
    "tie_word_embeddings": true,
    "tokenizer_class": null,
    "top_k": 50,
    "top_p": 1.0,
    "torch_dtype": null,
    "torchscript": false,
    "transformers_version": "4.14.1",
    "use_bfloat16": false
  },
  "eos_token_id": 2,
  "is_encoder_decoder": true,
  "model_type": "vision-encoder-decoder",
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": null
}
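The nesting above is easy to misread, so here is a short sketch (not part of the commit) that loads the same config through transformers and prints the fields that determine model size; it assumes network access to the Hub:

```python
from transformers import VisionEncoderDecoderConfig

# Sketch: inspect the nested encoder/decoder configs of this checkpoint.
cfg = VisionEncoderDecoderConfig.from_pretrained("microsoft/trocr-small-handwritten")
print(cfg.encoder.model_type, cfg.encoder.hidden_size)  # deit 384
print(cfg.decoder.model_type, cfg.decoder.d_model)      # trocr 256
print(cfg.decoder.vocab_size)                           # 64044
```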
generation_config.json
ADDED
@@ -0,0 +1,9 @@
{
  "_from_model_config": true,
  "bos_token_id": 0,
  "decoder_start_token_id": 2,
  "eos_token_id": 2,
  "pad_token_id": 1,
  "transformers_version": "4.27.0.dev0",
  "use_cache": false
}
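These are the defaults `model.generate()` picks up for this checkpoint; they can be overridden per call. A sketch, reusing `model` and `pixel_values` from the README snippet above:

```python
# Sketch: override the generation defaults for a single call.
generated_ids = model.generate(
    pixel_values,
    num_beams=4,      # beam search instead of the default greedy decoding
    max_length=64,    # allow transcriptions longer than the config default of 20
    early_stopping=True,
)
```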
preprocessor_config.json
ADDED
@@ -0,0 +1,19 @@
{
  "crop_size": 224,
  "do_center_crop": false,
  "do_normalize": true,
  "do_resize": true,
  "image_processor_type": "DeiTImageProcessor",
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "resample": 3,
  "size": 384
}
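With `image_mean` and `image_std` both at 0.5, normalization maps pixel values from [0, 1] to [-1, 1] after the resize to 384x384 (`resample: 3` is PIL's bicubic filter). A NumPy sketch of the arithmetic, not the processor's actual code path:

```python
import numpy as np

# Sketch: the normalization step implied by this preprocessor config.
def normalize(img_uint8: np.ndarray) -> np.ndarray:
    x = img_uint8.astype(np.float32) / 255.0           # rescale to [0, 1]
    mean = std = np.array([0.5, 0.5, 0.5], np.float32) # image_mean / image_std
    return (x - mean) / std                            # values in [-1, 1]
```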
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b83102cbc1520dee1c3937ac334da83da18cf4683d46ebd1bd4e93ebe584dd7
size 245933041
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6f5e2fefcf793761a76a6bfb8ad35489f9c203b25557673284b6d032f41043f4
size 1356293
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sp_model_kwargs": {}, "tokenizer_class": "XLMRobertaTokenizer"}
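The `tokenizer_class` declares an XLM-RoBERTa SentencePiece tokenizer for the decoder side, backed by the sentencepiece.bpe.model file above. It can be loaded on its own as a quick sanity check (a sketch, assuming Hub access):

```python
from transformers import XLMRobertaTokenizer

# Sketch: load just the decoder tokenizer declared in tokenizer_config.json.
tok = XLMRobertaTokenizer.from_pretrained("microsoft/trocr-small-handwritten")
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <s> </s> <pad>
```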