remove duplicates
TextCaps.py  CHANGED  (+5 -11)

This commit removes duplicate examples: the TextCaps captions JSON apparently lists each image once per caption, so the builder now yields only the first entry per image_id, drops the per-caption fields (caption_id, caption_str, caption_tokens), and corrects the reference_tokens feature type to a list of token lists.
@@ -105,11 +105,8 @@ class TextCapsDataset(datasets.GeneratorBasedBuilder):
                 "set_name": datasets.Value("string"),
                 "image_name": datasets.Value("string"),
                 "image_path": datasets.Value("string"),
-                "caption_id": datasets.Value("int32"),
-                "caption_str": datasets.Value("string"),
-                "caption_tokens": [datasets.Value("string")],
                 "reference_strs": [datasets.Value("string")],
-                "reference_tokens": [datasets.Value("string")],
+                "reference_tokens": [[datasets.Value("string")]],
             }
         )
         return datasets.DatasetInfo(
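For orientation, the full features mapping after this change would presumably read as the sketch below. Only the fields shown in the hunk are taken from the diff; the remaining names and types ("image_id", "ocr_tokens", "image") are assumptions inferred from the generator code in the later hunks.

import datasets

# Sketch of the post-commit feature schema, under the assumptions stated
# above; not taken verbatim from the repository.
features = datasets.Features(
    {
        "image_id": datasets.Value("string"),      # assumed field
        "ocr_tokens": [datasets.Value("string")],  # assumed type
        "set_name": datasets.Value("string"),
        "image_name": datasets.Value("string"),
        "image_path": datasets.Value("string"),
        "image": datasets.Value("string"),         # assumed field
        "reference_strs": [datasets.Value("string")],
        # One token list per reference caption, hence the nested list type.
        "reference_tokens": [[datasets.Value("string")]],
    }
)

The type change from [datasets.Value("string")] to [[datasets.Value("string")]] is what makes the schema match the data: each image keeps several reference captions, and each caption has its own token list.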
@@ -157,7 +154,7 @@ class TextCapsDataset(datasets.GeneratorBasedBuilder):
     def _generate_examples(
         self, captions_path, ocr_tokens_path, images_path, split_name
     ):
-
+        seen_image_ids = set()
         captions = json.load(open(captions_path, "r"))["data"]
         ocr_tokens = json.load(open(ocr_tokens_path, "r"))["data"]
 
@@ -166,6 +163,9 @@ class TextCapsDataset(datasets.GeneratorBasedBuilder):
             ocr_tokens_per_image_id[ocr_item["image_id"]] = ocr_item
 
         for caption_item in captions:
+            if caption_item["image_id"] in seen_image_ids:
+                continue
+            seen_image_ids.add(caption_item["image_id"])
             ocr_item = ocr_tokens_per_image_id[caption_item["image_id"]]
             record = {
                 "ocr_tokens": ocr_item["ocr_tokens"],
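The hunk above, together with the seen_image_ids set initialized earlier, is the whole deduplication: only the first captions entry per image_id survives. A minimal self-contained sketch of the same pattern, using made-up entries in the shape the TextCaps captions list appears to have (each image repeated once per caption, every occurrence already carrying the aggregated reference_strs):

# Toy data; the structure is an assumption about the TextCaps JSON layout.
captions = [
    {"image_id": "img_0", "reference_strs": ["a red stop sign", "a sign on a pole"]},
    {"image_id": "img_0", "reference_strs": ["a red stop sign", "a sign on a pole"]},
    {"image_id": "img_1", "reference_strs": ["a blue city bus"]},
]

seen_image_ids = set()
deduped = []
for caption_item in captions:
    if caption_item["image_id"] in seen_image_ids:
        continue  # later occurrences of the same image are skipped
    seen_image_ids.add(caption_item["image_id"])
    deduped.append(caption_item)

assert [c["image_id"] for c in deduped] == ["img_0", "img_1"]

Nothing is lost by skipping the repeats: the dropped per-caption fields were the only data that varied between entries for the same image, and the kept reference_strs and reference_tokens already aggregate all captions.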
@@ -182,15 +182,9 @@ class TextCapsDataset(datasets.GeneratorBasedBuilder):
                 "image" : str(images_path / f'{caption_item["image_name"]}.jpg')
             }
             if not split_name == "test":
-                record["caption_id"] = caption_item["caption_id"]
-                record["caption_str"] = caption_item["caption_str"]
-                record["caption_tokens"] = caption_item["caption_tokens"]
                 record["reference_strs"] = caption_item["reference_strs"]
                 record["reference_tokens"] = caption_item["reference_tokens"]
             else:
-                record["caption_id"] = None
-                record["caption_str"] = None
-                record["caption_tokens"] = None
                 record["reference_strs"] = None
                 record["reference_tokens"] = None
             yield caption_item["image_id"], record
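Deduplication also matters for the yield at the end of this hunk: _generate_examples uses caption_item["image_id"] as the example key, and datasets rejects duplicate keys during generation (DuplicatedKeysError), so repeated image entries would have broken the build outright. A hypothetical usage sketch follows; the script path is a placeholder, and the image_id column assumes the schema sketched earlier.

from datasets import load_dataset

# Placeholder path; point this at wherever the loading script lives.
ds = load_dataset("path/to/TextCaps.py", split="train")

image_ids = ds["image_id"]                    # assumes an image_id feature
assert len(image_ids) == len(set(image_ids))  # exactly one record per image
print(ds[0]["reference_strs"])                # all captions for the image
print(ds[0]["reference_tokens"])              # token lists, one per caption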