| """
|
| Convert Brat format annotations to JSONL format for NER training.
|
|
|
| Author: Amir Safari
|
| Date: 17.10.2025
|
|
|
| This script processes Brat annotation files (.ann and .txt) from train/dev/test
|
| directories and converts them into JSONL format suitable for NER model training.
|
| """
|
| import json
|
| import re
|
| from pathlib import Path
|
|
|
print("Starting data conversion from Brat format to JSON Lines...")


# BIO tag inventory: "O" plus B-/I- pairs for the five entity classes
# annotated in this corpus.
NER_TAGS = [
    "O",
    "B-Taxon", "I-Taxon",
    "B-Geographical_Location", "I-Geographical_Location",
    "B-Habitat", "I-Habitat",
    "B-Temporal_Expression", "I-Temporal_Expression",
    "B-Person", "I-Person",
]

# Integer id for each tag is simply its position in NER_TAGS.
tag2id = dict(zip(NER_TAGS, range(len(NER_TAGS))))
|
|
|
|
|
def _tokenize_with_offsets(text):
    """Whitespace-tokenize *text*, keeping each token's character span.

    Returns a list of {"text", "start", "end"} dicts (end exclusive), so
    tokens can later be aligned against character-level annotations.
    """
    return [
        {"text": m.group(0), "start": m.start(), "end": m.end()}
        for m in re.finditer(r"\S+", text)
    ]


def _parse_brat_entities(ann_file):
    """Parse entity ("T") lines from a Brat .ann standoff file.

    Lines not starting with "T" (relations, events, annotator notes) are
    ignored. Discontinuous spans ("start end;start end") are supported.
    Malformed span fragments are skipped silently (best-effort parsing).

    Returns a list of {"label": str, "spans": [(start, end), ...]} dicts.
    """
    annotations = []
    with open(ann_file, "r", encoding="utf-8") as f:
        for line in f:
            if not line.startswith("T"):
                continue
            parts = line.strip().split("\t")
            if len(parts) < 2:
                continue
            # parts[1] is "<Label> <start> <end>[;<start> <end>...]".
            tag_parts = parts[1].split(" ")
            label = tag_parts[0]
            char_spans = []
            for span_part in " ".join(tag_parts[1:]).split(";"):
                try:
                    # Argument-less split() tolerates stray spaces around
                    # ';' (e.g. "10 20; 25 30"), unlike split(' ').
                    start, end = map(int, span_part.split())
                    char_spans.append((start, end))
                except ValueError:
                    continue  # fragment is not exactly two integers
            if char_spans:
                annotations.append({"label": label, "spans": char_spans})
    return annotations


def _bio_tags(tokens_with_spans, annotations):
    """Project character-level annotations onto tokens as BIO tags.

    A token is covered when its span overlaps an annotation span by at
    least one character. The first covered token of each annotation gets
    "B-<label>", subsequent ones "I-<label>". When annotations overlap,
    the later one wins (matches the original script's behavior).
    """
    ner_tags = ["O"] * len(tokens_with_spans)
    for ann in annotations:
        is_first_token = True
        for start_char, end_char in ann["spans"]:
            for i, token in enumerate(tokens_with_spans):
                if token["start"] < end_char and token["end"] > start_char:
                    prefix = "B-" if is_first_token else "I-"
                    ner_tags[i] = prefix + ann["label"]
                    is_first_token = False
    return ner_tags


def convert_split(split, tag_to_id=None):
    """Convert one Brat split directory ("train"/"dev"/"test") to JSONL.

    Reads paired .ann/.txt files from ./<split>/ and writes ./<split>.jsonl
    with one {"id", "tokens", "ner_tags"} record per document. Documents
    with no tokens, or .ann files without a matching .txt, are skipped.
    Unknown labels fall back to the "O" tag id.

    tag_to_id: optional tag->id mapping; defaults to the module-level
    tag2id so existing call sites behave exactly as before.
    """
    input_dir = Path(split)
    output_file = f"{split}.jsonl"

    if not input_dir.exists():
        print(f"Directory not found: {input_dir}. Skipping split.")
        return

    # Resolve the mapping lazily so the default stays tied to the
    # module-level tag2id.
    mapping = tag2id if tag_to_id is None else tag_to_id

    with open(output_file, "w", encoding="utf-8") as outfile:
        for ann_file in sorted(input_dir.glob("*.ann")):
            txt_file = ann_file.with_suffix(".txt")
            if not txt_file.exists():
                continue  # annotation without its source text

            with open(txt_file, "r", encoding="utf-8") as f:
                text = f.read()

            tokens_with_spans = _tokenize_with_offsets(text)
            if not tokens_with_spans:
                continue  # empty / whitespace-only document

            tokens = [t["text"] for t in tokens_with_spans]
            annotations = _parse_brat_entities(ann_file)
            ner_tags = _bio_tags(tokens_with_spans, annotations)
            ner_tag_ids = [mapping.get(tag, mapping["O"]) for tag in ner_tags]

            json_line = json.dumps({
                "id": txt_file.stem,
                "tokens": tokens,
                "ner_tags": ner_tag_ids,
            })
            outfile.write(json_line + "\n")

    print(f"Successfully created {output_file}")


for split in ["train", "dev", "test"]:
    print(f"\nProcessing '{split}' split...")
    convert_split(split)

print("\nConversion complete! ✨")