#!/usr/bin/env -S uv run --script
#
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "pyyaml",
# "load-dotenv",
# ]
# ///
"""
Collect relevant metadata from local UD treebank directories:
- extract the '# Summary' section from the beginning and the machine-readable
  metadata block from the end of the README.{md,txt} file
- use the UD directory name to look up language metadata in the
  codes_and_flags.yaml file
- collect the {dev,train,test}.conllu files and their sizes.
"""
import json
import os
import xml.etree.ElementTree as ET
import argparse
import logging
from collections import defaultdict
from pathlib import Path
import yaml
from dotenv import load_dotenv
load_dotenv()
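# UD release to process; may be overridden via UD_VER in the environment or a .env file.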
UD_VER = os.getenv('UD_VER', "2.15")
# Parse command line arguments
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o', '--override', action='store_true',
help='override output file if it already exists')
parser.add_argument('-v', '--verbose', action='count', default=0,
help='increase verbosity level')
args = parser.parse_args()
# Set up logging
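# Default level is INFO; one or more -v flags lower it to DEBUG (capped at DEBUG).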
logging.basicConfig(
level = max(logging.DEBUG, logging.INFO - args.verbose * 10),
format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
def extract_metadata(file_path) -> dict:
"""
    Extract the summary and the machine-readable metadata block from a README file.
Args:
file_path (str): The path to the README.{md,txt} file.
Returns:
dict: The extracted metadata.
"""
metadata = {
"summary": None,
"license": None,
"genre": None,
"lemmas": None,
"upos": None,
"xpos": None,
"language": None, # German:
"flag": None, # flag: DE
"lcode": None, # lcode: de
"iso3": None, # iso3: deu
"family": None, # family: IE
"genus": None, # genus: Germanic
# "splits": None, # {"train": {"files": []},
# "dev": {"files": []},
# "test": {"files": []},
# "unknown":{"files": []}}
}
with open(file_path, 'r') as file:
lines = [line.strip() for line in file.readlines()]
    summary_start = None
    summary_end = None
    for i, line in enumerate(lines):
        if summary_start is None and line.startswith('# '):
            summary_start = i + 1
        elif summary_start is not None and line.startswith('# '):
            summary_end = i
            break
    if summary_start is not None and summary_end is not None:
        # we have a summary: everything between the first two '# ' headings
        metadata["summary"] = ' '.join(lines[summary_start:summary_end]).strip()
# This is a (quite hackish) approach inspired by:
# https://github.com/UniversalDependencies/LICENSE/blob/master/generate_license_for_lindat.pl
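    # A typical (abridged) machine-readable block at the end of a UD README:
    #   License: CC BY-SA 4.0
    #   Genre: news wiki
    #   Lemmas: converted from manual
    #   UPOS: converted from manual
    #   XPOS: manual native
    # Only keys already present in `metadata` are kept; 'Genre' is split into a list.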
for line in lines[summary_end:]:
if ":" in line:
key, val = line.split(":", 1)
            if key.lower() in metadata:
if key.lower() == "genre":
val = val.strip().split(" ")
else:
val = val.strip()
metadata[key.lower()] = val
return metadata
def traverse_directory(directory):
"""
    Traverse the directory's first-level subdirectories (one per UD treebank),
    read the README.{md,txt}, stats.xml and *.conllu files, and collect the
    per-treebank metadata.
    Args:
        directory (str): The path to the directory holding the UD checkouts.
    Returns:
        dict: Per-treebank metadata, keyed by the short dataset name.
"""
results = defaultdict(lambda: defaultdict(dict))
    with open(os.path.join('etc', "codes_and_flags-latest.yaml"), 'r') as file:
codes_and_flags = yaml.safe_load(file)
logging.debug(codes_and_flags)
for item in os.listdir(directory):
if item.startswith("."):
continue
if os.path.isdir(os.path.join(directory, item)):
dir_path = os.path.join(directory, item)
logging.debug(dir_path)
tag_fn = os.path.join(dir_path, f".tag-r{UD_VER}")
if not Path(tag_fn).exists():
logging.info(f"No tag file:{tag_fn}: Ignoring content.")
continue
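            # Pre-create all split buckets; empty ones are deleted again further down.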
results[item]["splits"] = {
"train": {"files": [], "num_bytes": 0},
"dev": {"files": [], "num_bytes": 0},
"test": {"files": [], "num_bytes": 0},
"unknown": {"files": [], "num_bytes": 0}
}
for file in os.listdir(dir_path):
if file.endswith(".conllu"):
file_path = os.path.join(dir_path, file)
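                    # Relative path under which the file is referenced in the metadata:
                    # <UD dir>/r<UD_VER>/<filename>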
github_path = os.path.join(item, f"r{UD_VER}", file)
logging.debug(file_path)
                    match file:
                        case _ if "dev" in file:
                            subset = "dev"
                        case _ if "test" in file:
                            subset = "test"
                        case _ if "train" in file:
                            subset = "train"
                        case _:
                            subset = "unknown"
                    results[item]["splits"][subset]["files"].append(github_path)
                    file_bytes = os.stat(file_path).st_size
                    results[item]["splits"][subset]["num_bytes"] += file_bytes
elif file.startswith("README") and (file.endswith(
tuple(["md", "txt"]))):
results[item].update(
extract_metadata(os.path.join(dir_path, file)))
# print(results[item])
elif file == "stats.xml":
# Extract size values:
# <treebank>
# <size>
# <total><sentences>15589</sentences><tokens>287708</tokens><words>292756</words><fused>5048</fused></total>
# <train><sentences>13813</sentences><tokens>259167</tokens><words>263777</words><fused>4610</fused></train>
# <dev><sentences>799</sentences><tokens>12316</tokens><words>12480</words><fused>164</fused></dev>
# <test><sentences>977</sentences><tokens>16225</tokens><words>16499</words><fused>274</fused></test>
# </size>
tree = ET.parse(os.path.join(dir_path, file))
root = tree.getroot()
size_node = root.find('.//size')
if size_node is None:
continue
for child_node_name in ["train", "dev", "test"]:
child_node = size_node.find(child_node_name)
if child_node is None:
continue
                        for child_child_node_name in ["sentences", "tokens", "words"]:
                            value_node = child_node.find(child_child_node_name)
                            value = value_node.text if value_node is not None else None
                            # print(f"Item:{item} {child_node_name}-{child_child_node_name}: {value}")
                            if value and int(value) > 0:
                                results[item]["splits"][child_node_name][f"num_{child_child_node_name}"] = value
# print(f"key: {child_node_name}")
# print(f"value: {value}")
# 'Read' language name from the dirname (UD_Abaza-ATB/), extract
# the relevant metadata from 'codes_and_flags.yaml' and add it to
# the metadata.
# ```
# Abaza:
# flag: RU-ABAZA
# lcode: abq
# iso3: abq
# family: Northwest Caucasian
# ```
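            # item[3:] strips the leading "UD_"; underscores become spaces so that
            # multi-word names (e.g. UD_Ancient_Greek-PROIEL -> "Ancient Greek")
            # match the language keys in codes_and_flags.yaml.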
language = item[3:].rsplit("-", 1)[0].replace("_", " ")
results[item]["language"] = language
#print(f"item: {item}, language: {language}")
results[item].update(codes_and_flags[language])
# 'Read' the corpus name from the dirname (UD_Abaza-ATB/) and
# construct a short name for the data set:
# abq_atb
corpus_name = item[3:].rsplit("-", 1)[1].lower()
name = f"{results[item]["lcode"]}_{corpus_name}"
results[item]["name"] = name
# Delete empty
# 1. file subsets
            for fileset_k, fileset_v in list(results[item]["splits"].items()):
if not fileset_v["files"]:
del results[item]["splits"][fileset_k]
            # 2. drop treebanks without a summary, without files, or without a license
            #    (continue after deleting, otherwise the defaultdict would silently
            #    re-create the entry for the next check)
            if not results[item]["summary"]:
                del results[item]
                print(f"ITEM DELETED - no summary: {item}")
                continue
            if not any(value["files"] for value in
                       results[item]["splits"].values()):
                print(results[item]["splits"])
                del results[item]
                print(f"ITEM DELETED - no files : {item}")
                continue
            if not results[item]["license"]:
                del results[item]
                print(f"ITEM DELETED - no license: {item}")
    # Re-key results by the short dataset name; keep the original directory name
    # under "dirname".
for key in list(results.keys()):
name = results[key]["name"]
del results[key]["name"]
results[key]["dirname"] = key
results[name] = results.pop(key)
return results
if __name__ == '__main__':
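    # Treebank checkouts are expected under UD_repos/; directories without a
    # .tag-r<UD_VER> marker file are skipped by traverse_directory().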
directory = 'UD_repos'
results = traverse_directory(directory)
for name, metadata in results.items():
print(f"Directory: {metadata['dirname']}")
# print(metadata)
# print('---')
# Write the metadata to json
output_fn = f"metadata-{UD_VER}.json"
    if args.override or not Path(output_fn).exists():
with open(output_fn, 'w') as fh:
json.dump(results, fh, ensure_ascii=False)
print(f"{output_fn} written")
else:
logging.info(f"Output {output_fn} already exists: Not overriding.")