""" |
|
|
Collect relevant metadata from local UD directories: |
|
|
- extracting the '# Summary' from the beginning and machine readable |
|
|
metadata from the end of the README.{md,txt} file |
|
|
- using the UD directory name for collecting metadata from the |
|
|
codes_and_flags.yaml file |
|
|
- collecting {dev,train,test}.conllu files. |
|
|
""" |

import argparse
import json
import logging
import os
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path

import yaml
from dotenv import load_dotenv

# UD_VER can be set in the environment or in a .env file (via python-dotenv);
# it defaults to 2.15.
load_dotenv()
UD_VER = os.getenv('UD_VER', "2.15")

parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o', '--override', action='store_true',
                    help='override output file if it already exists')
parser.add_argument('-v', '--verbose', action='count', default=0,
                    help='increase verbosity level')
args = parser.parse_args()

logging.basicConfig(
    level=max(logging.DEBUG, logging.INFO - args.verbose * 10),
    format='%(asctime)s [%(levelname)s] %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
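
# Each -v lowers the log level by 10 (INFO -> DEBUG); max() clamps the level
# at DEBUG so additional -v flags have no further effect.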


def extract_metadata(file_path) -> dict:
    """
    Collect relevant metadata from UD directories.

    Args:
        file_path (str): The path to the README.{md,txt} file.

    Returns:
        dict: The extracted metadata.
    """
    metadata = {
        "summary": None,
        "license": None,
        "genre": None,
        "lemmas": None,
        "upos": None,
        "xpos": None,
        "language": None,
        "flag": None,
        "lcode": None,
        "iso3": None,
        "family": None,
        "genus": None,
    }
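
    # UD READMEs end with a machine-readable block; its 'key: value' lines
    # fill the dict above. Illustrative excerpt:
    #   License: CC BY-SA 4.0
    #   Genre: news nonfiction
    #   Lemmas: manual native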
    with open(file_path, 'r') as file:
        lines = [line.strip() for line in file.readlines()]

    # The summary is the text between the first '# ' heading ('# Summary')
    # and the next one.
    summary_start = None
    summary_end = None
    for i, line in enumerate(lines):
        if summary_start is None and line.startswith('# '):
            summary_start = i + 1
        elif summary_start is not None and line.startswith('# '):
            summary_end = i - 1
            break

    if summary_start is not None and summary_end is not None:
        metadata["summary"] = ' '.join(lines[summary_start:summary_end]).strip()

    # Scan from the end of the summary onwards for 'key: value' lines;
    # lines[None:] degrades gracefully to the whole file if no summary
    # was found.
    for line in lines[summary_end:]:
        if ":" in line:
            key, val = line.split(":", 1)
            if key.lower() in metadata:
                if key.lower() == "genre":
                    # Genre is a space-separated list of genre labels.
                    val = val.strip().split(" ")
                else:
                    val = val.strip()
                metadata[key.lower()] = val
    return metadata


def traverse_directory(directory):
    """
    Traverses the directory and its first-level subdirectories, finds the
    specified files, and extracts the summary from the README.{md,txt} file.

    Args:
        directory (str): The path to the directory.

    Returns:
        dict: A dictionary containing the extracted metadata for each
        treebank directory.
    """
|
|
results = defaultdict(lambda: defaultdict(dict)) |
|
|
|
|
|
with open(os.path.join('etc', f"codes_and_flags-latest.yaml"), 'r') as file: |
|
|
codes_and_flags = yaml.safe_load(file) |
|
|
logging.debug(codes_and_flags) |
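
    # codes_and_flags-latest.yaml is assumed to map language names to the
    # remaining metadata fields, e.g. (illustrative entry):
    #   Ancient Greek:
    #     flag: ...
    #     lcode: grc
    #     iso3: grc
    #     family: Indo-European
    #     genus: Greek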

    for item in os.listdir(directory):
        if item.startswith("."):
            continue

        if not os.path.isdir(os.path.join(directory, item)):
            continue
        dir_path = os.path.join(directory, item)
        logging.debug(dir_path)

        # Only directories tagged for the current UD release are used.
        tag_fn = os.path.join(dir_path, f".tag-r{UD_VER}")
        if not Path(tag_fn).exists():
            logging.info(f"No tag file: {tag_fn}: ignoring content.")
            continue

        results[item]["splits"] = {
            "train": {"files": [], "num_bytes": 0},
            "dev": {"files": [], "num_bytes": 0},
            "test": {"files": [], "num_bytes": 0},
            "unknown": {"files": [], "num_bytes": 0}
        }
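
        # UD .conllu files are typically named {lcode}_{corpus}-ud-{split}.conllu
        # (e.g. grc_proiel-ud-train.conllu), so a substring match on the
        # split name is enough to classify them.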
        for file in os.listdir(dir_path):
            if file.endswith(".conllu"):
                file_path = os.path.join(dir_path, file)
                github_path = os.path.join(item, f"r{UD_VER}", file)
                logging.debug(file_path)
                match file:
                    case _ if "dev" in file:
                        subset = "dev"
                    case _ if "test" in file:
                        subset = "test"
                    case _ if "train" in file:
                        subset = "train"
                    case _:
                        subset = "unknown"
                results[item]["splits"][subset]["files"].append(github_path)

                sum_bytes = os.stat(file_path).st_size
                results[item]["splits"][subset]["num_bytes"] += sum_bytes

            elif file.startswith("README") and file.endswith(("md", "txt")):
                results[item].update(
                    extract_metadata(os.path.join(dir_path, file)))

            elif file == "stats.xml":
                # Per-split sentence/token/word counts from the treebank's
                # stats.xml.
                tree = ET.parse(os.path.join(dir_path, file))
                root = tree.getroot()
                size_node = root.find('.//size')
                if size_node is None:
                    continue

                for child_node_name in ["train", "dev", "test"]:
                    child_node = size_node.find(child_node_name)
                    if child_node is None:
                        continue

                    for child_child_node_name in ["sentences", "tokens",
                                                  "words"]:
                        count_node = child_node.find(child_child_node_name)
                        if count_node is None:
                            continue
                        value = count_node.text
                        if value and int(value) > 0:
                            results[item]["splits"][child_node_name][
                                f"num_{child_child_node_name}"] = value
        # Directory names look like 'UD_Ancient_Greek-PROIEL': strip the
        # 'UD_' prefix, take the part before the last '-' as the language
        # (underscores back to spaces) and the part after it as the corpus.
        language = item[3:].rsplit("-", 1)[0].replace("_", " ")
        results[item]["language"] = language
        results[item].update(codes_and_flags[language])

        corpus_name = item[3:].rsplit("-", 1)[1].lower()
        name = f"{results[item]['lcode']}_{corpus_name}"
        results[item]["name"] = name

        # Drop splits that ended up without any files.
        for fileset_k, fileset_v in list(results[item]["splits"].items()):
            if not fileset_v["files"]:
                del results[item]["splits"][fileset_k]

        # Drop incomplete items. The 'continue' after each deletion matters:
        # 'results' is a defaultdict, so touching results[item] again after
        # deleting it would silently re-create an empty entry.
        if not results[item]["summary"]:
            del results[item]
            print(f"ITEM DELETED - no summary: {item}")
            continue

        if not any(value["files"] for value in
                   results[item]["splits"].values()):
            print(results[item]["splits"])
            del results[item]
            print(f"ITEM DELETED - no files : {item}")
            continue

        if not results[item]["license"]:
            del results[item]
            print(f"ITEM DELETED - no license: {item}")

    # Re-key results by the short treebank name (e.g. 'grc_proiel') and keep
    # the original directory name under 'dirname'.
    for key in list(results.keys()):
        name = results[key]["name"]
        del results[key]["name"]
        results[key]["dirname"] = key
        results[name] = results.pop(key)

    return results


if __name__ == '__main__':
    directory = 'UD_repos'
    results = traverse_directory(directory)
    for name, metadata in results.items():
        print(f"Directory: {metadata['dirname']}")

    output_fn = f"metadata-{UD_VER}.json"
    # Write the output unless it already exists and -o/--override is not set.
    if args.override or not Path(output_fn).exists():
        with open(output_fn, 'w') as fh:
            json.dump(results, fh, ensure_ascii=False)
        print(f"{output_fn} written")
    else:
        logging.info(f"Output {output_fn} already exists: Not overriding.")