import importlib.metadata
import inspect
import re
import subprocess
import sys
from datetime import datetime
from importlib import reload
from pathlib import Path

import gradio as gr
import optimum.intel.utils.import_utils as import_utils
import pandas as pd
from packaging.requirements import Requirement
from packaging.version import Version

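# Clone the optimum-intel repository (or update an existing checkout) to get its OpenVINO test files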
if Path("optimum-intel").is_dir():
    subprocess.run(["git", "pull"], cwd="optimum-intel")
else:
    subprocess.run(["git", "clone", "https://github.com/huggingface/optimum-intel.git"])
test_path = Path(__file__).parent / "optimum-intel" / "tests" / "openvino"
sys.path.append(str(test_path))

# Import the test files globally. Importing inside a function with import_module is extremely
# slow inside a Gradio app.
import test_decoder
import test_diffusion
import test_modeling
import test_seq2seq


def get_supported_models_for_version(version):
    """
    Get supported architectures for a particular transformers version. Uses mocking to set the
    transformers version to `version`.
    """
    import_utils._transformers_version = version

    # Patch _transformers_version in the test modules as well
    test_seq2seq._transformers_version = version
    test_modeling._transformers_version = version
    test_diffusion._transformers_version = version
    test_decoder._transformers_version = version

    # Re-import to refresh SUPPORTED_ARCHITECTURES
    seq2seq = reload(test_seq2seq)
    decoder = reload(test_decoder)
    modeling = reload(test_modeling)
    diffusion = reload(test_diffusion)

    # Get SUPPORTED_ARCHITECTURES from all modules
    d = {}
    modules = [seq2seq, decoder, modeling, diffusion]
    for mod in modules:
        for name, obj in inspect.getmembers(mod):
            if inspect.isclass(obj):
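                # Test classes are named like OVModelFor<Task>IntegrationTest or OVPipelineFor<Task>Test;
                # strip the suffix to recover the task name used as the section title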
                if re.match(r"(OVModelFor.*IntegrationTest)", name) or re.match(r"(OVPipelineFor.*Test)", name):
                    task = name.replace("IntegrationTest", "").replace("Test", "")
                    if "CustomTasks" not in task:
                        d[task] = obj.SUPPORTED_ARCHITECTURES
    return d


def get_min_max_transformers():
    """
    Get the minimum and maximum transformers versions supported by the currently installed optimum-intel.
    """
    meta = importlib.metadata.metadata("optimum-intel")
    requires = meta.get_all("Requires-Dist") or []
    transformers_versions = [item for item in requires if "transformers" in item and "extra" not in item][0]
    req = Requirement(transformers_versions)
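    # Sort the version bounds so the result does not depend on the specifier set's iteration order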
    versions = sorted((spec.version for spec in req.specifier), key=Version)
    minver, maxver = versions[0], versions[-1]
    return (minver, maxver)


def generate_model_list():
    RESULT_FILE = "supported_models.md"
    minver, maxver = get_min_max_transformers()  # The returned maxver may be too new to be supported, but that is not a problem here
    versions = [minver, "4.53.0", maxver]

    model_classes_with_models = {}
    for v in versions:
        model_classes_with_models_version = get_supported_models_for_version(v)
        for item in model_classes_with_models_version:
            model_classes_with_models.setdefault(item, set())
            model_classes_with_models[item].update(model_classes_with_models_version[item])

    with open(RESULT_FILE, "w") as f:
        optimum_intel_version = importlib.metadata.version("optimum-intel")
        f.write(f"Updated at {datetime.now().strftime('%d %B %Y')} using optimum-intel {optimum_intel_version}\n\n")
        summary = []
        all_archs = []
        for archs in model_classes_with_models.values():
            all_archs += archs
        for title, supported_models in model_classes_with_models.items():
            f.write(f"## {title}\n\n")
            for item in supported_models:
                f.write(f" - {item}\n")
            f.write("\n")
            summary.append((title, len(supported_models)))
        md_summary = pd.DataFrame.from_records(summary, columns=["task", "number of architectures"]).to_markdown()
        f.write("# Summary\n\n")
        f.write(md_summary)
        f.write("\n\n")
        num_total_archs = len(set(all_archs))
        f.write(f"Total unique architectures: {num_total_archs}\n\n")
        f.write(f"Total validated architecture/task combinations: {len(all_archs)}\n\n")

    return Path(RESULT_FILE).read_text(), RESULT_FILE


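# Gradio UI with no inputs: the single action calls generate_model_list and returns
# the rendered Markdown plus the generated file for download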
demo = gr.Interface(
    fn=generate_model_list,
    title="List of validated architectures for optimum[openvino]",
    inputs=[],
    outputs=[gr.Markdown(), gr.File()],
)
demo.launch(server_name="0.0.0.0")