import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def test_model():
    print("Loading Lbai-1-preview model...")
    model_path = "."
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )
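
    # The tokenizer ships alongside the weights, so it is loaded from the same path.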
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    print("Model loaded successfully!\n")
    test_prompts = [
        "Diagnosis MRI image-processing model result: Mild demented, confidence (76.5%), risk (9.4%) - interpret this output."
    ]

    for i, prompt in enumerate(test_prompts, 1):
        print(f"\n{'=' * 60}")
        print(f"TEST {i}/{len(test_prompts)}")
        print("=" * 60)
        print(f"INPUT PROMPT:\n{prompt}\n")
        print("Generating response...\n")
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        input_length = inputs["input_ids"].shape[1]
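
        # Sampled decoding: temperature and top_p trade determinism for variety;
        # no_grad() disables gradient tracking, which inference does not need.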
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=150,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                # Many causal LMs define no pad token, so EOS is reused for padding.
                pad_token_id=tokenizer.eos_token_id,
            )
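
        # outputs[0] holds the prompt tokens followed by the newly generated ones.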
        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        generated_text = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)

        print("-" * 60)
        print("FULL OUTPUT (Input + Generated):")
        print("-" * 60)
        print(full_response)

        print("\n" + "-" * 60)
        print("GENERATED TEXT ONLY (Model's response):")
        print("-" * 60)
        print(generated_text)
        print("=" * 60)

    print("\n\nAll tests completed successfully!")


if __name__ == "__main__":
    test_model()