Initial upload of TinyLLM
- .gitignore +2 -0
- README.md +133 -0
- config.json +11 -0
- custom_run.py +154 -0
- pytorch_model.bin +3 -0
- src/__init__.py +0 -0
- src/dataset.py +48 -0
- src/model.py +170 -0
- src/tokenizer.py +81 -0
- train.py +100 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
# Content of .gitignore
upload_repo.py
README.md
ADDED
@@ -0,0 +1,133 @@
---
license: mit
language: en
tags:
- llm
- pytorch
- custom-model
- causal-lm
- character-level
- math
- tiny-model
model_type: tiny-causal-llm
datasets:
- custom
pipeline_tag: text-generation
---

# TinyLLM: Character-Level Math Solver

## Model Description

**TinyLLM** is a highly compact, character-level **Causal Language Model** (a standard Transformer decoder) trained specifically to solve single-digit math problems.

This model serves as a minimalist, educational example of how a standard LLM architecture can be trained from scratch on a very small, custom dataset.

### Key Features

* **Architecture:** Causal Transformer decoder.
* **Task:** Character-level text generation (autoregressive).
* **Input/Output:** Solves problems formatted as `N op N` and generates the answer, e.g., `4 + 5 = 9<EOS>`.
* **Custom Code Required:** This is a custom PyTorch model and requires the custom code in this repository (`src/model.py`, `src/tokenizer.py`) to be loaded.

---

## How to Use (Inference)

To load and run this custom model, download the entire repository and use the provided custom code, specifically the `TinyLLM` class defined in **`src/model.py`** and the `CharacterTokenizer` in **`src/tokenizer.py`**.

### 1. Installation

First, ensure you have the required libraries installed:

```bash
pip install torch huggingface-hub
```

### 2. Download and Load the Model

```python
from huggingface_hub import snapshot_download
import torch
import os
import sys

# 1. Configuration: REPLACE with your repository ID
MODEL_ID = "anujbhatt4ai/tiny-math-llm"
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# 2. Download all files (code and weights)
local_path = snapshot_download(repo_id=MODEL_ID)

# 3. Import the custom classes
# The downloaded path must be added to sys.path so the `src` package can be imported
sys.path.append(local_path)
from src.model import TinyLLM
from src.tokenizer import CharacterTokenizer, generate_v1_data

# 4. Setup and load the model
def load_tiny_llm():
    # In this minimal case, we hardcode the known config values (see config.json)
    vocab_size = 22
    block_size = 14

    # Initialize the model with the exact trained parameters
    model = TinyLLM(
        vocab_size=vocab_size,
        block_size=block_size,
        n_embed=64, n_head=4, n_layer=4, dropout=0.1
    ).to(DEVICE)

    # Load the trained weights
    weights_path = os.path.join(local_path, "pytorch_model.bin")
    model.load_state_dict(torch.load(weights_path, map_location=DEVICE))
    model.eval()

    # Initialize the tokenizer
    raw_data = generate_v1_data()
    tokenizer = CharacterTokenizer(raw_data)

    return model, tokenizer

# Use the loaded model and tokenizer in your own generation logic
model, tokenizer = load_tiny_llm()
print("Model loaded and ready for math inference!")
```
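### 3. Generating an Answer (sketch)

The loader above only prepares the model; the generation loop itself is up to you. Below is a minimal sketch of an autoregressive sampling loop that mirrors the logic in `custom_run.py` (the `solve` helper name is only for illustration; the prompt format and the block size of 14 come from the training data):

```python
import torch.nn.functional as F

@torch.no_grad()
def solve(question, model, tokenizer, block_size=14):
    # Encode the prompt, e.g. "4 + 5", plus a trailing space before the '='
    idx = torch.tensor([tokenizer.encode(question + " ")], dtype=torch.long, device=DEVICE)
    # Sample one character at a time until the context window is full
    while idx.shape[1] < block_size:
        logits, _ = model(idx[:, -block_size:])      # (1, T, vocab_size)
        probs = F.softmax(logits[:, -1, :], dim=-1)  # distribution over the next character
        idx = torch.cat((idx, torch.multinomial(probs, num_samples=1)), dim=1)
    return tokenizer.decode(idx[0].tolist())

print(solve("4 + 5", model, tokenizer))  # expected: "4 + 5 = 9<EOS>"
```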
## Training Details

### Architecture Configuration

The `TinyLLM` is configured with the following parameters, taken from the `config.json` and `src/model.py` files:

| Parameter | Value | Description |
| :--- | :--- | :--- |
| **`vocab_size`** | `22` | The size of the character vocabulary. |
| **`block_size`** | `14` | The maximum sequence length (context window). |
| **`n_embed`** | `64` | Embedding dimension. |
| **`n_head`** | `4` | Number of attention heads. |
| **`n_layer`** | `4` | Number of Transformer decoder blocks. |
| **`dropout`** | `0.1` | Dropout rate. |

### Training Hyperparameters (from `train.py`)

| Parameter | Value |
| :--- | :--- |
| **`BATCH_SIZE`** | `32` |
| **`LEARNING_RATE`** | `1e-3` (AdamW) |
| **`EPOCHS`** | `100` |
| **`DEVICE`** | `cuda` if available, else `cpu` |

### Dataset

The model was trained on an **exhaustive set of all single-digit math problems** (addition, subtraction, multiplication, and remainder-free division) whose result is also a single digit (0-9). The **`src/dataset.py`** file contains the logic for the one-position sequence shift used in language-model training.

---

## Repository Files

This repository contains all the source code needed for complete reproducibility.

| File Name | Description |
| :--- | :--- |
| **`pytorch_model.bin`** | The trained model weights. |
| **`config.json`** | Model configuration/hyperparameters. |
| **`src/model.py`** | **Core logic:** custom `TinyLLM` architecture definition. |
| **`src/tokenizer.py`** | **Core logic:** custom `CharacterTokenizer` and data generator. |
| **`src/dataset.py`** | Defines the `MathDataset` class and the sequence-shift logic. |
| **`train.py`** | The complete training script and final hyperparameters. |
| **`custom_run.py`** | Example script demonstrating how to use the model for generation. |
| **`README.md`** | This model card and documentation. |
config.json
ADDED
@@ -0,0 +1,11 @@
{
  "n_embed": 64,
  "n_head": 4,
  "n_layer": 4,
  "dropout": 0.1,
  "vocab_size": 22,
  "block_size": 14,
  "architectures": ["TinyLLM"],
  "model_type": "tiny-causal-llm",
  "_from_model_config": true
}
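The fields above mirror the constructor arguments of `TinyLLM`, so the model can also be rebuilt directly from `config.json` instead of hardcoding the values. A small sketch (assuming it is run from the downloaded repository root):

```python
import json
import torch
from src.model import TinyLLM

# Rebuild the architecture from the saved hyperparameters
with open("config.json") as f:
    cfg = json.load(f)

model = TinyLLM(
    vocab_size=cfg["vocab_size"],
    n_embed=cfg["n_embed"],
    n_head=cfg["n_head"],
    n_layer=cfg["n_layer"],
    block_size=cfg["block_size"],
    dropout=cfg["dropout"],
)
model.load_state_dict(torch.load("pytorch_model.bin", map_location="cpu"))
model.eval()
```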
custom_run.py
ADDED
@@ -0,0 +1,154 @@
import torch
import torch.nn.functional as F
import os
import sys

# --- Ensure src folder is in the path for imports ---
# This helps the script find model.py, tokenizer.py, etc.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'src')))

# --- Import all project components ---
from src.tokenizer import generate_v1_data, CharacterTokenizer
from src.model import TinyLLM, n_embed, n_head, n_layer, dropout  # Also import hyperparams

# --- Configuration (CHECK THIS PATH!) ---
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# Use the file name confirmed in your last successful training run
WEIGHTS_PATH = 'data/tinyllm_v1_weights1.pt'


@torch.no_grad()
def generate(model, idx, max_new_tokens):
    """
    Takes a sequence of indices (idx) and generates max_new_tokens new indices
    using the model autoregressively.
    """
    model.eval()  # Set model to evaluation mode

    # idx is a (B, T) array of indices in the current context
    for _ in range(max_new_tokens):
        # Crop the context to the model's block size
        block_size = model.block_size
        idx_cond = idx[:, -block_size:]

        # Get predictions
        logits, _ = model(idx_cond)

        # Focus only on the last time step (the next token)
        logits = logits[:, -1, :]

        # Apply softmax to get probabilities
        probs = F.softmax(logits, dim=-1)

        # Sample from the distribution
        idx_next = torch.multinomial(probs, num_samples=1)

        # Append the sampled index to the running sequence
        idx = torch.cat((idx, idx_next), dim=1)

    return idx


def setup_inference():
    """Sets up the model, tokenizer, and loads weights for inference."""
    try:
        # 1. Set up the data pipeline to determine sequence lengths
        raw_data = generate_v1_data()
        tokenizer = CharacterTokenizer(raw_data)
        max_len = max(len(s) for s in raw_data)

        # block_size must match the model's training size (14 for the V1 dataset)
        # block_size is the maximum sequence length (T) the model can handle
        block_size = max_len  # max_len is 14 for the V1 dataset

        # 2. Initialize the model architecture
        model = TinyLLM(
            vocab_size=tokenizer.vocab_size,
            n_embed=n_embed,
            n_head=n_head,
            n_layer=n_layer,
            block_size=block_size,
            dropout=dropout
        ).to(DEVICE)

        # 3. Load the trained weights
        model.load_state_dict(torch.load(WEIGHTS_PATH, map_location=DEVICE))
        print(f"\nSuccessfully loaded model weights from {WEIGHTS_PATH}")

        return model, tokenizer, block_size

    except FileNotFoundError:
        print(f"Error: Weights file not found at {WEIGHTS_PATH}. Please run train.py first.")
        return None, None, None
    except RuntimeError as e:
        print(f"Runtime Error during loading: {e}")
        print("Please ensure your src/model.py hyperparameters match the saved weights.")
        return None, None, None


def solve_problem(model, tokenizer, question_str, block_size):
    """Encodes a question, generates the answer, and prints the result."""

    # 1. Encode the question string (e.g., "5 + 3")
    context_tokens = tokenizer.encode(question_str)
    # Add a trailing space so the model can generate "= ANS" cleanly
    context_tokens.append(tokenizer.encode(' ')[0])

    # Convert the list of token IDs to a PyTorch tensor (1, T)
    idx = torch.tensor([context_tokens], dtype=torch.long, device=DEVICE)

    # 2. Generate the rest of the sequence (the "= ANS<EOS>" part),
    # filling the remainder of the model's context window
    max_new_tokens = block_size - idx.shape[1]

    if max_new_tokens <= 0:
        print("Error: Input sequence is too long.")
        return

    # Generate tokens
    generated_idx = generate(model, idx, max_new_tokens=max_new_tokens)

    # 3. Decode the result and print
    generated_sequence = tokenizer.decode(generated_idx[0].tolist())

    print(f"Question: '{question_str}'")
    print(f"Model Output: '{generated_sequence}'")


# --- Main Interactive User Loop ---
if __name__ == '__main__':
    model, tokenizer, block_size = setup_inference()

    if model is not None:
        print("\n--- TinyLLM Math Chatbot Initialized ---")
        print("Enter a single-digit math problem (e.g., 4 + 5, 8 / 2).")
        print("Type 'exit' to quit.")

        while True:
            # 1. Get user input
            question_str = input("Input: ")

            if question_str.lower() == 'exit':
                break

            # 2. Basic input validation
            question_str = question_str.strip()
            parts = question_str.split()

            # Simple check for format N op N and single digits
            is_valid = (
                len(parts) == 3 and
                parts[0].isdigit() and len(parts[0]) == 1 and
                parts[2].isdigit() and len(parts[2]) == 1 and
                parts[1] in ['+', '-', '*', '/']
            )

            if not is_valid:
                print("Error: Please enter a problem in the format 'N op N' with single-digit operands (e.g., 2 + 3).\n")
                continue

            # 3. Solve the problem using the trained model
            solve_problem(model, tokenizer, question_str, block_size)
            print("-" * 30)

    print("\n--- Chatbot Shutting Down ---")
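The script is interactive, but its helpers can also be used programmatically. A short sketch (assuming it runs from the repository root and that `train.py` has already produced the weights file referenced by `WEIGHTS_PATH`):

```python
# The __main__ guard in custom_run.py keeps the interactive loop from starting on import.
from custom_run import setup_inference, solve_problem

model, tokenizer, block_size = setup_inference()
if model is not None:
    for question in ["4 + 5", "8 / 2", "3 * 3"]:
        solve_problem(model, tokenizer, question, block_size)
```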
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:81314013353fa80296b075a6a7a45e0db34ddc5584bbcac4728160cd5c60e3ad
size 832685
src/__init__.py
ADDED
File without changes
src/dataset.py
ADDED
@@ -0,0 +1,48 @@
import torch
from torch.utils.data import Dataset
from typing import List, Tuple

class MathDataset(Dataset):
    """
    A custom PyTorch Dataset to handle the encoded math problem sequences.
    It performs the crucial language model shift (X is the input, Y is X shifted by one)
    and handles padding.
    """
    def __init__(self, data: List[str], tokenizer, max_len: int):
        self.data = data
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.pad_token_id = tokenizer.pad_token_id  # Use the ID stored in the tokenizer

    def __len__(self):
        # Returns the total number of problems in the dataset
        return len(self.data)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
        # 1. Get the raw encoded sequence (list of IDs)
        raw_text = self.data[idx]
        sequence_ids = self.tokenizer.encode(raw_text)

        # 2. Sequence shift: the core of language modeling
        # X (input): the Transformer sees this (e.g., [7, +, 2, =])
        # Y (target): the Transformer must predict this at the next step (e.g., [+, 2, =, 9])

        # X: all tokens except the final one
        # We cut off the last token because there is no token for the model to predict AFTER it.
        x = sequence_ids[:-1]

        # Y: all tokens except the first one. This is the sequence X is trying to predict,
        # i.e. the "correct next token" for every position in X.
        y = sequence_ids[1:]

        # 3. Padding
        # All sequences in a batch must have the same length (T or block_size).
        padding_length = self.max_len - len(x)

        # Pad the sequences X and Y with the <PAD> token ID
        x_padded = x + [self.pad_token_id] * padding_length
        y_padded = y + [self.pad_token_id] * padding_length

        # 4. Convert to PyTorch tensors (dtype=torch.long is standard for integer IDs)
        return torch.tensor(x_padded, dtype=torch.long), torch.tensor(y_padded, dtype=torch.long)
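To make the shift concrete, a small inspection sketch (names are illustrative; it assumes the `src` package is importable from the repository root):

```python
from src.tokenizer import generate_v1_data, CharacterTokenizer
from src.dataset import MathDataset

raw_data = generate_v1_data()               # problems like "7 + 2 = 9<EOS>"
tokenizer = CharacterTokenizer(raw_data)
max_len = max(len(s) for s in raw_data)     # 14 for the V1 data

dataset = MathDataset(raw_data, tokenizer, max_len)
x, y = dataset[0]
print(tokenizer.decode(x.tolist()))         # the problem minus its last character (plus <PAD>)
print(tokenizer.decode(y.tolist()))         # the same string shifted left by one (plus <PAD>)
```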
src/model.py
ADDED
@@ -0,0 +1,170 @@
from huggingface_hub import PyTorchModelHubMixin

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

# --- Hyperparameters (you can adjust these later) ---
# For a "Tiny" LLM, we keep the size very small.
n_embed = 64   # C: Embedding dimension (size of the vector representing a character)
n_head = 4     # H: Number of attention heads
n_layer = 4    # Number of repeating Transformer blocks
dropout = 0.1  # Dropout rate

# --- 1. Causal Self-Attention (The "Attention Is All You Need" Component) ---

class CausalSelfAttention(nn.Module):
    """A multi-head masked self-attention module."""

    def __init__(self, n_embed, n_head, block_size, dropout):
        super().__init__()

        self.n_embed = n_embed
        self.n_head = n_head
        self.head_size = n_embed // n_head

        # Combined projection for Q, K, and V (more efficient)
        self.c_attn = nn.Linear(n_embed, 3 * n_embed, bias=False)
        # Output projection
        self.c_proj = nn.Linear(n_embed, n_embed, bias=False)
        self.attn_dropout = nn.Dropout(dropout)
        self.resid_dropout = nn.Dropout(dropout)

        # Causal mask (tril = lower triangular matrix)
        # This mask prevents a token from attending to future tokens (autoregressive)
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size))
                             .view(1, 1, block_size, block_size))

    def forward(self, x):
        B, T, C = x.shape  # Batch size, Sequence length (Time), Embedding dimension (Channel)

        # 1. Compute Q, K, V and split (efficiently)
        # q, k, v are (B, T, C)
        qkv = self.c_attn(x)
        q, k, v = qkv.split(self.n_embed, dim=2)

        # 2. Reshape for multi-head attention: (B, T, C) -> (B, H, T, head_size)
        # Each head processes a smaller chunk of the dimension C
        k = k.view(B, T, self.n_head, self.head_size).transpose(1, 2)
        q = q.view(B, T, self.n_head, self.head_size).transpose(1, 2)
        v = v.view(B, T, self.n_head, self.head_size).transpose(1, 2)

        # 3. Scaled dot-product attention: (B, H, T, T)
        # wei = (q @ k.transpose(-2, -1)) / sqrt(head_size)
        wei = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(self.head_size))

        # 4. Apply the causal mask
        # Set attention scores to -inf for future tokens (where tril == 0)
        wei = wei.masked_fill(self.tril[:, :, :T, :T] == 0, float('-inf'))

        # 5. Softmax and dropout
        wei = F.softmax(wei, dim=-1)
        wei = self.attn_dropout(wei)

        # 6. Compute the weighted sum of values: (B, H, T, head_size)
        out = wei @ v

        # 7. Re-assemble heads: (B, H, T, head_size) -> (B, T, C)
        out = out.transpose(1, 2).contiguous().view(B, T, C)

        # 8. Final linear projection
        out = self.resid_dropout(self.c_proj(out))
        return out

# --- 2. Feed-Forward Network (FFN) ---

class FeedForward(nn.Module):
    """A two-layer MLP for processing attention output."""
    def __init__(self, n_embed, dropout):
        super().__init__()
        self.net = nn.Sequential(
            # Standard ratio is 4x the embedding size
            nn.Linear(n_embed, 4 * n_embed),
            nn.GELU(),  # Modern activation function (smoother than ReLU)
            nn.Linear(4 * n_embed, n_embed),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)


# --- 3. Transformer Block (The Repeating Unit) ---

class TransformerBlock(nn.Module):
    """A standard Transformer decoder block with attention and FFN."""

    def __init__(self, n_embed, n_head, block_size, dropout):
        super().__init__()
        # LayerNorm applied BEFORE the sub-layer (pre-norm style)
        self.ln_1 = nn.LayerNorm(n_embed)
        self.attn = CausalSelfAttention(n_embed, n_head, block_size, dropout)
        self.ln_2 = nn.LayerNorm(n_embed)
        self.ffn = FeedForward(n_embed, dropout)

    def forward(self, x):
        # 1. Attention with residual connection and LayerNorm
        x = x + self.attn(self.ln_1(x))
        # 2. FFN with residual connection and LayerNorm
        x = x + self.ffn(self.ln_2(x))
        return x

# --- 4. The Final TinyLLM Model ---

class TinyLLM(nn.Module, PyTorchModelHubMixin):
    """The complete decoder-only Transformer model."""

    def __init__(self, vocab_size, n_embed, n_head, n_layer, block_size, dropout):
        super().__init__()

        self.block_size = block_size

        self.token_embedding_table = nn.Embedding(vocab_size, n_embed)
        # Positional encoding: a learned table for position information
        self.position_embedding_table = nn.Embedding(block_size, n_embed)

        # Stack of Transformer blocks
        self.blocks = nn.Sequential(*[
            TransformerBlock(n_embed, n_head, block_size, dropout)
            for _ in range(n_layer)
        ])

        self.ln_f = nn.LayerNorm(n_embed)  # Final LayerNorm
        # Linear layer to map the embedding vector back to the vocabulary space
        self.lm_head = nn.Linear(n_embed, vocab_size)

    def forward(self, idx, targets=None):
        # idx is the input tensor X of shape (B, T)
        B, T = idx.shape

        # 1. Token and positional embeddings
        # Token embedding: (B, T, C)
        tok_emb = self.token_embedding_table(idx)
        # Position embedding: (T, C) -> broadcast to (B, T, C)
        pos = torch.arange(T, device=idx.device)
        pos_emb = self.position_embedding_table(pos)

        # 2. Combine (add) the embeddings
        x = tok_emb + pos_emb  # (B, T, C)

        # 3. Pass through the Transformer blocks
        x = self.blocks(x)  # (B, T, C)

        # 4. Final LayerNorm and linear head
        x = self.ln_f(x)
        logits = self.lm_head(x)  # (B, T, vocab_size)

        loss = None
        if targets is not None:
            # Reshape for CrossEntropyLoss: (B*T, vocab_size) and (B*T)
            B, T, C = logits.shape
            logits = logits.view(B * T, C)
            targets = targets.view(B * T)

            # Compute the negative log-likelihood loss
            loss = F.cross_entropy(logits, targets)

        return logits, loss
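A quick shape and parameter sanity check for the module above (a sketch, assuming the `src` package is importable; the values match `config.json`):

```python
import torch
from src.model import TinyLLM, n_embed, n_head, n_layer, dropout

model = TinyLLM(vocab_size=22, n_embed=n_embed, n_head=n_head,
                n_layer=n_layer, block_size=14, dropout=dropout)

x = torch.randint(0, 22, (2, 14))    # (B, T) batch of character IDs
logits, loss = model(x, targets=x)   # logits: (B*T, 22) after the reshape, loss: scalar
print(logits.shape, loss.item())
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")  # roughly 200K
```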
src/tokenizer.py
ADDED
@@ -0,0 +1,81 @@
import random

def generate_v1_data():
    """Generates all exhaustive single-digit math problems."""
    data = []

    # Operators and their functions
    ops = {'+': lambda a, b: a + b,
           '-': lambda a, b: a - b,
           '*': lambda a, b: a * b,
           '/': lambda a, b: a / b}

    # Iterate through all single-digit pairs (0-9)
    for a in range(10):
        for b in range(10):
            for op_char, op_func in ops.items():

                # Check the constraints: single-digit answer (0-9) & validity

                if op_char == '+':
                    result = op_func(a, b)
                    # Constraint: sum must be a single digit (<= 9)
                    if result <= 9:
                        problem = f"{a} + {b} = {result}"
                        data.append(problem)

                elif op_char == '-':
                    result = op_func(a, b)
                    # Constraint: result must be non-negative (>= 0) and <= 9
                    if 0 <= result <= 9:
                        problem = f"{a} - {b} = {result}"
                        data.append(problem)

                elif op_char == '*':
                    result = op_func(a, b)
                    # Constraint: product must be a single digit (<= 9)
                    if result <= 9:
                        problem = f"{a} * {b} = {result}"
                        data.append(problem)

                elif op_char == '/':
                    # Cannot divide by zero
                    if b == 0:
                        continue
                    result = op_func(a, b)
                    # Constraint: result must be a whole number (no remainder) AND a single digit (<= 9)
                    if a % b == 0 and result <= 9:
                        # Use int() to remove the potential float from the division result
                        problem = f"{a} / {b} = {int(result)}"
                        data.append(problem)

    # IMPORTANT: shuffle and add the <EOS> marker
    random.shuffle(data)
    final_data = [d + "<EOS>" for d in data]

    return final_data

class CharacterTokenizer:
    """A simple character-level tokenizer for the math problems."""

    def __init__(self, data):
        # 1. Build the unique vocabulary from the entire dataset
        # We need to make sure the data is generated first!
        chars = sorted(list(set("".join(data))))

        # Add a padding token for PyTorch batching
        if '<PAD>' not in chars:
            chars.append('<PAD>')

        self.stoi = {ch: i for i, ch in enumerate(chars)}
        self.itos = {i: ch for i, ch in enumerate(chars)}
        self.vocab_size = len(chars)
        self.pad_token_id = self.stoi['<PAD>']

    def encode(self, s):
        """Encodes a string into a list of integers."""
        return [self.stoi[c] for c in s]

    def decode(self, l):
        """Decodes a list of integers back into a string."""
        return "".join([self.itos[i] for i in l])
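A quick round-trip check of the tokenizer (a sketch, assuming the `src` package is importable from the repository root). Note that `<EOS>` is encoded character by character; only `<PAD>` is a true special token:

```python
from src.tokenizer import generate_v1_data, CharacterTokenizer

data = generate_v1_data()
tokenizer = CharacterTokenizer(data)

print(tokenizer.vocab_size)               # 22: digits 0-9, space, + - * / =, < > E O S, and <PAD>
ids = tokenizer.encode("4 + 5 = 9<EOS>")  # "<EOS>" becomes the five IDs for '<', 'E', 'O', 'S', '>'
print(tokenizer.decode(ids))              # "4 + 5 = 9<EOS>"
```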
train.py
ADDED
@@ -0,0 +1,100 @@
import os

import torch
import torch.nn as nn
from torch.utils.data import DataLoader

# --- Import all project components ---
from src.tokenizer import generate_v1_data, CharacterTokenizer
from src.dataset import MathDataset
from src.model import TinyLLM, n_embed, n_head, n_layer, dropout  # Also import hyperparams

# --- Hyperparameters for Training ---
BATCH_SIZE = 32
LEARNING_RATE = 1e-3  # Standard starting learning rate for AdamW
EPOCHS = 100          # Number of full passes over the dataset (adjust as needed)
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'


def setup_data_pipeline(batch_size=BATCH_SIZE):
    """Sets up the data generation, tokenization, and PyTorch DataLoaders."""

    # 1. Generate data and initialize the tokenizer
    raw_data = generate_v1_data()
    tokenizer = CharacterTokenizer(raw_data)
    max_len = max(len(s) for s in raw_data)

    # 2. Create the Dataset and DataLoader
    train_dataset = MathDataset(raw_data, tokenizer, max_len)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        drop_last=True
    )

    print(f"Total problems: {len(raw_data)}")
    print(f"Vocabulary Size: {tokenizer.vocab_size}")
    print(f"Max Sequence Length (T): {max_len}")
    print(f"Device: {DEVICE}")

    return train_dataloader, tokenizer.vocab_size, max_len


def train(dataloader, vocab_size, block_size):
    """Initializes the model and runs the full training loop."""

    # 1. Initialize the model and optimizer, and move the model to the device
    model = TinyLLM(
        vocab_size=vocab_size,
        n_embed=n_embed,
        n_head=n_head,
        n_layer=n_layer,
        block_size=block_size,
        dropout=dropout
    ).to(DEVICE)

    optimizer = torch.optim.AdamW(model.parameters(), lr=LEARNING_RATE)

    print(f"TinyLLM Parameters: {sum(p.numel() for p in model.parameters())/1e3:.1f}K")
    print(f"Starting training for {EPOCHS} epochs...")

    # 2. Training loop
    for epoch in range(EPOCHS):
        model.train()  # Set model to training mode
        total_loss = 0

        for batch_idx, (X, Y) in enumerate(dataloader):
            # Move data to the selected device (CPU or CUDA)
            X, Y = X.to(DEVICE), Y.to(DEVICE)

            # Forward pass: calculate logits and loss
            logits, loss = model(X, targets=Y)
            total_loss += loss.item()

            # Backward pass: calculate gradients and update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Log progress every 100 batches (adjust frequency if needed)
            if batch_idx % 100 == 0 and batch_idx > 0:
                print(f" Epoch {epoch}/{EPOCHS} | Batch {batch_idx}/{len(dataloader)} | Loss: {loss.item():.4f}")

        avg_loss = total_loss / len(dataloader)
        print(f"--- Epoch {epoch+1} Complete --- Average Loss: {avg_loss:.4f}")

        # If the loss is very low, the model has likely memorized the math.
        if avg_loss < 0.01:
            print("Loss is very low. Stopping training early.")
            break

    # 3. Save the trained model
    os.makedirs('data', exist_ok=True)  # Make sure the output directory exists
    torch.save(model.state_dict(), 'data/tinyllm_v1_weights1.pt')
    print("\nTraining complete! Model weights saved to data/tinyllm_v1_weights1.pt")


if __name__ == '__main__':
    # 1. Set up the data
    dataloader, vocab_size, max_len = setup_data_pipeline()

    # 2. Start the training process
    train(dataloader, vocab_size, max_len)