AI-RESEARCHER-2024 committed
Commit 9bedf7f · verified · 1 parent: 8112719

Upload 6 files

Files changed (6)
  1. 04.png +0 -0
  2. 07.png +0 -0
  3. app.py +50 -0
  4. model.h5 +3 -0
  5. requirements.txt +2 -0
  6. tensorflow_datasets.ipynb +250 -0
04.png ADDED
07.png ADDED
app.py ADDED
@@ -0,0 +1,50 @@
+ import gradio as gr
+ import tensorflow as tf
+ import numpy as np
+
+ # Load the trained model
+ model = tf.keras.models.load_model('model.h5')
+ print("Model loaded successfully!")
+
+ def preprocess_image(image):
+     """Process the input image to match MNIST format"""
+     # Convert to grayscale
+     image = image.convert('L')
+     # Resize to 28x28
+     image = image.resize((28, 28))
+     # Convert to numpy array and normalize
+     image_array = np.array(image)
+     image_array = image_array / 255.0
+     # Reshape to match model input
+     image_array = np.expand_dims(image_array, axis=0)
+     return image_array
+
+ def predict_digit(image):
+     if image is None:
+         return None
+
+     # Preprocess the image
+     processed_image = preprocess_image(image)
+
+     # Make prediction
+     predictions = model.predict(processed_image)
+     pred_scores = tf.nn.softmax(predictions[0]).numpy()
+     pred_class = np.argmax(pred_scores)
+
+     # Create result string
+     result = f"Prediction: {pred_class}"
+
+     return result
+
+ # Create Gradio interface
+ demo = gr.Interface(
+     fn=predict_digit,
+     inputs=gr.Image(type="pil"),
+     outputs=gr.Textbox(label="Result"),
+     title="MNIST Digit Recognizer",
+     description="Upload a digit from 0-9 and the model will predict which digit it is.",
+     examples=None,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
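As a quick sanity check before deploying, the prediction path can be exercised without launching the UI. A minimal sketch, not part of the commit, assuming Pillow is installed and the snippet runs in the same directory as app.py and the sample images:

    from PIL import Image
    from app import predict_digit  # importing app.py also loads model.h5

    sample = Image.open('04.png')
    print(predict_digit(sample))   # should print "Prediction: 4" if the model gets it right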
model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6aac72609e1d15790a883acae3830d5afbb79e073e74714f5b997d4b6f64e40e
+ size 452712
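The three lines above are a Git LFS pointer, not the weights themselves: the oid and size identify the actual ~450 KB model.h5, which Git LFS fetches on clone (or via git lfs pull) when the extension is installed.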
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ # requirements.txt
+ tensorflow
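Note that app.py also imports gradio and numpy. On a Gradio-SDK Hugging Face Space those are available anyway (gradio ships with the SDK, numpy as a TensorFlow dependency), which is presumably why only tensorflow is pinned here; a local run would need them installed as well.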
tensorflow_datasets.ipynb ADDED
@@ -0,0 +1,250 @@
+ {
+   "cells": [
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "USSV_OlCFKOD"
+       },
+       "source": [
+         "# Training a neural network on MNIST with Keras\n",
+         "\n",
+         "This simple example demonstrates how to plug TensorFlow Datasets (TFDS) into a Keras model.\n"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "J8y9ZkLXmAZc"
+       },
+       "source": [
+         "Copyright 2020 The TensorFlow Datasets Authors, Licensed under the Apache License, Version 2.0"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "OGw9EgE0tC0C"
+       },
+       "source": [
+         "<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
+         "  <td>\n",
+         "    <a target=\"_blank\" href=\"https://www.tensorflow.org/datasets/keras_example\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n",
+         "  </td>\n",
+         "  <td>\n",
+         "    <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/datasets/blob/master/docs/keras_example.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
+         "  </td>\n",
+         "  <td>\n",
+         "    <a target=\"_blank\" href=\"https://github.com/tensorflow/datasets/blob/master/docs/keras_example.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
+         "  </td>\n",
+         "  <td>\n",
+         "    <a href=\"https://storage.googleapis.com/tensorflow_docs/datasets/docs/keras_example.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n",
+         "  </td>\n",
+         "</table>"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "TTBSvHcSLBzc"
+       },
+       "outputs": [],
+       "source": [
+         "import tensorflow as tf\n",
+         "import tensorflow_datasets as tfds"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "VjI6VgOBf0v0"
+       },
+       "source": [
+         "## Step 1: Create your input pipeline\n",
+         "\n",
+         "Start by building an efficient input pipeline using advice from:\n",
+         "* The [Performance tips](https://www.tensorflow.org/datasets/performances) guide\n",
+         "* The [Better performance with the `tf.data` API](https://www.tensorflow.org/guide/data_performance#optimize_performance) guide\n"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "c3aH3vP_XLI8"
+       },
+       "source": [
+         "### Load a dataset\n",
+         "\n",
+         "Load the MNIST dataset with the following arguments:\n",
+         "\n",
+         "* `shuffle_files=True`: The MNIST data is only stored in a single file, but for larger datasets with multiple files on disk, it's good practice to shuffle them when training.\n",
+         "* `as_supervised=True`: Returns a tuple `(img, label)` instead of a dictionary `{'image': img, 'label': label}`."
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "ZUMhCXhFXdHQ"
+       },
+       "outputs": [],
+       "source": [
+         "(ds_train, ds_test), ds_info = tfds.load(\n",
+         "    'mnist',\n",
+         "    split=['train', 'test'],\n",
+         "    shuffle_files=True,\n",
+         "    as_supervised=True,\n",
+         "    with_info=True,\n",
+         ")"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "rgwCFAcWXQTx"
+       },
+       "source": [
+         "### Build a training pipeline\n",
+         "\n",
+         "Apply the following transformations:\n",
+         "\n",
+         "* `tf.data.Dataset.map`: TFDS provides images of type `tf.uint8`, while the model expects `tf.float32`, so you need to normalize the images.\n",
+         "* `tf.data.Dataset.cache`: As the dataset fits in memory, cache it before shuffling for better performance.<br/>\n",
+         "__Note:__ Random transformations should be applied after caching.\n",
+         "* `tf.data.Dataset.shuffle`: For true randomness, set the shuffle buffer to the full dataset size.<br/>\n",
+         "__Note:__ For large datasets that can't fit in memory, use `buffer_size=1000` if your system allows it.\n",
+         "* `tf.data.Dataset.batch`: Batch elements of the dataset after shuffling to get unique batches at each epoch.\n",
+         "* `tf.data.Dataset.prefetch`: It is good practice to end the pipeline by prefetching [for performance](https://www.tensorflow.org/guide/data_performance#prefetching)."
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "haykx2K9XgiI"
+       },
+       "outputs": [],
+       "source": [
+         "def normalize_img(image, label):\n",
+         "  \"\"\"Normalizes images: `uint8` -> `float32`.\"\"\"\n",
+         "  return tf.cast(image, tf.float32) / 255., label\n",
+         "\n",
+         "ds_train = ds_train.map(\n",
+         "    normalize_img, num_parallel_calls=tf.data.AUTOTUNE)\n",
+         "ds_train = ds_train.cache()\n",
+         "ds_train = ds_train.shuffle(ds_info.splits['train'].num_examples)\n",
+         "ds_train = ds_train.batch(128)\n",
+         "ds_train = ds_train.prefetch(tf.data.AUTOTUNE)"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "RbsMy4X1XVFv"
+       },
+       "source": [
+         "### Build an evaluation pipeline\n",
+         "\n",
+         "Your testing pipeline is similar to the training pipeline, with small differences:\n",
+         "\n",
+         " * You don't need to call `tf.data.Dataset.shuffle`.\n",
+         " * Caching is done after batching because batches can be the same between epochs."
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "A0KjuDf7XiqY"
+       },
+       "outputs": [],
+       "source": [
+         "ds_test = ds_test.map(\n",
+         "    normalize_img, num_parallel_calls=tf.data.AUTOTUNE)\n",
+         "ds_test = ds_test.batch(128)\n",
+         "ds_test = ds_test.cache()\n",
+         "ds_test = ds_test.prefetch(tf.data.AUTOTUNE)"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "nTFoji3INMEM"
+       },
+       "source": [
+         "## Step 2: Create and train the model\n",
+         "\n",
+         "Plug the TFDS input pipeline into a simple Keras model, compile the model, and train it."
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "XWqxdmS1NLKA"
+       },
+       "outputs": [],
+       "source": [
+         "model = tf.keras.models.Sequential([\n",
+         "  tf.keras.layers.Flatten(input_shape=(28, 28)),\n",
+         "  tf.keras.layers.Dense(128, activation='relu'),\n",
+         "  tf.keras.layers.Dense(10)\n",
+         "])\n",
+         "model.compile(\n",
+         "    optimizer=tf.keras.optimizers.Adam(0.001),\n",
+         "    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n",
+         "    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],\n",
+         ")\n",
+         "\n",
+         "model.fit(\n",
+         "    ds_train,\n",
+         "    epochs=6,\n",
+         "    validation_data=ds_test,\n",
+         ")"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "source": [
+         "Save the trained model"
+       ],
+       "metadata": {
+         "id": "lOLnm8sk-rDP"
+       }
+     },
+     {
+       "cell_type": "code",
+       "source": [
+         "# Save the full model (architecture + weights) for app.py to load\n",
+         "model.save('model.h5')"
+       ],
+       "metadata": {
+         "id": "8nd9iSyG-s9p"
+       },
+       "execution_count": null,
+       "outputs": []
+     },
+     {
+       "cell_type": "code",
+       "source": [],
+       "metadata": {
+         "id": "DQuEn_g7-vlR"
+       },
+       "execution_count": null,
+       "outputs": []
+     }
+   ],
+   "metadata": {
+     "colab": {
+       "private_outputs": true,
+       "provenance": [],
+       "toc_visible": true
+     },
+     "kernelspec": {
+       "display_name": "Python 3",
+       "name": "python3"
+     }
+   },
+   "nbformat": 4,
+   "nbformat_minor": 0
+ }
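As a closing check, a final cell could verify the round trip between this notebook and app.py: reload the saved model.h5 and confirm it still scores on the test pipeline. A minimal sketch, not part of the committed notebook, reusing its own names (ds_test, model.h5):

    # Reload the saved artifact and evaluate it on the prepared test pipeline.
    reloaded = tf.keras.models.load_model('model.h5')
    loss, acc = reloaded.evaluate(ds_test)
    print(f"Reloaded test accuracy: {acc:.4f}")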