import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.activations import linear, relu, sigmoid
%matplotlib widget
import matplotlib.pyplot as plt
plt.style.use('./deeplearning.mplstyle')
import logging
logging.getLogger("tensorflow").setLevel(logging.ERROR)
tf.autograph.set_verbosity(0)
from public_tests import *
from autils import *
from lab_utils_softmax import plt_softmax
import numpy as np
np.set_printoptions(precision=2)
plt_act_trio()
# UNQ_C1
# GRADED CELL: my_softmax
def my_softmax(z):
    """ Softmax converts a vector of values to a probability distribution.
    Args:
      z (ndarray (N,)) : input data, N features
    Returns:
      a (ndarray (N,)) : softmax of z
    """
    ### START CODE HERE ###
    N = len(z)
    a = np.zeros(N)              # initialize a to zeros
    ez_sum = 0                   # initialize sum to zero
    for k in range(N):           # loop over number of outputs
        ez_sum += np.exp(z[k])   # sum exp(z[k]) to build the shared denominator
    for j in range(N):           # loop over number of outputs again
        a[j] = np.exp(z[j]) / ez_sum  # divide the exp of each output by the denominator
    ### END CODE HERE ###
    return a
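# An equivalent vectorized version (a sketch, beyond what the exercise asks for):
# subtracting max(z) before exponentiating guards against overflow and does not
# change the result, since softmax(z) = softmax(z - c) for any constant c.
def my_softmax_vec(z):
    ez = np.exp(z - np.max(z))   # shift by the max for numerical stability
    return ez / ez.sum()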
z = np.array([1., 2., 3., 4.])
a = my_softmax(z)
atf = tf.nn.softmax(z)
print(f"my_softmax(z): {a}")
print(f"tensorflow softmax(z): {atf}")
# BEGIN UNIT TEST
test_my_softmax(my_softmax)
# END UNIT TEST
plt.close(“all”)
plt_softmax(my_softmax)
# Load data stored in arrays X, y from data folder (ex3data1.mat)
from scipy.io import loadmat
import os
data = loadmat(os.path.join('data', 'ex3data1.mat'))
X, y = data['X'], data['y'].ravel()
# set the zero digit to 0, rather than its mapped 10 in this dataset
# This is an artifact due to the fact that this dataset was used in
# MATLAB where there is no index 0
y[y == 10] = 0
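# Optional sanity check: after the remap the label set should be exactly 0..9.
print(np.unique(y))   # expected: [0 1 2 3 4 5 6 7 8 9]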
print('The first element of X is: ', X[0])
print('The first element of y is: ', y[0])
print('The last element of y is: ', y[-1])
print('The shape of X is: ' + str(X.shape))
print('The shape of y is: ' + str(y.shape))
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# You do not need to modify anything in this cell
m, n = X.shape
fig, axes = plt.subplots(8,8, figsize=(5,5))
fig.tight_layout(pad=0.13,rect=[0, 0.03, 1, 0.91]) #[left, bottom, right, top]
#fig.tight_layout(pad=0.5)
widgvis(fig)
for i, ax in enumerate(axes.flat):
    # Select a random index
    random_index = np.random.randint(m)
    # Select the row corresponding to the random index and
    # reshape the image
    X_random_reshaped = X[random_index].reshape((20, 20)).T
    # Display the image
    ax.imshow(X_random_reshaped, cmap='gray')
    # Display the label above the image
    ax.set_title(y[random_index])
    ax.set_axis_off()
fig.suptitle("Label, image", fontsize=14)
# Exercise 2
# UNQ_C2
# GRADED CELL: Sequential model
tf.random.set_seed(1234) # for consistent results
model = Sequential(
    [
        ### START CODE HERE ###
        tf.keras.layers.InputLayer((400,)),
        tf.keras.layers.Dense(25, activation="relu", name="L1"),
        tf.keras.layers.Dense(15, activation="relu", name="L2"),
        tf.keras.layers.Dense(10, activation="linear", name="L3")
        ### END CODE HERE ###
    ], name="my_model"
)
model.summary()
# BEGIN UNIT TEST
test_model(model, 10, 400)
# END UNIT TEST
[layer1, layer2, layer3] = model.layers
#### Examine Weights shapes
W1,b1 = layer1.get_weights()
W2,b2 = layer2.get_weights()
W3,b3 = layer3.get_weights()
print(f"W1 shape = {W1.shape}, b1 shape = {b1.shape}")
print(f"W2 shape = {W2.shape}, b2 shape = {b2.shape}")
print(f"W3 shape = {W3.shape}, b3 shape = {b3.shape}")
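# These shapes follow from the layer sizes: a Dense layer mapping n_in units to
# n_out units stores W with shape (n_in, n_out) and b with shape (n_out,).
# A small sketch that recomputes the parameter counts reported by model.summary():
layer_sizes = [400, 25, 15, 10]
for n_in, n_out in zip(layer_sizes, layer_sizes[1:]):
    print(f"{n_in} -> {n_out}: W {(n_in, n_out)}, b {(n_out,)}, {n_in*n_out + n_out} parameters")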
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
)
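# Why from_logits=True: the final Dense layer is linear, so the model outputs
# raw logits and the loss applies a numerically stable log-softmax internally.
# A minimal check (with hypothetical values) that the logits form matches
# applying softmax explicitly first:
_logits = tf.constant([[2.0, 1.0, 0.1]])
_labels = tf.constant([0])
_scce_logits = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
_scce_probs = tf.keras.losses.SparseCategoricalCrossentropy()
print(_scce_logits(_labels, _logits).numpy())                # loss from raw logits
print(_scce_probs(_labels, tf.nn.softmax(_logits)).numpy())  # same value, from probabilities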
history = model.fit(
    X, y,
    epochs=40
)
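# A quick sketch (not part of the graded exercise): training-set accuracy,
# taking the argmax of the logits for each example and comparing to the label.
logits = model.predict(X)
print(f"training accuracy: {np.mean(np.argmax(logits, axis=1) == y):0.3f}")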
plot_loss_tf(history)
image_of_two = X[1015]
display_digit(image_of_two)
prediction = model.predict(image_of_two.reshape(1,400)) # prediction
print(f" predicting a Two: \n{prediction}")
print(f" Largest Prediction index: {np.argmax(prediction)}")
prediction_p = tf.nn.softmax(prediction)
print(f" predicting a Two. Probability vector: \n{prediction_p}")
print(f"Total of predictions: {np.sum(prediction_p):0.3f}")
yhat = np.argmax(prediction_p)
print(f"np.argmax(prediction_p): {yhat}")
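# Note: softmax is strictly monotonic, so the argmax of the raw logits equals
# the argmax of the probabilities; softmax only rescales outputs to sum to 1.
assert np.argmax(prediction) == np.argmax(prediction_p)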
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# You do not need to modify anything in this cell
m, n = X.shape
fig, axes = plt.subplots(8,8, figsize=(5,5))
fig.tight_layout(pad=0.13,rect=[0, 0.03, 1, 0.91]) #[left, bottom, right, top]
widgvis(fig)
for i, ax in enumerate(axes.flat):
    # Select a random index
    random_index = np.random.randint(m)
    # Select the row corresponding to the random index and
    # reshape the image
    X_random_reshaped = X[random_index].reshape((20, 20)).T
    # Display the image
    ax.imshow(X_random_reshaped, cmap='gray')
    # Predict using the Neural Network
    prediction = model.predict(X[random_index].reshape(1, 400))
    prediction_p = tf.nn.softmax(prediction)
    yhat = np.argmax(prediction_p)
    # Display the label above the image
    ax.set_title(f"{y[random_index]},{yhat}", fontsize=10)
    ax.set_axis_off()
fig.suptitle("Label, yhat", fontsize=14)
plt.show()
print(f"{display_errors(model,X,y)} errors out of {len(X)} images")