# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2 19:10:44 2017
@author: xiaofeixiazyh
"""
# import packages
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
# load the data
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
##Example of a picture
#index = 24
#plt.imshow(train_set_x_orig[index])
#print("y = " + str(train_set_y[:,index]) + ", it is a " + classes[np.squeeze(train_set_y[:,index])].decode("utf-8") + " picture")
#
#print(np.squeeze(train_set_y[:,index]))
#print(train_set_x_orig.shape)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
#print("Number of training example: m_train = " + str(m_train))
#print("Number of testing example : m_test = " + str(m_test))
#print("Height/width of each image: num_px = " + str(num_px))
#print("Each image is of size:(" + str(num_px) + "," + str(num_px) + ",3)")
#print("train_set_x shape: " + str(train_set_x_orig.shape))
#print("train_set_y shape :" + str(train_set_y.shape))
#print("test_set_x shape: " + str(test_set_x_orig.shape))
#print("test_set_y shape :" + str(test_set_y.shape))
#-----------------------------------------
# reshape the train and test set
#----------------------------------------
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
#print("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
#print("train_set_y shape :" + str(train_set_y.shape))
#print("test_set_x_flatten shape :" + str(test_set_x_flatten.shape))
#print("test_set_y shape : " + str(test_set_y.shape))
#print("sanity check atfer reshapeing : " + str(train_set_x_flatten[0:5,0]))
# standardize the train/test data sets: pixel values lie in [0, 255],
# so dividing by 255 rescales every feature to [0, 1]
train_set_x = train_set_x_flatten / 255
test_set_x = test_set_x_flatten / 255
#print("check after standardizing: " + str(train_set_x[0:5,0]))
# Building the parts of algorithm-----------------------------
#---------------------------------------------------------#
def sigmoid(z):
    """
    Compute the sigmoid of z
    Arguments:
    z -- A scalar or numpy array of any size.
    Return:
    s -- sigmoid(z)
    """
    s = 1. / (1 + np.exp(-z))
    return s
#print("sigmoid(0) = " + str(sigmoid(0)))
#print("sigmoid(9) = " + str(sigmoid(9)))
def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)
    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
    w = np.zeros((dim, 1))
    b = 0
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
#dim = 2
#w, b = initialize_with_zeros(dim)
#print("w = " + str(w))
#print("b = " + str(b))
def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b
    Tips:
    - Write your code step by step for the propagation
    """
    m = X.shape[1]
    # forward propagation: A = sigmoid(w.T X + b)
    A = sigmoid(np.dot(w.T, X) + b)
    # cost J = -(1/m) * sum(Y*log(A) + (1-Y)*log(1-A))
    cost = np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / (-m)
    # backward propagation: gradients of the cost w.r.t. w and b
    dw = np.dot(X, (A - Y).T) / m
    db = np.sum(A - Y) / m
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    cost = np.squeeze(cost)
    assert(cost.shape == ())
    grads = {
        "dw": dw,
        "db": db
    }
    return grads, cost
w, b, X, Y = np.array([[1],[2]]), 2 , np.array([[1,2],[3,4]]), np.array([[1,0]])
#grads, cost = propagate(w,b,X,Y)
#print("dw = " + str(grads["dw"]))
#print("db = " + str(grads["db"]))
#print("cost = " + str(cost))
#==============================================================================
# Start checking from here
#==============================================================================
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """
    This function optimizes w and b by running a gradient descent algorithm
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps
    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
    Tips:
    You basically need to write down two steps and iterate through them:
    1) Calculate the cost and the gradient for the current parameters. Use propagate().
    2) Update the parameters using gradient descent rule for w and b.
    """
    costs = []
    for i in range(num_iterations):
        # compute the cost and gradients for the current parameters
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]
        # gradient descent update
        w = w - learning_rate * dw
        b = b - learning_rate * db
        # record the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
        # print only when requested (the original used "or", which printed
        # every 100 iterations even with print_cost=False)
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
    params = {
        "w": w,
        "b": b
    }
    grads = {
        "dw": dw,
        "db": db
    }
    return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 1000, learning_rate = 0.009, print_cost = False)
#print ("w = " + str(params["w"]))
#print ("b = " + str(params["b"]))
#print ("dw = " + str(grads["dw"]))
#print ("db = " + str(grads["db"]))
def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    # compute the probability that each picture is a cat (the bias term b was
    # missing in the original forward pass)
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # threshold the probability at 0.5 (the original wrote to index [0][1]
        # instead of [0][i] in the else branch)
        if A[0, i] > 0.5:
            Y_prediction[0, i] = 1
        else:
            Y_prediction[0, i] = 0
    assert(Y_prediction.shape == (1, m))
    return Y_prediction
#print("prediction = " + str(predict(w,b,X)))
def model(X_train, Y_train, X_test, Y_test, num_iterations, learning_rate, print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously
    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations
    Returns:
    d -- dictionary containing information about the model.
    """
    # initialize parameters with zeros
    w, b = initialize_with_zeros(X_train.shape[0])
    # gradient descent (pass print_cost through; the original hard-coded False)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost=print_cost)
    w = parameters["w"]
    b = parameters["b"]
    # predict on the test and train sets
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}
    return d
import time
tic = time.process_time()
num_iterations = 10000
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = num_iterations, learning_rate = 0.5, print_cost = True)
toc = time.process_time()
print('Ran %i iterations in %f sec' % (num_iterations, toc - tic))
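# Plot the learning curve from the costs recorded every 100 iterations
# (matplotlib is already imported above).
costs = np.squeeze(d["costs"])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate = " + str(d["learning_rate"]))
plt.show()
# Optional sketch: classify your own image. The filename "my_image.jpg" is a
# hypothetical placeholder; PIL's Image is already imported above.
#my_image = "my_image.jpg"
#image = np.array(Image.open(my_image).resize((num_px, num_px))) / 255.
#my_image_flat = image.reshape((1, num_px * num_px * 3)).T
#my_prediction = predict(d["w"], d["b"], my_image_flat)
#print("y = " + str(np.squeeze(my_prediction)))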
# deeplearning 1: logistic regression as a neural network
# Reposted from blog.csdn.net/xiaofeixiazyh/article/details/78698373