Draw a Decision Boundary Tool

Decision Boundary Visualization (A-Z)

Meaning, Significance, Implementation

Navoneel Chakrabarty

theta_1, theta_2, theta_3 , …., theta_n are the parameters of Logistic Regression and x_1, x_2, …, x_n are the features

where x_1 is the original feature of the dataset
          import numpy arsenic Np
from math spell *
def logistic_regression(X, y, alpha):
    """Fit a logistic-regression model by batch gradient descent.

    Parameters:
        X: (m, n) feature matrix (without the intercept column).
        y: (m,) array of 0/1 labels.
        alpha: learning rate.

    Returns:
        (theta, theta_history, cost) from Gradient_Descent, where theta is
        the fitted (1, n+1) parameter vector including the intercept.
    """
    n = X.shape[1]
    # Prepend a column of ones so theta[0] acts as the intercept term.
    one_column = np.ones((X.shape[0], 1))
    X = np.concatenate((one_column, X), axis=1)
    theta = np.zeros(n + 1)
    # Initial hypothesis for the first gradient step.
    h = hypothesis(theta, X, n)
    theta, theta_history, cost = Gradient_Descent(theta, alpha,
                                                  100000, h, X, y, n)
    return theta, theta_history, cost
def Gradient_Descent(theta, alpha, num_iters, h, X, y, n):
    """Run batch gradient descent for logistic regression.

    Parameters:
        theta: (n+1,) initial parameter vector.
        alpha: learning rate.
        num_iters: number of iterations.
        h: initial hypothesis values for the first update.
        X: (m, n+1) design matrix with intercept column.
        y: (m,) array of 0/1 labels.
        n: number of original features (X has n+1 columns).

    Returns:
        (theta, theta_history, cost): theta reshaped to (1, n+1), the
        per-iteration parameter history, and the per-iteration log-loss.
    """
    theta_history = np.ones((num_iters, n + 1))
    cost = np.ones(num_iters)
    for i in range(0, num_iters):
        # Intercept update uses the plain residual sum.
        theta[0] = theta[0] - (alpha / X.shape[0]) * sum(h - y)
        # Each feature weight uses the residual weighted by its feature column.
        for j in range(1, n + 1):
            theta[j] = theta[j] - (alpha / X.shape[0]) * sum((h - y) *
                                                             X.transpose()[j])
        theta_history[i] = theta
        h = hypothesis(theta, X, n)
        # Binary cross-entropy (log-loss) at this iteration.
        cost[i] = (-1 / X.shape[0]) * sum(y * np.log(h) + (1 - y) *
                                          np.log(1 - h))
    theta = theta.reshape(1, n + 1)
    return theta, theta_history, cost
def hypothesis(theta, X, n):
    """Return the sigmoid of X @ theta for every row of X.

    Parameters:
        theta: parameter vector, reshaped internally to (1, n+1).
        X: (m, n+1) design matrix with intercept column.
        n: number of original features.

    Returns:
        (m,) array of predicted probabilities in (0, 1).
    """
    h = np.ones((X.shape[0], 1))
    theta = theta.reshape(1, n + 1)
    for i in range(0, X.shape[0]):
        # Logistic (sigmoid) function of the linear combination.
        h[i] = 1 / (1 + exp(-float(np.matmul(theta, X[i]))))
    h = h.reshape(X.shape[0])
    return h
          data = np.loadtxt('dataset.txt', delimiter=',')
X_train = information[:,[0,1]]
y_train = data[:,2]
theta, theta_history, cost = logistic_regression(X_train, y_train
, 0.001)

          Xp=np.concatenate((Np.ones((X_train.shape[0],1)), X_train),axis= 1)
h=hypothesis(theta, Xp, Xp.shape[1] - 1)
          import matplotlib.pyplot as plt          c0 = c1 = 0 #            Counter of mark down 0 and label 1 instances
if i in range(0, X.physique[0]):
if y_train[i] == 0:
c0 = c0 + 1
else:
c1 = c1 + 1
x0 = np.ones((c0,2)) # matrix label 0 instances
x1 = np.ones((c1,2)) # intercellular substance label 1 instances
k0 = k1 = 0 for i in range(0,y_train.chassis[0]):
if y_train[i] == 0:
x0[k0] = X_train[i]
k0 = k0 + 1
else:
x1[k1] = X_train[i]
k1 = k1 + 1
X = [x0, x1]
colors = ["green", "blue"]  # colors for the scatter plot (per class)
theta = theta.reshape(3)
# Getting the x co-ordinates of the decision boundary (extend 2 units
# past the data range on each side).
plot_x = np.array([min(X_train[:, 0]) - 2, max(X_train[:, 0]) + 2])
# Getting corresponding y co-ordinates of the decision boundary:
# theta0 + theta1*x + theta2*y = 0  =>  y = -(theta1*x + theta0)/theta2
plot_y = (-1 / theta[2]) * (theta[1] * plot_x + theta[0])
# Plotting the Single Line Decision Boundary
for x, c in zip(X, colors):
    if c == "green":
        plt.scatter(x[:, 0], x[:, 1], color=c, label="Not Admitted")
    else:
        plt.scatter(x[:, 0], x[:, 1], color=c, label="Admitted")
plt.plot(plot_x, plot_y, label="Decision_Boundary")
plt.legend()
plt.xlabel("Marks obtained in 1st Exam")
plt.ylabel("Marks obtained in 2nd Exam")

Obtained Single Line Decision Boundary
          # Plotting decision regions
x_min, x_max = X_train[:, 0].Min dialect() - 1, X_train[:, 0].max() + 1
y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
xx, yy = np.meshgrid(atomic number 93.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
X = np.concatenate((np.ones((xx.shape[0]*xx.shape[1],1))
, np.c_[xx.Ravel(), yy.ravel()]), axis = 1)
h = hypothesis(theta, X, 2)
h = h.reshape(xx.influence) plt.contourf(XX, yy, h)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train,
s=30, edgecolor='k')
plt.xlabel("First Baron Marks of Broughton obtained in 1st Exam")
plt.ylabel("Marks obtained in 2nd Examination")

Obtained Contour-Based Decision Boundary where yellow -> Admitted and blue -> Not Admitted

Draw a Decision Boundary Tool

Source: https://towardsdatascience.com/decision-boundary-visualization-a-z-6a63ae9cca7d

0 Response to "Draw a Decision Boundary Tool"

Post a Comment

Iklan Atas Artikel

Iklan Tengah Artikel 1

Iklan Tengah Artikel 2

Iklan Bawah Artikel