1e-5):
break
last_error = new_error
#print(gradient)
return theta
# Fit the linear model via gradient descent and report the result.
# NOTE: the original text had the garbled identifier `optimal...`, which is a
# SyntaxError; L10 (`optimal[1,0]`) shows the intended name is `optimal`.
optimal = gradient_descent(X, y, alpha)
print('optimal:', optimal)
# error_function returns a 1x1 matrix; [0, 0] extracts the scalar cost.
print('error function:', error_function(optimal, X, y)[0, 0])

# Plot the fitted line y = theta1 * x + theta0 over the data range.
# optimal[0, 0] is the intercept, optimal[1, 0] the slope (column-vector theta).
x = np.linspace(1, 20, 100)
fx = optimal[1, 0] * x + optimal[0, 0]
plt.plot(x, fx)
plt.scatter(np.transpose