Commit 54a5f960 authored by Guillaume Garrigos's avatar Guillaume Garrigos
Browse files

Nasty changes to make it compatible with autograd

parent 117dca61
......@@ -211,16 +211,39 @@ def sequence_gradient(func, x0=np.ones(2),stepsize=0.1,iter_max=100, **param):
Y = [x[1] for x in seq]
return X, Y
def sequence_newton(func, x0=None, stepsize=1, iter_max=10, **param):
    """Run (damped) Newton's method on a 2D function and return the iterates.

    Parameters
    ----------
    func : callable
        ``func(x, y)`` returns the value; ``func(x, y, gradient=True)``
        returns the gradient as a 2-tuple; ``func(x, y, hessian=True)``
        returns the 2x2 Hessian as an array.
    x0 : length-2 array, optional
        Starting point (default: ``np.ones(2)``).
    stepsize : float
        Damping factor applied to each Newton step (1 = pure Newton).
    iter_max : int
        Number of Newton iterations to perform.
    **param
        If ``'plot_box'`` is present, only iterates lying inside that box
        (as tested by ``is_in_box``) are recorded.

    Returns
    -------
    (X, Y) : pair of lists
        The x- and y-coordinates of the recorded iterates.
    """
    x = np.ones(2) if x0 is None else x0
    seq = []
    # NOTE(review): when no plot_box is given, the starting point is never
    # recorded (only the iterates are) — confirm this matches sequence_gradient.
    if 'plot_box' in param and is_in_box(x, param['plot_box']):
        seq.append(x)
    for _ in range(iter_max):
        grad = func(x[0], x[1], gradient=True)  # 2-tuple; np.linalg.solve accepts it
        hess = func(x[0], x[1], hessian=True)   # 2x2 array
        # damped Newton step: x <- x - stepsize * H^{-1} grad
        x = x - stepsize * np.linalg.solve(hess, grad)
        # record the iterate unless a plot_box is given and x falls outside it
        if 'plot_box' not in param or is_in_box(x, param['plot_box']):
            seq.append(x)
    X = [p[0] for p in seq]
    Y = [p[1] for p in seq]
    return X, Y
def quadratic(A, b=None, c=0):
    """Build the quadratic form q(x, y) = 0.5*<Ax, x> + <b, x> + c.

    The returned closure ``func(x, y)`` evaluates the quadratic; with
    ``gradient=True`` it returns the gradient as a 2-tuple, and with
    ``hessian=True`` it returns the (symmetrized) 2x2 Hessian 0.5*(A + A.T).

    Parameters
    ----------
    A : 2x2 array
    b : length-2 array, optional (default: the zero vector)
    c : float
    """
    # avoid a mutable (array) default argument; None stands in for the zero vector
    b = np.zeros(2) if b is None else b

    def func(x, y, gradient=None, hessian=None):
        if gradient is None and hessian is None:
            return 0.5*( x*(A[0,0]*x + A[0,1]*y) + y*(A[1,0]*x + A[1,1]*y) ) + b[0]*x + b[1]*y + c
        elif gradient:
            # the gradient of 0.5*<Ax,x> is the symmetric part of A applied to x
            AA = 0.5*(A + A.T)
            return ( AA[0,0]*x + AA[0,1]*y + b[0], AA[1,0]*x + AA[1,1]*y + b[1] )
        elif hessian:
            return 0.5*(A + A.T)
    return func
......@@ -301,18 +324,30 @@ def plot2d_function(func, fig=None, **plot_param):
# deal with optional parameters
plot_param = Dict(dict_merge(PARAM, plot_param))
have_to_plot, number_plots = get_plots_we_want(**plot_param)
# untangle 2D/1D inconsistencies
func = flatten_2D_function(func)
if fig is None:
fig = plt.gcf()
if 'algo' in plot_param.keys() and plot_param.algo is not None:
if plot_param.algo == 'gradient':
seq = sequence_gradient(func, **plot_param)
if len(seq[0]) == 0: # we are out ouf bounds
if len(seq[0]) == 0: # we are out of bounds
plot_param.sequence = None
else:
plot_param.sequence = seq
s = np.ones(len(plot_param.sequence[0]))*40
s[0] = 120
plot_param.scatter_size = s
if plot_param.algo == 'Newton':
seq = sequence_newton(func, **plot_param)
if len(seq[0]) == 0: # we are out of bounds
plot_param.sequence = None
else:
plot_param.sequence = seq
if 'sequence' in plot_param.keys() and plot_param['sequence'] is not None:
s = np.ones(len(plot_param.sequence[0]))*40
s[0] = 120
plot_param.scatter_size = s
fig.canvas.header_visible = plot_param.header_visible
k = 0
......@@ -322,6 +357,7 @@ def plot2d_function(func, fig=None, **plot_param):
ax = set_axis3d(ax, **plot_param)
ax.plot_surface(*function_values(func, **plot_param), cmap=plot_param.cmap, rstride=1, cstride=1);
ax.set_zlim(plot_param.zlim);
if have_to_plot['levelset']:
k = k+1
ax = fig.add_subplot(1, number_plots, k)
......@@ -332,6 +368,7 @@ def plot2d_function(func, fig=None, **plot_param):
plt.axis('off')
if 'sequence' in plot_param.keys() and plot_param['sequence'] is not None:
ax.scatter(*plot_param['sequence'], s=plot_param.scatter_size)
if have_to_plot['gradient']:
k = k+1
ax = fig.add_subplot(1, number_plots, k)
......@@ -342,6 +379,7 @@ def plot2d_function(func, fig=None, **plot_param):
plt.axis('off')
if 'sequence' in plot_param.keys() and plot_param['sequence'] is not None:
ax.scatter(*plot_param['sequence'], s=plot_param.scatter_size)
if have_to_plot['flow']:
k = k+1
ax = fig.add_subplot(1, number_plots, k)
......@@ -394,7 +432,7 @@ def widget_quadratic(**plot_param):
if plot_param['algo'] is not None:
slider_x0 = FloatSlider(description='$x_0$', value=1.0, **slider_param)
slider_y0 = FloatSlider(description='$y_0$', value=1.0, **slider_param)
slider_stepsize = FloatSlider(description='stepsize', value=0.1, min=0.01, max=1, step=0.05)
slider_stepsize = FloatSlider(description='stepsize', value=1, min=0.01, max=2, step=0.05)
# we initialize everything
A_slider = np.array([[slider_a11.value, slider_a12.value], [slider_a21.value, slider_a22.value]])
......@@ -487,3 +525,38 @@ def widget_quadratic(**plot_param):
# What follows is admittedly ugly glue code reconciling functions that take one
# 2D vector argument with functions that take two separate 1D (x, y) entries —
# it is the fastest fix short of rewriting everything.
from inspect import signature
import autograd
def flatten_2D_function(func):
    # Adapt a function of a single 2-vector z = (x, y) into one taking two
    # scalar entries (x, y), with optional gradient/hessian evaluation via
    # autograd. Functions already taking two or more arguments pass through.
    if len(signature(func).parameters) >= 2:
        return func

    def newfunc(x, y, gradient=None, hessian=None):
        # autograd differentiates w.r.t. float arrays, so coerce ints up front
        z = np.array([float(x), float(y)])
        if gradient is None and hessian is None:
            return func(z)
        if gradient:
            gx, gy = autograd.grad(func)(z)
            return (gx, gy)
        if hessian:
            return autograd.jacobian(autograd.grad(func))(z)

    return newfunc
def flatten_2D_sequence(list2D):
    # Turn a sequence of points [(x0, y0), (x1, y1), ...] into the pair
    # ([x0, x1, ...], [y0, y1, ...]) expected by the plotting code.
    # A length-2 input is assumed to already be in (X, Y) form and is
    # returned untouched — NOTE(review): a sequence of exactly two points
    # is therefore ambiguous; confirm callers never hit that case.
    if len(list2D) == 2:
        return list2D
    xs, ys = [], []
    for point in list2D:
        xs.append(point[0])
        ys.append(point[1])
    return (xs, ys)
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment