Day 16

Galerkin method for an arbitrary differential equation of the form $-\bigl(r(x)\,y'\bigr)' + s(x)\,y = f(x)$

In [8]:
import sympy as sp
sp.init_printing()
In [3]:
from matplotlib import rcdefaults
rcdefaults()  # restore default matplotlib rc parameters
%config InlineBackend.figure_format='retina'   
import seaborn as sns  # wrapper for matplotlib that provides prettier styles and more
import matplotlib.pyplot as plt  # use matplotlib functionality directly
%matplotlib inline   
sns.set()
In [9]:
import numpy as np

Cook up a differential equation with a specified exact solution (the "method of manufactured solutions"), so the numerical error can be measured exactly.

In [5]:
def f_for_chosen(r, s, y):
    """Manufacture the right-hand side f so that y solves -(r y')' + s y = f.

    r, s, y are callables acting on a sympy symbol; returns a lambdified f(x).
    """
    x = sp.symbols('x')
    rhs = s(x) * y(x) - sp.diff(r(x) * sp.diff(y(x), x), x)
    return sp.lambdify(x, sp.expand(rhs))

# Problem data: coefficients r, s and the chosen exact solution y.
def r(x):
    return x**2 + 1

def s(x):
    return 2 - x

def y(x):
    return (1 - x) * x**2
# Manufacture f for the chosen r, s, y above.
f = f_for_chosen( r,s,y )
x = sp.symbols('x')
# Last expression in the cell: displays the manufactured f(x) symbolically (see Out[5]).
f(x)
Out[5]:
$\displaystyle x^{4} + 9 x^{3} - 4 x^{2} + 6 x - 2$
In [6]:
def make_phi(template, h, xi):
    """Build one basis function from a reference `template`.

    The returned phi evaluates the template rescaled to width h and
    recentered at the node xi, i.e. phi(x) = template((x - xi) / h).
    """
    def phi(x):
        rescaled = (x - xi) / h  # map the support [xi-h, xi+h] onto [-1, 1]
        return template(rescaled)
    return phi

def pwlinear_spline(x):
    """Reference hat function: 0 outside [-1, 1), rising 0->1 on [-1, 0),
    falling 1->0 on [0, 1).

    Fixes/generalizations over the original:
    - coerces input to a float array, so an integer array no longer silently
      truncates the result (np.zeros_like would have inherited int dtype);
    - accepts scalars and lists, not just ndarrays (boolean-mask indexing on
      a plain scalar used to raise).
    """
    x = np.asarray(x, dtype=float)
    f = np.zeros_like(x)
    rising = (x >= -1.0) & (x < 0.0)
    falling = (x >= 0.0) & (x < 1.0)
    # np.copyto with `where=` works uniformly for 0-d and n-d arrays.
    np.copyto(f, x + 1.0, where=rising)
    np.copyto(f, 1.0 - x, where=falling)
    return f

def calculate_hat_Aij(r,s,im1,jm1,gridx):
    """Galerkin matrix entry for the piecewise-linear hat basis, via exact
    sympy integration.

    im1, jm1 are 0-based *interior*-node indices; the hats are centered at
    gridx[im1+1] and gridx[jm1+1].  The r-part integrand uses phi' = +-1/h
    and the s-part uses the linear hat shapes directly.

    Assumes a uniform grid: h is taken from the first interval only.
    Returns 0 when |i-j| > 1 (non-overlapping supports).

    Fix: the original did `assert(0)` for jm1 < im1, which is stripped under
    `python -O` and refuses valid requests; since the matrix is symmetric we
    simply mirror the indices instead (backward compatible -- existing
    callers only ever pass jm1 >= im1).
    """
    i = im1 + 1  # shift to full-grid index (node 0 is the left boundary)
    j = jm1 + 1
    x = sp.symbols('x')
    h = gridx[1] - gridx[0]  # uniform spacing assumed
    if j < i:
        # A is symmetric: compute the mirrored upper-triangle entry.
        i, j = j, i
    if i == j:
        # diagonal: the hat overlaps itself on [x_{i-1}, x_{i+1}]
        rpart = sp.integrate(  r(x)/h**2                  , (x, gridx[i-1], gridx[i+1]) )
        spart = sp.integrate(  s(x)*(x-gridx[i-1])**2/h**2, (x, gridx[i-1], gridx[i  ]) ) + \
                sp.integrate(  s(x)*(x-gridx[i+1])**2/h**2, (x, gridx[i  ], gridx[i+1]) )
    elif j == i+1:
        # neighbors: the two hats overlap only on [x_i, x_{i+1}]
        rpart = sp.integrate( -r(x)/h**2, (x,gridx[i  ], gridx[i+1]) )
        spart = sp.integrate(  s(x)*(x-gridx[i])*(gridx[i+1]-x)/h**2, (x,gridx[i  ], gridx[i+1]) )
    else:
        # supports do not overlap
        rpart = 0
        spart = 0
    return rpart + spart

def calculate_hat_bi(f,im1,gridx):
    """Load-vector entry: exact integral of f against the hat centered at
    interior node im1 (0-based), computed with sympy.

    Assumes a uniform grid: h is taken from the first interval only.
    """
    i = im1+1  # full-grid index of this interior node
    h = gridx[1] - gridx[0]
    x = sp.symbols('x')
    # The hat rises linearly on [x_{i-1}, x_i] and falls on [x_i, x_{i+1}].
    rising  = sp.integrate( f(x)*(x-gridx[i-1])/h, (x, gridx[i-1], gridx[i  ]) )
    falling = sp.integrate( f(x)*(gridx[i+1]-x)/h, (x, gridx[i  ], gridx[i+1]) )
    return rising + falling

# --- Figure layout: 2x2 grid with the right column merged into one tall axis ---
fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(15,10))
gs = axs[0,1].get_gridspec()
for ax in axs[:, -1]:
    ax.remove()  # drop the two right-hand axes so one axis can span both rows
axbig = fig.add_subplot(gs[:, -1])  # convergence plot goes here

# Ackleh example
# NOTE(review): these three definitions are dead code -- they are immediately
# shadowed by the r, s, f defined below.  Delete or keep deliberately.
def r(x): return 1+x
def s(x): return 1
def f(x): return 100


# arbitrary diff eq
# NOTE(review): duplicate of f_for_chosen defined in an earlier cell; the
# redefinition makes this cell self-contained but risks the two diverging.
def f_for_chosen(r,s,y):  # find the f that makes y the exact solutions
    x = sp.symbols('x')
    fexpr = sp.expand(-sp.diff( r(x)*sp.diff(y(x),x), x ) + s(x)*y(x))
    return sp.lambdify(x, fexpr)

# Problem data: -( (1+x^2) y' )' + (2-x) y = f, with exact solution y = x^2 (1-x)
# (satisfies the homogeneous Dirichlet conditions y(0) = y(1) = 0).
def r(x): return 1+x**2 
def s(x): return 2-x
def y(x): return x**2*(1-x)
f = f_for_chosen( r,s,y )
x = sp.symbols('x')
f(x)  # NOTE(review): mid-cell bare expression -- it does not display anything here

# Convergence study: solve on successively refined grids, plot the
# approximations and their errors, and record the max-norm error vs n.
ns = []       # number of interior basis functions per run
errors = []   # max-norm error per run
for np2 in [3,5,9,17,33,65]:  # various sizes of basis set

    # build the basis functions (pwlinear hat functions)
    gridx = np.linspace(0,1,np2)
    h = gridx[1] - gridx[0]  # uniform grid spacing
    phis = [make_phi(pwlinear_spline,h,xi) for xi in gridx[1:-1]]  # one hat per interior node
    
    n = np2 - 2  # interior nodes only (endpoints handled by the boundary conditions)
    A = np.zeros((n,n))
    b = np.empty(n)
    for i in range(n):
        for j in range(i,n):  # fill the upper triangle, then mirror
            A[i,j] = calculate_hat_Aij(r,s,i,j,gridx)
            if j!=i: A[j,i] = A[i,j]  # A is symmetric
        b[i] = calculate_hat_bi(f,i,gridx)
    #display(A)
    #display(b)
    
    # solve for the coefficients
    a = np.linalg.solve(A,b)
    
    # make pictures
    x = np.linspace(0,1,500)  # fine plotting grid; NOTE(review): shadows the sympy symbol x
    approx = np.sum([ai*phi(x) for ai,phi in zip(a,phis)],axis=0)  # Galerkin solution on the plot grid
    exact = y(x) #-100*x + 100*np.log(1+x)/np.log(2)
    axs[0,0].plot(x, exact,'k')
    axs[0,0].plot(x, approx,alpha=0.5)
    axs[1,0].plot(x,approx-exact)
    ns.append(n)
    errors.append(np.abs(approx-exact).max())  # max-norm error on the plot grid

axs[0,0].set_ylabel('exact and approximations')
axs[1,0].set_xlabel('x')
axs[1,0].set_ylabel('error(x)')
axbig.set_aspect(1)  # equal log-log aspect so the slope can be read off directly
axbig.loglog(ns,errors,'o')
axbig.set_xlabel('n')
axbig.set_ylabel('max error');

The plot on the right appears to have asymptotic slope $-2$, indicating that the max error goes to zero as $1/N^2$, i.e. as $h^2$.

In [7]:
# Ratios of successive max errors as h is repeatedly halved
# (each ratio should approach 4 for O(h^2) convergence).
[prev / curr for prev, curr in zip(errors, errors[1:])]
Out[7]:
$\displaystyle \left[ 3.0087703398668344, \ 3.486164294533322, \ 3.740761080505155, \ 3.8718222312972643, \ 3.9330583380328883\right]$
In [ ]: