In [4]:
import matplotlib.pyplot as plt
%matplotlib inline
In [5]:
import numpy
In [6]:
from functools import partial

Uncertainty

Let’s start by defining a FOPDT function as in the textbook
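
The first-order-plus-dead-time (FOPDT) form referred to as Equation 7.19 is

\[G_p(s) = \frac{k}{\tau s + 1} e^{-\theta s} \qquad (7.19)\]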

In [7]:
def G_P(k, theta, tau, s):
    """FOPDT transfer function (Equation 7.19): gain k, dead time theta, time constant tau."""
    return k/(tau*s + 1)*numpy.exp(-theta*s)

Let’s see what this looks like for particular values of \(k\), \(\tau\) and \(\theta\)

In [8]:
# Evaluate along the imaginary axis, s = jω, to obtain the frequency response
omega = numpy.logspace(-2, 2, 1000)
s = 1j*omega
In [9]:
Gnom = partial(G_P, 2.5, 2.5, 2.5)

This partial application fixes \(k = \theta = \tau = 2.5\). It is equivalent to writing the wrapper out explicitly (see the addendum at the end of this notebook for more on partial):

In [10]:
def Gnom(s):
    return G_P(2.5, 2.5, 2.5, s)
In [11]:
Gfr = Gnom(s)
In [12]:
pomega = 0.5  # the particular frequency we will focus on
In [13]:
def nominal_curve():
    """Plot the Nyquist curve of the nominal plant."""
    plt.plot(Gfr.real, Gfr.imag)
    plt.axhline(0, color='black')
    plt.axvline(0, color='black')
In [14]:
def nominal_point(pomega):
    """Mark the nominal frequency response at ω = pomega."""
    Gnomp = Gnom(1j*pomega)
    plt.scatter(Gnomp.real, Gnomp.imag, s=100, color='magenta')
In [15]:
Gnom(1j*pomega)
Out[15]:
(-0.8496667432187457-1.310378119365534j)
In [16]:
nominal_curve()
nominal_point(pomega)
../_images/notebooks_Uncertainty_14_0.png

At a particular frequency, let’s look at the cloud of possible plants we get by varying \(k\), \(\tau\) and \(\theta\) over a grid

In [17]:
varrange = numpy.arange(2, 3, 0.1)  # 2.0, 2.1, ..., 2.9 (arange excludes the endpoint)
def cloudpoints(pomega):
    # frequency responses of all 10×10×10 = 1000 parameter combinations at ω = pomega
    points = numpy.array([G_P(k, theta, tau, pomega*1j)
                          for k in varrange
                          for tau in varrange
                          for theta in varrange])
    nominal_curve()
    nominal_point(pomega)
    plt.scatter(points.real, points.imag, color='red', alpha=0.1)
In [18]:
from ipywidgets import interact
import ipywidgets as widgets
In [19]:
interact(cloudpoints, pomega=(0.1, 10))
Out[19]:
<function __main__.cloudpoints>

Let’s try to approximate this region by a disc
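
A disc of radius \(r\) centred on the nominal response \(G(j\omega)\) is the set \(\{G(j\omega) + r\Delta : |\Delta| \leq 1\}\); we choose \(r\) just large enough that the disc covers every plant in the cloud.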

In [20]:
Gnomp = Gnom(1j*pomega)
points = numpy.array([G_P(k, theta, tau, pomega*1j)
                      for k in varrange
                      for tau in varrange
                      for theta in varrange])

# the smallest covering radius is the largest distance from the
# nominal point to any plant in the mesh
radius = max(abs(P - Gnomp) for P in points)
radius
Out[20]:
0.8068845952466887
In [21]:
def discapprox(pomega, radius):
    """Draw the cloud of plants and a disc of the given radius centred on the nominal point."""
    Gnomp = Gnom(1j*pomega)
    c = plt.Circle((Gnomp.real, Gnomp.imag), radius, alpha=0.2)
    cloudpoints(pomega)
    plt.gca().add_artist(c)
    plt.axis('equal')
interact(discapprox, pomega=(0.1, 5), radius=radius)
Out[21]:
<function __main__.discapprox>

The above represents an additive uncertainty description: every possible plant is the nominal model plus a disc-bounded perturbation,

\[G_p(s) = G(s) + w_A(s)\Delta_A(s); \quad |\Delta_A(j\omega)| \leq 1 \quad \forall \omega \qquad (7.20)\]

To visualise this, we can sample values of \(\Delta_A\) in the unit disc and map them through (7.20).
In [22]:
Npoints = 10000
# sample uniformly in the square [-1, 1] × [-1, 1] of the complex plane...
Delta_As = (numpy.random.rand(Npoints)*2 - 1 +
            (numpy.random.rand(Npoints)*2 - 1)*1j)
# ...and keep only the samples inside the unit disc, |Δ_A| < 1
valid_values = numpy.abs(Delta_As) < 1
Delta_As = Delta_As[valid_values]
In [23]:
plt.scatter(Delta_As.real, Delta_As.imag, alpha=0.03)
plt.axis('equal')
Out[23]:
(-1.1075495875096064,
 1.1094475944764177,
 -1.108437989610789,
 1.1089294077137237)
../_images/notebooks_Uncertainty_24_1.png
In [24]:
def discapprox(pomega, radius):
    """Overlay sampled perturbed plants G_p = G + radius·Δ_A on the cloud of plants."""
    Gnomp = Gnom(1j*pomega)
    Gp_A = Gnomp + radius*Delta_As
    plt.scatter(Gp_A.real, Gp_A.imag, alpha=0.03)
    cloudpoints(pomega)
    plt.axis('equal')
interact(discapprox, pomega=(0.1, 5), radius=radius)
Out[24]:
<function __main__.discapprox>

We now build the frequency responses of all the plants in \(\Pi\) (the set of all possible plants).

In [25]:
# each row of Pi is one plant's frequency response over the whole omega vector
Pi = numpy.array([G_P(k, theta, tau, s)
                  for k in varrange
                  for tau in varrange
                  for theta in varrange])
In [26]:
deviations = numpy.abs(Pi - Gfr)  # distance from the nominal response at every frequency
In [27]:
maxima = numpy.max(deviations, axis=0)  # worst-case deviation at each frequency
In [28]:
maxima.max()
Out[28]:
0.806912987162208
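
This maximum over all frequencies is essentially the radius we computed at \(\omega = 0.5\) above, so that frequency is close to the worst case.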
In [29]:
plt.loglog(omega, numpy.abs(deviations.T), color='blue', alpha=0.2)
plt.loglog(omega, maxima, color='magenta')
plt.axhline(radius)
plt.ylim(bottom=1e-2)
Out[29]:
(0.01, 5.958602718540838)
../_images/notebooks_Uncertainty_31_1.png

Optimisation of the maximum
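
Instead of relying on a finite mesh, we can find the worst-case deviation at each frequency by solving

\[\max_{k, \tau, \theta \in [2, 3]} \left|G(j\omega) - G_p(j\omega)\right|\]

with a numerical optimiser. Since scipy.optimize.minimize minimises, we minimise the negative of the deviation.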

In [30]:
from scipy.optimize import minimize
In [31]:
def objective(x, omega):
    # unpack in the same order as G_P's signature: gain, dead time, time constant
    k, theta, tau = x
    s = 1j*omega
    # negative because minimize minimises and we want the maximum deviation
    return -numpy.abs(Gnom(s) - G_P(k, theta, tau, s))
In [32]:
objective([2.5, 2.5, 2.5], 0)
Out[32]:
-0.0
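
At the nominal parameters the deviation is zero, as expected.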
In [42]:
x0 = numpy.array([2.5, 2.5, 2.5])
bounds = [[2, 3]]*3
minimize(objective, x0, args=(0,), bounds=bounds)
Out[42]:
fun: -0.5
 hess_inv: <3x3 LbfgsInvHessProduct with dtype=float64>
      jac: array([-0.99999999,  0.        ,  0.        ])
  message: b'CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL'
     nfev: 8
      nit: 1
   status: 0
  success: True
        x: array([3. , 2.5, 2.5])
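
The worst deviation at \(\omega = 0\) is 0.5: at steady state \(G_p(0) = k\), so the deviation \(|k - 2.5|\) is largest at the gain bound \(k = 3\), which is exactly the solution found above.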
In [59]:
vals = []
starts = 10
for omegai in omega:
    best = float('-inf')
    # We will use "multi-start" strategy
    for start in range(starts):
        x0 = numpy.random.uniform(2, 3, size=3)
        r = minimize(objective,
                     x0,
                     args=omegai,
                     bounds=bounds,
                     method='TNC')  # TNC and L-BFGS-B can handle bounds
        if -r['fun'] > best:
            best = -r['fun']
    vals.append(best)
In [60]:
plt.loglog(omega, maxima, label='Mesh')
plt.loglog(omega, vals, label='Optimisation')
plt.legend()
Out[60]:
<matplotlib.legend.Legend at 0x11a1b05c0>
../_images/notebooks_Uncertainty_38_1.png
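
The optimisation curve lies slightly above the mesh curve: the continuous search over the full box \([2, 3]^3\) reaches parameter combinations (such as the endpoints) that the discrete mesh misses.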
In [61]:
def plot_upper(K, tau, plotmax):
    """Compare a candidate first-order weight w_A = K/(τs + 1) with the deviations."""
    w_A = K/(tau*s + 1)
    if plotmax:
        plt.loglog(omega, maxima, color='blue')
    else:
        plt.loglog(omega, numpy.abs(deviations.T), color='blue', alpha=0.1)
    plt.loglog(omega, numpy.abs(w_A), color='red')
    plt.ylim(bottom=1e-2)
In [62]:
i = interact(plot_upper, K=(0.1, 2, 0.01), tau=(0.01, 1, 0.01),
             plotmax=widgets.Checkbox())
In [63]:
def combined(pomega, K, tau, plotmax):
    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plot_upper(K, tau, plotmax)
    plt.axvline(pomega)
    plt.subplot(1, 2, 2)
    s = 1j*pomega
    radius = numpy.abs(K/(tau*s + 1))  # |w_A(jω)| is the disc radius at this frequency
    discapprox(pomega, radius)
In [64]:
interact(combined, pomega=(0.1, 10), K=(0.1, 2, 0.01), tau=(0.01, 1, 0.01),
         plotmax=widgets.Checkbox())
Out[64]:
<function __main__.combined>

Addendum: understanding the use of partial

In the code above we used the function functools.partial. What’s going on there?

Let’s say we have a function with many arguments, but we want to solve for or optimise over only one of them:

In [65]:
def function_with_many_arguments(a, b, c, d):
    return 1*a + 2*b + 3*c + 4*d

We could define a “wrapper” function which fixes all the other arguments and accepts only the one we need.

In [66]:
def wrapper_function(d):
    return function_with_many_arguments(1, 2, 3, d)
In [67]:
wrapper_function(2)
Out[67]:
22
In [68]:
import scipy.optimize
In [69]:
scipy.optimize.fsolve(wrapper_function, 3)
Out[69]:
array([-3.5])
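
This checks out: fsolve finds the \(d\) for which \(1 \times 1 + 2 \times 2 + 3 \times 3 + 4d = 14 + 4d = 0\), i.e. \(d = -3.5\).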

functools.partial automates the creation of this wrapper function.

In [70]:
from functools import partial
In [71]:
wrapper_function = partial(function_with_many_arguments, 1, 2, 3)
In [72]:
wrapper_function(2)
Out[72]:
22
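
partial also accepts keyword arguments, which lets us fix arguments other than the leading ones. A minimal sketch (wrapper_keywords is just an illustrative name):

wrapper_keywords = partial(function_with_many_arguments, b=2, c=3, d=4)
wrapper_keywords(1)  # same as function_with_many_arguments(1, b=2, c=3, d=4), which is 1 + 4 + 9 + 16 = 30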