Interactive fits

This notebook showcases the interactive fitting capability of iminuit. Interactive fitting is useful to find good starting values and to debug the fit.

Note: If you see this notebook on ReadTheDocs or otherwise statically rendered, changing the sliders won’t change the plot. This requires a running Jupyter kernel.

[1]:
from iminuit import cost
from iminuit import Minuit
from numba_stats import norm, t, bernstein, truncexpon
import numpy as np
from matplotlib import pyplot as plt

UnbinnedNLL

[2]:
rng = np.random.default_rng(1)

# Toy data: Gaussian signal plus an exponential background truncated to [0, 1).
s = rng.normal(0.5, 0.1, size=1000)
b = rng.exponential(1, size=1000)
b = b[b < 1]
x = np.append(s, b)

# Starting values: signal fraction f, mu, sigma, background slope.
truth = len(s) / len(x), 0.5, 0.1, 1.0

def model(x, f, mu, sigma, slope):
    """Mixture pdf: fraction f of a normal plus (1 - f) of a
    truncated exponential on the interval [0, 1]."""
    return f * norm.pdf(x, mu, sigma) + (1 - f) * truncexpon.pdf(x, 0, 1, 0, slope)

c = cost.UnbinnedNLL(x, model)
m = Minuit(c, *truth)
m.limits["f", "mu"] = (0, 1)
m.limits["sigma", "slope"] = (0, None)

# model_points controls the number of points used to draw the model curve.
m.interactive(model_points=1000)
[2]:

ExtendedUnbinnedNLL

[3]:
rng = np.random.default_rng(1)

# Toy data: Gaussian signal plus an exponential background truncated to [0, 1).
s = rng.normal(0.5, 0.1, size=1000)
b = rng.exponential(1, size=1000)
b = b[b < 1]
x = np.append(s, b)

# Starting values: signal yield s, mu, sigma, background yield b, slope.
truth = len(s), 0.5, 0.1, len(b), 1.0

def model(x, s, mu, sigma, b, slope):
    """Yield-scaled density; returns (total yield, density) as
    required by ExtendedUnbinnedNLL."""
    x = s * norm.pdf(x, mu, sigma) + b * truncexpon.pdf(x, 0, 1, 0, slope)
    return s + b, x

c = cost.ExtendedUnbinnedNLL(x, model)
m = Minuit(c, *truth)
m.limits["mu"] = (0, 1)
m.limits["sigma", "slope", "s", "b"] = (0, None)

m.interactive()
[3]:

BinnedNLL

[4]:
def model(xe, f, mu, sigma, nuinv, slope):
    """Mixture cdf: Student's t signal (renormalized to [0, 1]) plus a
    truncated-exponential background, weighted by the signal fraction f."""
    nu = 1 / nuinv
    # Renormalize the t cdf so the signal integrates to 1 on the fit window [0, 1].
    a, b = t.cdf((0, 1), nu, mu, sigma)
    sn = f * (t.cdf(xe, nu, mu, sigma) - a) / (b - a)
    bn = (1 - f) * truncexpon.cdf(xe, 0, 1, 0, slope)
    return sn + bn

rng = np.random.default_rng(1)

# f, mu, sigma, nuinv, slope
truth = 0.5, 0.5, 0.1, 0.1, 1

xe = np.linspace(0, 1, 100)
n = rng.poisson(1000 * np.diff(model(xe, *truth)))

c = cost.BinnedNLL(n, xe, model)

m = Minuit(c, *truth)
m.limits["sigma", "slope"] = (0, None)
m.limits["mu", "f", "nuinv"] = (0, 1)

m.interactive()
[4]:
[5]:
# Repeat the fit with part of the data excluded: only bins whose centers
# lie outside the window [0.2, 0.8] enter the likelihood.
c = cost.BinnedNLL(n, xe, model)

centers = 0.5 * (xe[1:] + xe[:-1])
c.mask = np.abs(centers - 0.5) > 0.3

m = Minuit(c, *truth)
m.limits["mu", "f", "nuinv"] = (0, 1)
m.limits["sigma", "slope"] = (0, None)

m.interactive()
[5]:

ExtendedBinnedNLL

[6]:
def model(xe, s, mu, sigma, nuinv, b1, b2, b3):
    """Cumulative model: Student's t signal scaled by the yield s,
    plus a Bernstein-polynomial background."""
    nu = 1 / nuinv
    signal = s * t.cdf(xe, nu, mu, sigma)
    background = bernstein.integral(xe, (b1, b2, b3), 0, 1)
    return signal + background

# s, mu, sigma, nuinv, b1, b2, b3
truth = 1000., 0.5, 0.1, 0.1, 1000., 3000., 2000.

xe = np.linspace(0, 1, 100)

rng = np.random.default_rng(1)
n = rng.poisson(np.diff(model(xe, *truth)))

c = cost.ExtendedBinnedNLL(n, xe, model)

m = Minuit(c, *truth)
m.limits["s", "sigma", "b1", "b2", "b3"] = (0, None)
m.limits["mu", "nuinv"] = (0, 1)

m.interactive()
[6]:

You can pass a custom plotting routine to Minuit.interactive to draw more detail. A simple function that accesses data from the outer scope would work, but in the following example we create a class that stores the cost function — which carries all the data we need — because this notebook repeatedly overwrites the variables in the outer scope.

[7]:
# Visualize signal and background components with different colors
class Plotter:
    """Plotting callback for Minuit.interactive.

    Stores the cost function, which carries the binned data and the
    scaled cdf, so the callback does not depend on outer-scope variables.
    """

    def __init__(self, cost):
        self.cost = cost

    def __call__(self, args):
        # args holds the current parameter values: (s, mu, sigma, nuinv, b1, b2, b3).
        xe = self.cost.xe
        n = self.cost.data
        cx = 0.5 * (xe[1:] + xe[:-1])
        # Data points with Poisson (sqrt(n)) error bars.
        plt.errorbar(cx, n, n ** 0.5, fmt="ok")
        # Signal component alone: background coefficients set to zero.
        sm = np.diff(self.cost.scaled_cdf(xe, *args[:4], 0, 0, 0))
        # Background component alone: signal yield set to zero.
        bm = np.diff(self.cost.scaled_cdf(xe, 0, *args[1:]))
        plt.stairs(bm, xe, fill=True, color="C1")
        # Draw the signal stacked on top of the background.
        plt.stairs(bm + sm, xe, baseline = bm, fill=True, color="C0")

m.interactive(Plotter(c))
[7]:
[8]:
c = cost.ExtendedBinnedNLL(n, xe, model)

# Mask out the central region so only the sidebands enter the fit.
cx = 0.5 * (xe[1:] + xe[:-1])
c.mask = np.abs(cx - 0.5) > 0.3

m = Minuit(c, *truth)
# Note: "nuinv" previously appeared in both limit assignments; the second
# silently overwrote the first, so it belongs only in the (0, 1) group.
m.limits["s", "sigma", "b1", "b2", "b3"] = (0, None)
m.limits["mu", "nuinv"] = (0, 1)
# Freeze the signal shape and set its yield to zero: background-only fit.
m.fixed["s", "mu", "sigma", "nuinv"] = True
m.values["s"] = 0

m.interactive()
[8]:

Template

[9]:
# Build a toy dataset together with high-statistics templates for the
# background (truncated exponential) and the signal (normal) shapes.
xe = np.linspace(0, 1, 20)
bkg_shape = np.diff(truncexpon.cdf(xe, 0, 1, 0, 1))
sig_shape = np.diff(norm.cdf(xe, 0.5, 0.1))

rng = np.random.default_rng(1)
n = rng.poisson(1000 * bkg_shape + 100 * sig_shape)
b = rng.poisson(1e4 * bkg_shape)
s = rng.poisson(1e2 * sig_shape)

c = cost.Template(n, xe, (b, s))

m = Minuit(c, 1000, 100, name=("b", "s"))
m.limits = (0, None)

m.interactive()
[9]:
[10]:
# Same template fit, but with the bins around the peak masked out.
c = cost.Template(n, xe, (b, s))
centers = 0.5 * (xe[1:] + xe[:-1])
c.mask = np.abs(centers - 0.5) > 0.2

m = Minuit(c, 1000, 100, name=("b", "s"))
m.limits = (0, None)

m.interactive()
[10]:

LeastSquares

[11]:
def model(x, a, b):
    """Straight line: intercept a plus slope b times x."""
    return a + b * x

# Create a fresh generator here instead of relying on the `rng` left over
# from an earlier cell, so this cell also works when run on its own.
rng = np.random.default_rng(1)

truth = (1., 2.)
x = np.linspace(0, 1)
ym = model(x, *truth)
ye = 0.1
y = rng.normal(ym, ye)

c = cost.LeastSquares(x, y, ye, model)

m = Minuit(c, *truth)
m.interactive()
[11]:
[12]:
# Refit the line using only the points outside the band 0.2 <= x <= 0.6.
c = cost.LeastSquares(x, y, ye, model)
c.mask = (x < 0.2) | (x > 0.6)

m = Minuit(c, *truth)
m.interactive()
[12]:

Cost functions with shared parameters

[13]:
def model(xe, s, mu, sigma, nuinv):
    """Cumulative signal model: yield s times a Student's t cdf."""
    nu = 1 / nuinv
    return s * t.cdf(xe, nu, mu, sigma)

# s, mu, sigma, nuinv
truth = 100., 0.5, 0.1, 0.5

rng = np.random.default_rng(1)
xe = np.linspace(0, 1, 20)
# Distinct name for the expected counts; the original reused "m", which
# was then shadowed by the Minuit instance below.
expected = np.diff(model(xe, *truth))
n = rng.poisson(expected)

# Combine the binned likelihood with a Gaussian penalty (external
# knowledge) on the shared parameters mu and sigma.
c = cost.ExtendedBinnedNLL(n, xe, model) + cost.NormalConstraint(["mu", "sigma"], [0.5, 0.1], [0.1, 0.1])

m = Minuit(c, *truth)

m.interactive()
[13]: