import numpy as np

# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534

# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
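
# Quick look at the synthetic data before fitting (a sketch added for illustration;
# the output file name "data.png" is an arbitrary choice).
import matplotlib.pyplot as plt
plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.savefig("data.png")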


def lnlike(theta, x, y, yerr):
    m, b, lnf = theta
    model = m * x + b
    inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
    return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
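
# Note on the likelihood: the quoted errors are treated as underestimated by a
# fraction f of the model value, so the effective variance at each point is
#     s_n^2 = yerr_n^2 + f^2 * (m*x_n + b)^2,
# and the function above returns -1/2 * sum_n [(y_n - model_n)^2 / s_n^2 + ln s_n^2]
# (the constant ln(2*pi) term is dropped since it does not affect the fit).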

import scipy.optimize as op
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]
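
# Report the maximum-likelihood point against the true values (illustrative output only).
print("ML estimates: m = {0:.3f}, b = {1:.3f}, f = {2:.3f}".format(m_ml, b_ml, np.exp(lnf_ml)))
print("True values:  m = {0:.3f}, b = {1:.3f}, f = {2:.3f}".format(m_true, b_true, f_true))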


def lnprior(theta):
    m, b, lnf = theta
    if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
        return 0.0
    return -np.inf


def lnprob(theta, x, y, yerr):
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, x, y, yerr)
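
# Quick sanity check (an addition, not required by emcee): the log-posterior should
# be finite at the true parameters, which lie inside the prior box.
assert np.isfinite(lnprob([m_true, b_true, np.log(f_true)], x, y, yerr))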

ndim, nwalkers = 3, 100
# Standard alternative: start the walkers in a small ball around the ML solution.
# pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
# Here the walkers are instead initialized uniformly over a broad box.
pos = np.random.uniform(-5.0, 5.0, size=(nwalkers, ndim))
import emcee
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))

sampler.run_mcmc(pos, 500)
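
# Rough health check on the run: the mean acceptance fraction across walkers
# (values of roughly 0.2-0.5 are usually considered reasonable).
print("Mean acceptance fraction: {0:.3f}".format(np.mean(sampler.acceptance_fraction)))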


# Raw chain from the sampler, shape (nwalkers, nsteps, ndim).
print(sampler.chain)
# Discard the first 50 steps of each walker as burn-in and flatten the rest.
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
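
# Summarize each parameter by its posterior median and 68% credible interval
# (16th/50th/84th percentiles), using the burn-in cut chosen above.
for name, vals in zip(["m", "b", "ln(f)"], samples.T):
    lo, med, hi = np.percentile(vals, [16, 50, 84])
    print("{0}: {1:.3f} (+{2:.3f} / -{3:.3f})".format(name, med, hi - med, med - lo))
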
import matplotlib.pyplot as plt
plt.figure()

# Per-walker view of the chain: one (nsteps, ndim) array per walker.
first = [x for x in sampler.chain]

# Trace plots: one panel per parameter, showing every walker's chain.
plt.subplot(3, 1, 1)
plt.plot(sampler.chain[:, :, 0].T, color="k", alpha=0.3)
plt.ylabel("$m$")
plt.subplot(3, 1, 2)
plt.plot(sampler.chain[:, :, 1].T, color="k", alpha=0.3)
plt.ylabel("$b$")
plt.subplot(3, 1, 3)
plt.plot(sampler.chain[:, :, 2].T, color="k", alpha=0.3)
plt.ylabel(r"$\ln\,f$")
plt.xlabel("step number")
plt.savefig("chains.png")

# Print each walker's chain and the total number of walkers.
for walker in sampler.chain:
    print(walker)
print(len(sampler.chain))
import corner
fig = corner.corner(samples, labels=["$m$", "$b$", r"$\ln\,f$"],
                    truths=[m_true, b_true, np.log(f_true)])
fig.savefig("tutorial.pdf")
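
# Posterior predictive check (a sketch; the file name "line_fits.png" is an arbitrary
# choice): overlay a random subset of posterior draws of the line on the data.
plt.figure()
for m_s, b_s, _ in samples[np.random.randint(len(samples), size=100)]:
    plt.plot(x, m_s * x + b_s, color="k", alpha=0.05)
plt.errorbar(x, y, yerr=yerr, fmt=".b", capsize=0)
plt.plot(x, m_true * x + b_true, color="r", lw=2, label="truth")
plt.legend()
plt.savefig("line_fits.png")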