Section 5.4 — Bayesian difference between means#

This notebook contains the code examples from Section 5.4 Bayesian difference between means from the No Bullshit Guide to Statistics.

Notebook setup#

# load Python modules
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

import bambi as bmb
import arviz as az
# Figures setup
plt.clf()  # needed otherwise `sns.set_theme` doesn't work
from plot_helpers import RCPARAMS
RCPARAMS.update({"figure.figsize": (5, 3)})   # good for screen
# RCPARAMS.update({"figure.figsize": (5, 1.6)})  # good for print
sns.set_theme(
    context="paper",
    style="whitegrid",
    palette="colorblind",
    rc=RCPARAMS,
)

# High-resolution please
%config InlineBackend.figure_format = "retina"

# Where to store figures
from ministats.utils import savefigure
DESTDIR = "figures/bayes/dmeans"
# set random seed for repeatability
np.random.seed(42)
#######################################################

Introduction#

We’ll now revisit the problem of comparing two groups, this time from a Bayesian perspective.

Model#

The data models for the two groups are based on Student’s \(t\)-distribution:

\[ X \sim \mathcal{T}(\nu, M_X, \Sigma_X) \qquad \text{and} \qquad Y \sim \mathcal{T}(\nu, M_Y, \Sigma_Y). \]

We’ll use a normal prior for the means, half-\(t\) priors for standard deviations, and a gamma distribution for the degrees of freedom parameter:

\[ M_X, M_Y \sim \mathcal{N}(\mu_{M}, \sigma_{M}), \quad \Sigma_X, \Sigma_Y \sim \mathcal{T}^+\!(\nu_{\Sigma}, \sigma_{\Sigma}), \quad \nu \sim \textrm{Gamma}(\alpha,\beta). \]
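To get a feel for this data model, here is a minimal sketch (with illustrative parameter values, using SciPy's location-scale parametrization of the \(t\)-distribution) that draws a few samples from \(\mathcal{T}(\nu, M, \Sigma)\):

# illustrative values: T(nu=10, M=8, Sigma=2)
from scipy.stats import t as tdist
rv_X = tdist(df=10, loc=8, scale=2)
rv_X.rvs(size=5, random_state=42)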

Bambi formula objects#

import bambi as bmb
formula = bmb.Formula("y ~ 0 + group",
                      "sigma ~ 0 + group")
formula
Formula('y ~ 0 + group', 'sigma ~ 0 + group')
formula.main, formula.additionals
('y ~ 0 + group', ('sigma ~ 0 + group',))
formula.get_all_formulas()
['y ~ 0 + group', 'sigma ~ 0 + group']

Choosing priors for log-sigma#

Normal distribution for log-sigma#

Bambi models the sigma parameter on the log scale (note the link sigma = log in the model summaries below), so we place a normal prior on \(\log\sigma\) and examine what density this implies for \(\sigma\) itself.

mu_Sigma, sigma_Sigma = 0, 1

from scipy.stats import norm
logsigs = np.linspace(-5, 5, 1000)
density_logsigs = norm(mu_Sigma, sigma_Sigma).pdf(logsigs)
sigmas = np.exp(logsigs)
# Apply the change of variables to get the density for sigma
# based on the Jacobian adjustment |d(log(sigma))/d(sigma)| = 1/sigma
density_sigmas = density_logsigs / sigmas  

# Plot the transformed density using seaborn
ax = sns.lineplot(x=sigmas, y=density_sigmas)
ax.set_xlabel('$\\sigma$')
ax.set_ylabel('Density')
ax.set_title('PDF of sigma = exp(log_sigma) when log_sigma ~ N(0,1)')
ax.set_xlim(0, 10)
ax.set_ylim(0, 0.7);
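In math form: if \(\log\Sigma \sim \mathcal{N}(\mu_{\Sigma}, \sigma_{\Sigma})\), the change of variables formula gives the density of \(\Sigma = \exp(\log\Sigma)\) as

\[ f_{\Sigma}(\sigma) = f_{\log\Sigma}(\log\sigma) \cdot \left| \frac{d \log\sigma}{d\sigma} \right| = f_{\log\Sigma}(\log\sigma) \cdot \frac{1}{\sigma}, \]

which is what the division by `sigmas` in the code above computes.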
mu_Sigma, sigma_Sigma = 0, 1

# verify using simulation
N = 10000
np.random.seed(44)
logsigs = norm(mu_Sigma, sigma_Sigma).rvs(N)
sigs = np.exp(logsigs)
ax = sns.histplot(sigs)
ax.set_xlim(0, 15);
sigs.mean(), sigs.std(ddof=1)
(1.6513486244380393, 2.1603481268169995)
# theoretical mean and standard deviation of the lognormal distribution
mean = np.exp(mu_Sigma + sigma_Sigma**2/2)
std = np.sqrt((np.exp(sigma_Sigma**2) - 1) * np.exp(2*mu_Sigma + sigma_Sigma**2))
mean, std
(1.6487212707001282, 2.1611974158950877)

In order to produce a lognormal distribution with desired mean \(\mu_X\) and variance \(\sigma_X^2\), one uses

\[ \mu = \ln\left( \frac{ \mu_X^2 }{\ \sqrt{ \mu_X^2 + \sigma_X^2\ }\ }\right) \quad \text{and} \quad \sigma^2 = \ln\left( 1 + \frac{\ \sigma_X^2\ }{ \mu_X^2 } \right). \]

For example, if we want a prior on \(\Sigma\) with mean 1.2 and standard deviation 0.2, we get:

mean = 1.2
std = 0.2
mu = np.log(mean**2 / np.sqrt(mean**2 + std**2))
sigma = np.sqrt(np.log(1 + std**2/mean**2))
mu, sigma
# ≈ (0.1686, 0.1655)

Gumbel distribution?#

Another option we explored is an exp-Gumbel prior for \(\Sigma\) (the distribution of \(e^G\) when \(G\) is Gumbel-distributed), which the commented code below compares to an exponential prior.

# np.random.seed(43)
# from scipy.stats import gumbel_r, expon
# N = 1000
# gumbels = gumbel_r(loc=1.609, scale=1).rvs(N)
# exp_gumbels = np.exp(gumbels)
# exps = expon(loc=0, scale=5).rvs(N)
# ax = sns.histplot(exp_gumbels, alpha=0.2, stat="density", label="expgumbel")
# sns.histplot(exps, alpha=0.2, stat="density", ax=ax, label="exp")
# ax.set_xlim(0, 40);
# ax.legend();

Choosing priors for the degrees of freedom parameter#

from scipy.stats import gamma
from ministats import plot_pdf

alpha, beta = 2, 0.1
rv_nu = gamma(a=alpha, scale=1/beta)
plot_pdf(rv_nu, rv_name="ν");
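As a sanity check on this choice, we can look at the prior mean \(\alpha/\beta = 20\) and standard deviation \(\sqrt{\alpha}/\beta \approx 14.1\) of the \(\text{Gamma}(2, 0.1)\) distribution: the prior favours moderate degrees of freedom while still allowing small, heavy-tailed values.

# prior mean and standard deviation of nu ~ Gamma(alpha=2, beta=0.1)
rv_nu.mean(), rv_nu.std()
(20.0, 14.142135623730951)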
# # FIGURES ONLY
# from scipy.stats import gamma
# from scipy.stats import norm
# from ministats import plot_pdf

# with plt.rc_context({"figure.figsize":(6,2.2)}):
#     fig, (ax1, ax2) = plt.subplots(1, 2)
#
#     # log(sigma) ~ N(0,1)
#     logsig = np.linspace(-5, 5, 1000)
#     density_logsig = norm(0,1).pdf(logsig)
#     sigma = np.exp(logsig)
#     density_sigma = density_logsig / sigma
#     sns.lineplot(x=sigma, y=density_sigma, ax=ax1)
#     ax1.set_xlabel('$\\sigma$')
#     ax1.set_ylabel('$f_{\\Sigma}$')
#     ax1.set_title("(a) log($\\sigma$) ~ $\\mathcal{N}$(0,1)")
#     ax1.set_xlim(-0.2, 10);
#
#     # nu ~ Gamma(2,0.1)
#     alpha, beta = 2, 0.1
#     rv_nu = gamma(a=2, scale=1/beta)
#     plot_pdf(rv_nu, rv_name="ν", ax=ax2)
#     ax2.set_title('(b) $\\nu$ ~ Gamma(2,0.1)')
#     ax2.set_ylabel('$f_{\\nu}$')
#     ax2.set_xlim(-1, 75);
#
#     filename = os.path.join(DESTDIR, "log_sigma_normal_and_gamma_priors.pdf")
#     savefigure(fig, filename)

Example 1: comparing electricity prices#

We compare electricity prices from the East End and the West End of the city.

Electricity prices dataset#

eprices = pd.read_csv("../datasets/eprices.csv")
eprices.groupby("loc")["price"].describe()
      count  mean      std       min  25%  50%  75%   max
loc
East  9.0    6.155556  0.877655  4.8  5.5  6.3  6.5   7.7
West  9.0    9.155556  1.562139  6.8  8.3  8.6  10.0  11.8
eprices["price"].mean(), eprices["price"].std()
(7.655555555555556, 1.973120020267162)

Bayesian model#

\[\begin{align*} X_W \sim \mathcal{T}(\nu, M_W, \Sigma_W), \qquad \qquad X_E \sim \mathcal{T}(\nu, M_E, \Sigma_E), \qquad\qquad \\ M_W, M_E \!\sim\! \mathcal{N}(\mu_{M}, \sigma_{M}), \quad \log\Sigma_W, \log\Sigma_E\!\sim\!\mathcal{N}(0,1), \quad \nu \!\sim\! \text{Gamma}(2,0.1). \end{align*}\]

Bambi model#

formula1 = bmb.Formula("price ~ 0 + loc",
                       "sigma ~ 0 + loc")

priors1 = {
    "loc": bmb.Prior("Normal", mu=8, sigma=5),
    "sigma": {
        "loc": bmb.Prior("Normal", mu=0, sigma=1)
    },
    "nu": bmb.Prior("Gamma", alpha=2, beta=0.1),
}

mod1 = bmb.Model(formula=formula1,
                 family="t",
                 link="identity",
                 priors=priors1,
                 data=eprices)
mod1
       Formula: price ~ 0 + loc
                sigma ~ 0 + loc
        Family: t
          Link: mu = identity
                sigma = log
  Observations: 18
        Priors: 
    target = mu
        Common-level effects
            loc ~ Normal(mu: 8.0, sigma: 5.0)
        
        Auxiliary parameters
            nu ~ Gamma(alpha: 2.0, beta: 0.1)
    target = sigma
        Common-level effects
            sigma_loc ~ Normal(mu: 0.0, sigma: 1.0)
mod1.build()
mod1.graph()

# # FIGURES ONLY
# filename = os.path.join(DESTDIR, "example1_eprices_mod1_graph")
# mod1.graph(name=filename, fmt="png", dpi=300)

Model fitting and analysis#

idata1 = mod1.fit(random_seed=[42,43,44,45])
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [nu, loc, sigma_loc]

Sampling 2 chains for 1_000 tune and 1_000 draw iterations (2_000 + 2_000 draws total) took 2 seconds.
We recommend running at least 4 chains for robust computation of convergence diagnostics

Processing the results#

Calculate derived quantities used for the analysis plots and summaries.

post1 = idata1["posterior"]

# Calculate the difference between means
post1["mu_East"] = post1["loc"].sel(loc_dim="East")
post1["mu_West"] = post1["loc"].sel(loc_dim="West")
post1["dmeans"] = post1["mu_West"] - post1["mu_East"]

# Calculate sigmas from log-sigmas
#######################################################
logsig_E = post1["sigma_loc"].sel(sigma_loc_dim="East")
logsig_W = post1["sigma_loc"].sel(sigma_loc_dim="West")
post1["sigma_East"] = np.exp(logsig_E)
post1["sigma_West"] = np.exp(logsig_W)

# Calculate the difference between standard deviations
post1["dstd"] = post1["sigma_West"]-post1["sigma_East"]

# Effect size (Cohen's d, using the pooled variance)
#######################################################
pvar = (post1["sigma_West"]**2 + post1["sigma_East"]**2)/2
post1["cohend"] = post1["dmeans"] / np.sqrt(pvar)
# #ALT: use helper function
# from ministats import calc_dmeans_stats
# calc_dmeans_stats(idata1, group_name="loc")
az.summary(idata1, kind="stats", hdi_prob=0.95,
           var_names=["mu_West", "mu_East", "dmeans", "sigma_West", "sigma_East", "dstd", "nu", "cohend"])
mean sd hdi_2.5% hdi_97.5%
mu_West 9.087 0.560 7.953 10.184
mu_East 6.155 0.320 5.463 6.759
dmeans 2.932 0.646 1.669 4.271
sigma_West 1.567 0.447 0.906 2.489
sigma_East 0.917 0.281 0.474 1.438
dstd 0.650 0.531 -0.303 1.800
nu 21.043 14.429 2.581 49.155
cohend 2.360 0.703 0.928 3.722
from ministats import plot_dmeans_stats
plot_dmeans_stats(mod1, idata1, group_name="loc");
# # FIGURES ONLY
# from ministats import plot_dmeans_stats
# plot_dmeans_stats(mod1, idata1, group_name="loc",
#                   ppc_xlims=[2,15], figsize=(6,6.5));
# filename = os.path.join(DESTDIR, "example1_dmeans_plots.pdf")
# savefigure(plt.gcf(), filename)

Compare to frequentist results#

from scipy.stats import ttest_ind
pricesW = eprices[eprices["loc"]=="West"]["price"]
pricesE = eprices[eprices["loc"]=="East"]["price"]

res1 = ttest_ind(pricesW, pricesE, equal_var=False)
res1.statistic, res1.pvalue, res1.df
(5.022875513276464, 0.00025703383372176116, 12.592817027231032)
from ministats import cohend2
cohend2(pricesW, pricesE)
2.3678062243290996
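We can also compare interval estimates. Recent versions of SciPy expose a `confidence_interval` method on the t-test result, which we can contrast with the 95% highest density interval for `dmeans` reported above:

# 95% confidence interval for the difference between means (Welch's t-test)
ci1 = res1.confidence_interval(confidence_level=0.95)
ci1.low, ci1.high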

Conclusions#
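The posterior mean of the difference between means is about 2.93, with a 95% highest density interval of \([1.67, 4.27]\), which lies entirely above zero, so we can be quite confident that electricity prices in the West End are higher than in the East End. The conclusion agrees with the frequentist analysis: Welch's t-test gives \(p \approx 0.00026\), and the two effect size estimates are nearly identical (posterior mean Cohen's \(d\) of 2.36 vs. 2.37).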

Example 2: comparing IQ scores#

We’ll look at IQ scores data taken from the paper Bayesian Estimation Supersedes the t-Test (BEST) by John K. Kruschke.

A smart drug was administered to the treatment group, and we want to compare the treated subjects’ IQ scores to those of the control group. Note that the data contains outliers.

cf. compare_iqs2_many_ways.ipynb

IQ scores dataset#

iqs2 = pd.read_csv("../datasets/exercises/iqs2.csv")
iqs2.groupby("group")["iq"].describe()
       count  mean        std       min   25%    50%    75%    max
group
ctrl   42.0   100.357143  2.516496  88.0  100.0  100.5  101.0  105.0
treat  47.0   101.914894  6.021085  82.0  100.0  102.0  103.0  124.0
sns.histplot(data=iqs2, x="iq", hue="group");

Bayesian model#

The model is the same as in Example 1, with the priors adapted to the IQ scale:

\[\begin{align*} X_T \sim \mathcal{T}(\nu, M_T, \Sigma_T), \qquad \qquad X_C \sim \mathcal{T}(\nu, M_C, \Sigma_C), \qquad\qquad \\ M_T, M_C \!\sim\! \mathcal{N}(100, 35), \quad \log\Sigma_T, \log\Sigma_C \!\sim\! \mathcal{N}(0,1), \quad \nu \!\sim\! \text{Gamma}(2,0.1). \end{align*}\]

Bambi model#

formula2 = bmb.Formula("iq ~ 0 + group",
                       "sigma ~ 0 + group")

priors2 = {
    "group": bmb.Prior("Normal", mu=100, sigma=35),
    "sigma": {
        "group": bmb.Prior("Normal", mu=0, sigma=1)
    },
    "nu": bmb.Prior("Gamma", alpha=2, beta=0.1),
}


mod2 = bmb.Model(formula=formula2,
                 family="t",
                 link="identity",
                 priors=priors2,
                 data=iqs2)

mod2
       Formula: iq ~ 0 + group
                sigma ~ 0 + group
        Family: t
          Link: mu = identity
                sigma = log
  Observations: 89
        Priors: 
    target = mu
        Common-level effects
            group ~ Normal(mu: 100.0, sigma: 35.0)
        
        Auxiliary parameters
            nu ~ Gamma(alpha: 2.0, beta: 0.1)
    target = sigma
        Common-level effects
            sigma_group ~ Normal(mu: 0.0, sigma: 1.0)
mod2.build()
mod2.graph()

# # FIGURES ONLY
# filename = os.path.join(DESTDIR, "example2_iqs_mod2_graph")
# mod2.graph(name=filename, fmt="png", dpi=300)

Model fitting and analysis#

idata2 = mod2.fit(random_seed=[42,43,44,45])
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [nu, group, sigma_group]


Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 1 seconds.
from ministats import calc_dmeans_stats
calc_dmeans_stats(idata2, group_name="group");
az.summary(idata2, kind="stats", hdi_prob=0.95,
           var_names=["mu_treat", "mu_ctrl", "dmeans", "sigma_treat", "sigma_ctrl", "dstd", "nu", "cohend"])
mean sd hdi_2.5% hdi_97.5%
mu_treat 101.545 0.376 100.760 102.245
mu_ctrl 100.525 0.206 100.118 100.916
dmeans 1.020 0.424 0.196 1.843
sigma_treat 1.931 0.400 1.184 2.711
sigma_ctrl 1.011 0.195 0.666 1.411
dstd 0.920 0.414 0.107 1.705
nu 1.794 0.449 1.059 2.662
cohend 0.679 0.301 0.068 1.243
from ministats import plot_dmeans_stats
plot_dmeans_stats(mod2, idata2, group_name="group", ppc_xlims=[90,110]);
# # FIGURES ONLY
# plot_dmeans_stats(mod2, idata2, group_name="group",
#                   ppc_xlims=[90,110], figsize=(6,6.5));
# filename = os.path.join(DESTDIR, "example2_dmeans_plots.pdf")
# savefigure(plt.gcf(), filename)

Compare to frequentist results#

from scipy.stats import ttest_ind

treated = iqs2[iqs2["group"]=="treat"]["iq"].values
controls = iqs2[iqs2["group"]=="ctrl"]["iq"].values

res2 = ttest_ind(treated, controls, equal_var=False)
res2.statistic, res2.pvalue
(1.622190457290228, 0.10975381983712836)
from ministats import cohend2
cohend2(treated, controls)
0.3309654530139732
# Optionally, test whether the two groups have equal variances
# from scipy.stats import levene
# levene(treated, controls)

Conclusions#
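The 95% highest density interval for the difference between means is \([0.20, 1.84]\), which excludes zero, so the Bayesian analysis finds a credible positive effect of the smart drug. In contrast, Welch's t-test fails to reach significance (\(p \approx 0.11\)). The disagreement comes from the outliers: the robust \(t\) data model (note the small posterior estimate \(\nu \approx 1.8\)) downweights them, while the t-test statistics are sensitive to them. This is precisely the point made in Kruschke's BEST paper.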

Explanations#

Alternative choices of priors#

For example, the BEST model (Bayesian estimation supersedes the t-test, by John K. Kruschke) uses a very wide uniform prior for the standard deviations and a shifted exponential prior \(\nu \sim \text{Expon}(1/29) + 1\) for the degrees of freedom parameter.

If we use a model with a common standard deviation for both groups, we obtain the Bayesian analogue of the pooled-variance t-test.
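For example, here is a sketch of such a common-sigma model for the electricity prices data; omitting the sigma part of the formula means Bambi assigns a single auxiliary sigma parameter shared by both groups:

# common-sigma (pooled) model: no "sigma ~ 0 + loc" part
formula_pooled = bmb.Formula("price ~ 0 + loc")
mod_pooled = bmb.Model(formula=formula_pooled,
                       family="t",
                       link="identity",
                       data=eprices)
# idata_pooled = mod_pooled.fit(random_seed=42)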

Sensitivity analysis and robustness checks#

# TODO

Performance tests#

Simulated datasets#

Simulate a grid of datasets that varies the true difference between means \(\Delta\), the proportion of outliers, and the group sample sizes \(n\) and \(m\); a sketch of the data-generating function follows.
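A minimal sketch of the data-generating function (all names and parameter choices below are illustrative assumptions, not final settings):

import numpy as np
import pandas as pd

def gen_dataset(delta, prop_outliers, n, m, seed=None):
    """Two groups whose means differ by `delta`, with a fraction
    `prop_outliers` of contaminated observations in each group."""
    rng = np.random.default_rng(seed)
    xs = rng.normal(loc=100, scale=1, size=n)
    ys = rng.normal(loc=100 + delta, scale=1, size=m)
    for arr in (xs, ys):
        k = int(prop_outliers * len(arr))
        if k > 0:
            idx = rng.choice(len(arr), size=k, replace=False)
            arr[idx] += rng.normal(loc=0, scale=10, size=k)
    return pd.DataFrame({"group": ["x"]*n + ["y"]*m,
                         "val": np.concatenate([xs, ys])})

# one cell of the grid ∆ × outliers × n × m
df_sim = gen_dataset(delta=1.0, prop_outliers=0.1, n=20, m=30, seed=42)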

Model comparisons#

Compare the results of several models (see the sketch after this list):

  • normal data model

  • 𝒯 data model (robust)

  • Bayes factor based on the JZS prior

  • classical two-sample t-test (from Section 4.5)
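A hedged sketch of how the Bayesian models could be compared with ArviZ (model names are illustrative; `az.compare` needs the pointwise log-likelihood, which Bambi can store via `idata_kwargs`):

# fit a normal model and a robust t model to the same data, then
# compare their out-of-sample predictive fit with LOO cross-validation
mod_n = bmb.Model("price ~ 0 + loc", family="gaussian", data=eprices)
idata_n = mod_n.fit(idata_kwargs={"log_likelihood": True})

mod_t = bmb.Model("price ~ 0 + loc", family="t", data=eprices)
idata_t = mod_t.fit(idata_kwargs={"log_likelihood": True})

az.compare({"normal": idata_n, "robust t": idata_t})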

Discussion#

Comparison to the frequentist two-sample t-test#

  • The results are numerically similar to those of the frequentist two-sample t-test.

  • Note that we use 𝒯 as the data model for the observations, not as the sampling distribution of a test statistic.

  • The two approaches are conceptually different:

    • decisions based on a p-value vs. decisions based on the posterior distribution

    • confidence intervals vs. credible intervals

Comparing multiple groups#

  • The approach extends naturally to comparisons between multiple groups, which gives a Bayesian counterpart of ANOVA.

Hierarchical models for group comparisons are covered in Section 5.5.

Exercises#

Exercise 1: small samples#

As = [5.77, 5.33, 4.59, 4.33, 3.66, 4.48]
Bs = [3.88, 3.55, 3.29, 2.59, 2.33, 3.59]
groups = ["A"]*len(As) + ["B"]*len(Bs)
df1 = pd.DataFrame({"group": groups, "vals": As + Bs})
# df1

Exercise 2: lecture and debate curriculums#

students = pd.read_csv("../datasets/students.csv")
# students.groupby("curriculum")["score"].describe()
formula_std = bmb.Formula("score ~ 0 + curriculum",
                          "sigma ~ 0 + curriculum")
priors_std = {
    "curriculum": bmb.Prior("Normal", mu=70, sigma=30),
    "sigma": {
        "curriculum":bmb.Prior("Normal", mu=1, sigma=2)
    },
    "nu": bmb.Prior("Gamma", alpha=2, beta=0.1),
}
mod_std = bmb.Model(formula=formula_std,
                 family="t",
                 link="identity",
                 priors=priors_std,
                 data=students)
mod_std
# mod_std.build()
# mod_std.graph()
       Formula: score ~ 0 + curriculum
                sigma ~ 0 + curriculum
        Family: t
          Link: mu = identity
                sigma = log
  Observations: 15
        Priors: 
    target = mu
        Common-level effects
            curriculum ~ Normal(mu: 70.0, sigma: 30.0)
        
        Auxiliary parameters
            nu ~ Gamma(alpha: 2.0, beta: 0.1)
    target = sigma
        Common-level effects
            sigma_curriculum ~ Normal(mu: 1.0, sigma: 2.0)
idata_std = mod_std.fit()
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [nu, curriculum, sigma_curriculum]


Sampling 4 chains for 1_000 tune and 1_000 draw iterations (4_000 + 4_000 draws total) took 1 seconds.
# Calculate the difference between means
currs = idata_std["posterior"]["curriculum"]
mu_debate = currs.sel(curriculum_dim="debate")
mu_lecture = currs.sel(curriculum_dim="lecture")
idata_std["posterior"]["dmeans"] = mu_debate - mu_lecture
az.summary(idata_std, kind="stats", var_names=["dmeans"], hdi_prob=0.95)
mean sd hdi_2.5% hdi_97.5%
dmeans 7.423 5.195 -3.168 17.422
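The 95% highest density interval for dmeans includes zero, so this small sample does not provide conclusive evidence of a difference between the two curriculums.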
# from ministats import plot_dmeans_stats
# plot_dmeans_stats(mod_std, idata_std, ppc_xlims=[50,100], group_name="curriculum");
# from scipy.stats import ttest_ind
# scoresD = students[students["curriculum"]=="debate"]["score"]
# scoresL = students[students["curriculum"]=="lecture"]["score"]
# res_std = ttest_ind(scoresD, scoresL, equal_var=False)
# res_std.statistic, res_std.pvalue
# from ministats import cohend2
# cohend2(scoresL, scoresD)

Exercise 3: redo exercises from Section 3.5 using Bayesian methods#

# TODO

BONUS Examples#

Example 4: small example from the BEST vignette#

See http://cran.nexr.com/web/packages/BEST/vignettes/BEST.pdf#page=2

y1s = [5.77, 5.33, 4.59, 4.33, 3.66, 4.48]
y2s = [3.88, 3.55, 3.29, 2.59, 2.33, 3.59]

from ministats.bayes import bayes_dmeans
mod4, idata4 = bayes_dmeans(y1s, y2s, groups=["y1", "y2"])
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [nu, Intercept, group, sigma_group]


Sampling 4 chains for 1_000 tune and 2_000 draw iterations (4_000 + 8_000 draws total) took 2 seconds.
from ministats import calc_dmeans_stats
calc_dmeans_stats(idata4)
az.summary(idata4, kind="stats", hdi_prob=0.95,
           var_names=["dmeans", "sigma_y1", "sigma_y2", "dstd", "nu", "cohend"])
mean sd hdi_2.5% hdi_97.5%
dmeans -1.451 0.499 -2.443 -0.497
sigma_y1 0.848 0.325 0.369 1.492
sigma_y2 0.717 0.282 0.306 1.256
dstd -0.131 0.426 -0.982 0.736
nu 21.306 14.394 1.211 49.339
cohend -1.934 0.784 -3.475 -0.443
# from ministats import plot_dmeans_stats
# plot_dmeans_stats(mod4, idata4, ppc_xlims=None);

Example 5: comparing morning to evening#

Data taken from the treszkai/best repository (a Python implementation of BEST).

morning = [8.99, 9.21, 9.03, 9.15, 8.68, 8.82, 8.66, 8.82, 8.59, 8.14,
           9.09, 8.80, 8.18, 9.23, 8.55, 9.03, 9.36, 9.06, 9.57, 8.38]
evening = [9.82, 9.34, 9.73, 9.93, 9.33, 9.41, 9.48, 9.14, 8.62, 8.60,
           9.60, 9.41, 8.43, 9.77, 8.96, 9.81, 9.75, 9.50, 9.90, 9.13]
from ministats.bayes import bayes_dmeans
mod5, idata5 = bayes_dmeans(evening, morning, groups=["evening", "morning"])
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (4 chains in 4 jobs)
NUTS: [nu, Intercept, group, sigma_group]


Sampling 4 chains for 1_000 tune and 2_000 draw iterations (4_000 + 8_000 draws total) took 2 seconds.
from ministats import calc_dmeans_stats
calc_dmeans_stats(idata5)
az.summary(idata5, kind="stats", hdi_prob=0.95,
           var_names=["dmeans", "sigma_evening", "sigma_morning", "dstd", "nu", "cohend"])
mean sd hdi_2.5% hdi_97.5%
dmeans -0.528 0.141 -0.807 -0.248
sigma_evening 0.454 0.086 0.302 0.632
sigma_morning 0.387 0.071 0.262 0.529
dstd -0.068 0.111 -0.294 0.153
nu 22.920 14.387 2.520 50.827
cohend -1.265 0.374 -2.017 -0.539
# from ministats import plot_dmeans_stats
# plot_dmeans_stats(mod5, idata5, ppc_xlims=None);