Selection Function
Following the GWTC-4 paper, the selection function can be written as
\[ \xi(\Lambda) = \int p(\mathrm{det} \mid \theta)\, p(\theta \mid \Lambda)\, d\theta . \]
For ler, under the assumption that data marginalization has been performed (i.e. the likelihood is marginalized over the data), we estimate the selection function as the Monte Carlo average
\[ \xi(\Lambda) \approx \frac{1}{N} \sum_{i=1}^{N} p(\mathrm{det} \mid \theta_i), \qquad \theta_i \sim p(\theta \mid \Lambda). \]
We will consider the hyperparameters \(\Lambda \in \{\mathrm{lam\_0}, \mathrm{lam\_1}, \mathrm{mpp\_1}, \mathrm{sigpp\_1}, \mathrm{mpp\_2}, \mathrm{sigpp\_2}, \mathrm{mlow\_1}, \mathrm{delta\_m\_1}, \mathrm{break\_mass}, \mathrm{alpha\_1}, \mathrm{alpha\_2}\}\) to be the parameters of the mass distribution, which is defined by the Broken Power Law + Two Peaks model. We will use the same hyperparameter priors as in the GWTC-4 paper.
Create priors
[14]:
# Hyperparameter priors for Broken Power Law + 2 Peaks (GWTC-4 Table 6)
import numpy as np
# Hyperparameter names used in broken_powerlaw_plus_2peaks_rvs
# Note: mlow_2 and delta_m_2 (Table 6) belong to the mass-ratio distribution and
# are handled separately; they are not included here.
# Names of the m1-model hyperparameters, in the order used throughout below.
hyper_keys = [
    "lam_0",
    "lam_1",
    "mpp_1",
    "sigpp_1",
    "mpp_2",
    "sigpp_2",
    "mlow_1",
    "delta_m_1",
    "break_mass",
    "alpha_1",
    "alpha_2",
]
def sample_mlow1_eq_B23(n_samples, rng=None, mmin=3.0, mmax=10.0):
    """Draw mlow_1 samples from the Eq. (B23) marginal prior.

    The joint prior over (m1,low, m2,low) is uniform on the triangular
    region {mmin <= m2,low <= m1,low <= mmax}, so the m1,low marginal is

        pi(m1,low) = 2 * (m1,low - mmin) / (mmax - mmin)^2

    and inverse-CDF sampling gives

        m1,low = mmin + (mmax - mmin) * sqrt(u),   u ~ U(0, 1).

    Parameters
    ----------
    n_samples : int
        Number of draws.
    rng : np.random.Generator or None
        Source of randomness; a fresh default generator when None.
    mmin : float
        Lower bound (default: 3 Msun).
    mmax : float
        Upper bound (default: 10 Msun).

    Returns
    -------
    np.ndarray of shape (n_samples,)
    """
    if rng is None:
        rng = np.random.default_rng()
    span = mmax - mmin
    uniform_draws = rng.uniform(0.0, 1.0, size=n_samples)
    return mmin + span * np.sqrt(uniform_draws)
def sample_m2low_given_m1low_eq_B23(m1low, rng=None, mmin=3.0):
    """Draw m2,low conditional on m1,low per Eq. (B23).

    The conditional prior is flat on [mmin, m1,low]:

        pi(m2,low | m1,low) = 1 / (m1,low - mmin).

    This belongs to the mass-ratio model (mlow_2 parameter) and is kept
    for completeness; sample_hyperparameters_from_priors (m1 model only)
    does not call it.

    Parameters
    ----------
    m1low : float or array-like
        Previously sampled m1,low values.
    rng : np.random.Generator or None
        Source of randomness; a fresh default generator when None.
    mmin : float
        Lower bound (default: 3 Msun).
    """
    if rng is None:
        rng = np.random.default_rng()
    upper = np.asarray(m1low, dtype=float)
    return rng.uniform(mmin, upper)
def sample_hyperparameters_from_priors(n_samples, rng=None):
    """Draw hyperparameter samples for the Broken Power Law + 2 Peaks model.

    Priors follow GWTC-4 Table 6:
        alpha_1, alpha_2 : U(-4, 12)
        break_mass       : U(20, 50) [Msun]
        mpp_1            : U(5, 20)  [Msun]
        sigpp_1          : U(0, 10)  [Msun]
        mpp_2            : U(25, 60) [Msun]
        sigpp_2          : U(0, 10)  [Msun]
        delta_m_1        : U(0, 10)  [Msun]
        lam_0, lam_1     : Dirichlet(alpha=(1,1,1)); third weight = 1 - lam_0 - lam_1
        mlow_1           : Eq. (B23) triangular marginal, mmin=3, mmax=10 [Msun]

    Parameters
    ----------
    n_samples : int
        Number of prior draws.
    rng : np.random.Generator or None
        Source of randomness; a fresh default generator when None.

    Returns
    -------
    dict
        Maps each key in hyper_keys to a numpy array of length n_samples.
    """
    if rng is None:
        rng = np.random.default_rng()
    # (lam_0, lam_1, lam_2) ~ Dirichlet(1, 1, 1); lam_2 = 1 - lam_0 - lam_1
    # is implicit in the model, so only the first two columns are kept.
    weights = rng.dirichlet(alpha=[1.0, 1.0, 1.0], size=n_samples)
    draws = {"lam_0": weights[:, 0], "lam_1": weights[:, 1]}
    # Independent uniform priors, drawn in a fixed order so a seeded
    # generator reproduces the exact stream of the original implementation.
    uniform_bounds = [
        ("mpp_1", 5.0, 20.0),
        ("sigpp_1", 0.0, 10.0),
        ("mpp_2", 25.0, 60.0),
        ("sigpp_2", 0.0, 10.0),
        ("delta_m_1", 0.0, 10.0),
        ("break_mass", 20.0, 50.0),
        ("alpha_1", -4.0, 12.0),
        ("alpha_2", -4.0, 12.0),
    ]
    for key, low, high in uniform_bounds:
        draws[key] = rng.uniform(low, high, size=n_samples)
    draws["mlow_1"] = sample_mlow1_eq_B23(n_samples, rng=rng, mmin=3.0, mmax=10.0)
    return draws
# Quick sanity check: draw two prior samples and show every hyperparameter.
rng = np.random.default_rng(1234)
draws = sample_hyperparameters_from_priors(n_samples=2, rng=rng)
print("Sampled hyperparameters (2 draws):")
for name in hyper_keys:
    values = draws[name]
    print(f" {name}: {values}")
Sampled hyperparameters (2 draws):
lam_0: [0.37610834 0.36673 ]
lam_1: [0.17640007 0.44314362]
mpp_1: [8.6264944 9.77800893]
sigpp_1: [9.64079245 2.63649804]
mpp_2: [40.43521427 46.34547833]
sigpp_2: [8.63621297 8.63757671]
mlow_1: [8.73503818 8.47173474]
delta_m_1: [6.74881313 6.59874348]
break_mass: [42.07273095 26.68260974]
alpha_1: [-1.24694105 9.92663956]
alpha_2: [-3.03778148 6.93902255]
Find selection function for sampled prior points
[15]:
from ler.gw_source_population import broken_powerlaw_plus_2peaks_rvs
from ler import GWRATES
import numpy as np
import contextlib
from tqdm import tqdm
def make_mass_1_sampler(hyp):
    """Return a mass_1_source sampler with the hyperparameters in *hyp* frozen in.

    Parameters
    ----------
    hyp : dict
        One hyperparameter draw; must contain the 11 m1-model keys listed
        below.

    Returns
    -------
    callable
        ``sampler(size)`` drawing ``size`` masses from
        broken_powerlaw_plus_2peaks_rvs at the fixed hyperparameters.
    """
    param_names = (
        "lam_0", "lam_1", "mpp_1", "sigpp_1", "mpp_2", "sigpp_2",
        "mlow_1", "delta_m_1", "break_mass", "alpha_1", "alpha_2",
    )

    def sampler(size):
        kwargs = {name: hyp[name] for name in param_names}
        # Fixed upper mass cut of 300 Msun, matching the original call.
        return broken_powerlaw_plus_2peaks_rvs(size=size, mmax=300.0, **kwargs)

    return sampler
# Create a GWRATES instance with default settings.
# NOTE(review): GWRATES comes from the `ler` package import above; presumably
# npool=6 sets the number of worker processes and verbose=False suppresses
# initialization output -- confirm against the ler documentation.
ler = GWRATES(
    npool=6,
    event_type="BBH",
    verbose=False,
)
Initializing GWRATES class...
[16]:
# Monte Carlo estimate of the selection function xi(Lambda) for each of
# `loop_size` hyperparameter draws: sample `sample_size` events from
# p(theta | Lambda) and average the network detection probability.
sample_size = 100000
loop_size = 5
rng = np.random.default_rng(1234)
data = sample_hyperparameters_from_priors(n_samples=loop_size, rng=rng)
selection_function = []
for j in tqdm(range(loop_size), total=loop_size, ncols=100):
    # Silence any prints emitted by ler during sampling.
    with contextlib.redirect_stdout(None):
        # One scalar per hyperparameter for this draw j.
        hyp = {k: float(data[k][j]) for k in hyper_keys}
        mass_1_source_sampler = make_mass_1_sampler(hyp)
        # Swap in the new mass_1_source sampler for this hyperparameter draw.
        # NOTE(review): assumes assigning `ler.mass_1_source` rewires the
        # sampler used inside gw_cbc_statistics -- confirm with the ler docs.
        ler.mass_1_source = mass_1_source_sampler
        gw_param_i = ler.gw_cbc_statistics(
            size=sample_size,
            batch_size=100000,
            resume=False,
            output_jsonfile="gw_param_tmp.json",
        )
        # xi(Lambda) = <p(det|theta)>_{theta ~ p(theta|Lambda)}
        pdet_net = gw_param_i["pdet_net"]
        selection_function.append(np.mean(pdet_net))
selection_function = np.array(selection_function)
100%|█████████████████████████████████████████████████████████████████| 5/5 [00:08<00:00, 1.65s/it]
[ ]:
# Report the sampled hyperparameters and the xi(Lambda) estimate per draw.
print("Sampled hyperparameters (Lambda):")
for key in hyper_keys:
    formatted = ", ".join(f"{value:.4f}" for value in data[key])
    print(f" {key}: [{formatted}]")
print("\nSelection function values xi(Lambda):")
for draw_index, xi_value in enumerate(selection_function):
    print(f" draw {draw_index}: {xi_value:.6f}")
Sampled hyperparameters (Lambda):
lam_0: [0.3761, 0.3667, 0.0892, 0.1183, 0.7390]
lam_1: [0.1764, 0.4431, 0.1745, 0.2148, 0.0894]
mpp_1: [14.8981, 16.0364, 8.3413, 7.5810, 18.0562]
sigpp_1: [0.6014, 6.8369, 6.7124, 6.1102, 0.6014]
mpp_2: [59.2219, 40.3633, 43.6408, 25.1096, 33.7943]
sigpp_2: [8.5849, 4.2530, 7.3582, 9.2204, 1.5347]
mlow_1: [9.0664, 7.8094, 7.7336, 9.0474, 9.4741]
delta_m_1: [9.9226, 1.8233, 9.4011, 0.8688, 4.6821]
break_mass: [44.8697, 28.4316, 46.0727, 49.2925, 45.2514]
alpha_1: [3.1820, 1.9281, 3.7227, -2.5251, -0.3731]
alpha_2: [4.5851, 7.7317, 3.1826, 0.8880, 11.1578]
Selection function values xi(Lambda):
draw 0: 0.037610
draw 1: 0.009960
draw 2: 0.029080
draw 3: 0.029940
draw 4: 0.018440
| draw | lam_0 | lam_1 | mpp_1 | sigpp_1 | mpp_2 | sigpp_2 | mlow_1 | delta_m_1 | break_mass | alpha_1 | alpha_2 | xi(Lambda) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.376100 | 0.176400 | 14.898100 | 0.601400 | 59.221900 | 8.584900 | 9.066400 | 9.922600 | 44.869700 | 3.182000 | 4.585100 | 0.037610 |
| 1 | 0.366700 | 0.443100 | 16.036400 | 6.836900 | 40.363300 | 4.253000 | 7.809400 | 1.823300 | 28.431600 | 1.928100 | 7.731700 | 0.009960 |
| 2 | 0.089200 | 0.174500 | 8.341300 | 6.712400 | 43.640800 | 7.358200 | 7.733600 | 9.401100 | 46.072700 | 3.722700 | 3.182600 | 0.029080 |
| 3 | 0.118300 | 0.214800 | 7.581000 | 6.110200 | 25.109600 | 9.220400 | 9.047400 | 0.868800 | 49.292500 | -2.525100 | 0.888000 | 0.029940 |
| 4 | 0.739000 | 0.089400 | 18.056200 | 0.601400 | 33.794300 | 1.534700 | 9.474100 | 4.682100 | 45.251400 | -0.373100 | 11.157800 | 0.018440 |