psiphy.sbi

psiphy.sbi.rejection_abc.tqdm(iterable, **kwargs)[source]
class psiphy.sbi.rejection_abc.rABC(simulator, distance, observation, prior, bounds, N=100, eps=0.1)[source]
sample_prior(kk)[source]
run(N=None)[source]
class psiphy.sbi.sre.emceeSRE[source]
ratio_estimation(classifier, X, y, test_size=0.1, normalizer=None)[source]
set_obs(obs)[source]
set_simulator(simulator)[source]
learn_logL_with_classifier(classifier, mins, maxs, Nsamples=None, test_size=0.1)[source]
class psiphy.sbi.lfire.LFIRE_core(simulator, observation, prior, bounds, sim_out_den=None, n_m=100, n_theta=100, n_grid_out=100, thetas=None, verbose=True, penalty='l1', n_jobs=4, clfy=None)[source]
sample_prior(kk)[source]
theta_grid()[source]
sim_denominator()[source]
sim_numerator(theta, n_theta)[source]
ratio(theta, sim_out_num=None, get_score=False)[source]
run(thetas=None, n_grid_out=100, sim_out_num=None)[source]
class psiphy.sbi.lfire.LFIRE_TrainingSetAuto(simulator, observation, prior, bounds, n_init=10, n_step=1, n_max=100, n_grid_out=25, thetas=None, verbose=True, penalty='l1', n_jobs=4, clfy=None, lfire=None)[source]
run(thetas=None, n_grid_out=100, sim_out_num=None)[source]
class psiphy.sbi.lfire.LFIRE_BayesianOpt(simulator, observation, prior, bounds, sim_out_den=None, n_m=100, n_theta=100, n_grid_out=100, thetas=None, n_init=10, max_iter=1000, tol=1e-05, verbose=True, penalty='l1', n_jobs=4, clfy=None, lfire=None, simulate_corner=True, exploitation_exploration=None, sigma_tol=0.001, model_pdf=None, params=None)[source]
corner_to_theta()[source]
run(max_iter=None, tol=None)[source]
class psiphy.sbi.lfire.LFIRE_BayesianOpt_ShrinkSpace(simulator, observation, prior, bounds, sim_out_den=None, n_m=100, n_theta=100, n_grid_out=100, thetas=None, n_init=10, max_iter=1000, shrink_condition={'CI': 95, 'n': 5}, tol=1e-05, verbose=True, penalty='l1', n_jobs=4, clfy=None, lfire=None, simulate_corner=True, exploitation_exploration=1)[source]
corner_to_theta()[source]
run(max_iter=None, tol=None)[source]
class psiphy.sbi.lfire.LFIRE(**arg)[source]

LFIRE (Likelihood-Free Inference by Ratio Estimation) — convenience wrapper accepting keyword arguments (presumably forwarded to an underlying LFIRE implementation such as LFIRE_core; confirm against the source).

psiphy.sbi.bolfi.dict_get_remove(a, key, default=None, remove=True)[source]
psiphy.sbi.bolfi.read_sampler_emcee(filename=None, sampler=None)[source]
psiphy.sbi.bolfi.get_chain_emcee(discard=0, filename=None, sampler=None)[source]
class psiphy.sbi.bolfi.BOLFI(distance, prior_range, obs=None, distance_kernel='exp', verbose=True, package='GPyOpt', learn_log_dist=False)[source]

Bayesian optimisation for Likelihood-free Inference.

save_likelihood_model(filename)[source]
load_likelihood_model(filename)[source]
cook_likelihood(gpmodel)[source]
learn_likelihood(obs=None, gpmodel=None, reset_model=False, n_calls=100, n_random_starts=None, n_initial_points=None, initial_point_generator='random', acq_func='EI', acq_optimizer='auto', random_state=None, verbose=False, callback=None, n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96, noise='gaussian', n_jobs=1, model_queue_size=None, batch_size=1, filename=None, acquisition_optimizer_type='lbfgs', **kwargs)[source]
sample_posterior(n_samples=5000, method='MCMC', **kwargs)[source]
sample_MCMC(n_samples, log_prior=None, nwalkers=16, filename=None, reset_sampler=True, n_jobs=4, **kwargs_emcee)[source]
sample_NestedSampling(log_prior=None, **kwargs)[source]
sample_IS(n_samples, log_prior=None, proposal='uniform')[source]
psiphy.sbi.bolfi.importance_sampling(func, n_samples, prior_range, proposal='uniform')[source]
psiphy.sbi.bolfi.sequential_importance_sampling(func, n_samples, prior_range, proposal='uniform', max_iter=10, kernel='EmpiricalCovariance')[source]
psiphy.sbi.bolfi.SMC_sampling(func, n_samples, prior_range, proposal='uniform', kernel='EmpiricalCovariance')[source]

UNDER CONSTRUCTION — sequential Monte Carlo sampling variant; the interface and behavior are not yet finalized and may change.

psiphy.sbi.abc_gp.tqdm(iterable, **kwargs)[source]
class psiphy.sbi.abc_gp.ABC_gpL(simulator, distance, obs, theta_sampler=None, theta_range={}, n_train_init=100, mcmc_sampler=None, mcmc_sampler_info=None)[source]
prepare_distance_model(model=None, kernel=None, alpha=1e-10, optimizer='fmin_l_bfgs_b', n_restarts_optimizer=3)[source]
create_dataset(n_train=None)[source]
learn_distance(n_train=None)[source]
learn_distance_BO(tol=0.01)[source]
run_mcmc(n_samples=None)[source]