condense init parameters #16

Merged Jul 4, 2024 · 39 commits
Changes from 7 commits

Commits
5fbc48d - condense init parameters (vloison, May 30, 2024)
895b88c - compact parameters initialization in one argument in FaDIn.__init__ (vloison, May 30, 2024)
6448db4 - condense optim_mask and add moment_matching in FaDIn init (vloison, May 31, 2024)
98b7171 - clean code (vloison, May 31, 2024)
fbaf0b0 - remove precomputations and criterion from FaDIn init, replace with cu… (vloison, May 31, 2024)
09a59d2 - fix unmarked moment_matching (vloison, May 31, 2024)
d92cfbf - fix failing test (vloison, May 31, 2024)
649c2db - Update experiments/benchmark/run_benchmark_exp.py (vloison, Jun 12, 2024)
42cecfb - Update fadin/solver.py (vloison, Jun 12, 2024)
bce234f - Update fadin/solver.py (vloison, Jun 12, 2024)
cc1e10f - comment +1 in assert torch.isclose (vloison, Jun 12, 2024)
33bc14e - Move FaDInLoglikelihood and FaDInNoPrecomputations to experiments (vloison, Jun 12, 2024)
5c11fc7 - clean imports (vloison, Jun 12, 2024)
2ce478a - change optim_iteration to compute_gradient for clarity (vloison, Jun 13, 2024)
c8695c2 - condensate unit parameters and precomputations (GuillaumeStaermanML, Jun 17, 2024)
4891015 - condensate unit parameters and precomputations (GuillaumeStaermanML, Jun 17, 2024)
541e9f7 - condensate unit parameters and precomputations (GuillaumeStaermanML, Jun 17, 2024)
c2fb1b5 - condensate unit parameters and precomputations (GuillaumeStaermanML, Jun 17, 2024)
fb942ec - correct tests (GuillaumeStaermanML, Jun 17, 2024)
94e4032 - correct tests (GuillaumeStaermanML, Jun 17, 2024)
562ebd8 - remove tick from the simulation of experiments (GuillaumeStaermanML, Jun 17, 2024)
5ed638a - remove useless experiments and comparisons with tick (GuillaumeStaermanML, Jun 17, 2024)
68c5d89 - useless files (GuillaumeStaermanML, Jun 17, 2024)
9942b54 - simplify init parameter of FaDIn and move Hawkes initialization to a … (vloison, Jun 26, 2024)
1ebd4e9 - Merge branch 'condense' of https://github.com/mind-inria/FaDIn (vloison, Jun 26, 2024)
5a453ff - replace solver.W with solver.kernel_length (vloison, Jun 26, 2024)
00db0a7 - fix scipy.integrate.simps expired deprecation and linter (vloison, Jun 26, 2024)
71aa1c0 - fix call to scipy.integrate.simpson (vloison, Jun 26, 2024)
8981c63 - Improve init_hawkes_params docstring (vloison, Jul 1, 2024)
04e96b6 - improve init_hawkes_params docstring (vloison, Jul 1, 2024)
a0628fd - improve docstring (vloison, Jul 1, 2024)
2e2cba8 - improve init.py docstring (vloison, Jul 1, 2024)
1c3a899 - improve docstring (vloison, Jul 1, 2024)
0390d0b - init.py PEP8 (vloison, Jul 1, 2024)
4ed3150 - add Parameters section to init.py docstring (vloison, Jul 1, 2024)
2f5b2a0 - simplify Hawkes parameter initialization (vloison, Jul 3, 2024)
5de7dda - improve init assert (vloison, Jul 4, 2024)
a212ab1 - remove unused function (vloison, Jul 4, 2024)
a7c924e - move plot to separate utils file (vloison, Jul 4, 2024)
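Taken together, these commits replace FaDIn's three separate initialization arguments (kernel parameters, baseline, alpha) with a single init dict. A minimal sketch of the resulting call, assembled from the diffs below; the tensor values are illustrative, not taken from the library:

import torch
from fadin.solver import FaDIn

# One dict now carries all initial Hawkes parameters; the keys come from
# the diffs below, the numbers are made up for a 2-dimensional process.
init = {
    'baseline': torch.tensor([0.1, 0.5]),
    'alpha': torch.tensor([[0.6, 0.3], [0.25, 0.7]]),
    'kernel': [torch.tensor([[1.0, 1.0], [1.0, 1.0]])]  # one tensor per kernel parameter
}
solver = FaDIn(2,                         # n_dim
               "truncated_exponential",
               init=init,
               delta=0.01, optim="RMSprop",
               step_size=1e-3, max_iter=2000)
# solver.fit(events, T)  # events and T as in the example scripts below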
18 changes: 10 additions & 8 deletions examples/plot_multivariate_fadin.py
@@ -35,7 +35,8 @@
 discretization = torch.linspace(0, kernel_length, L)

 ###############################################################################
-# Here, we set the parameters of a Hawkes process with a Exponential(1) distributions.
+# Here, we set the parameters of a Hawkes process with a Exponential(1)
+# distribution.

 baseline = np.array([.1, .5])
 alpha = np.array([[0.6, 0.3], [0.25, 0.7]])
@@ -50,13 +51,14 @@
 ###############################################################################
 # Here, we apply FaDIn.

-solver = FaDIn(n_dim=n_dim,
-               kernel="truncated_exponential",
-               kernel_length=kernel_length,
-               delta=dt, optim="RMSprop",
-               params_optim={'lr': 1e-3},
-               max_iter=10000, criterion='l2'
-               )
+solver = FaDIn(
+    n_dim=n_dim,
+    kernel="truncated_exponential",
+    kernel_length=kernel_length,
+    delta=dt, optim="RMSprop",
+    params_optim={'lr': 1e-3},
+    max_iter=10000
+)
 solver.fit(events, T)

 # We average on the 10 last values of the optimization.
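The averaging step itself is collapsed in this view. A sketch of what it presumably does, following the results['param_baseline'][-10:].mean() pattern visible in experiments/benchmark/run_comparison_ll.py further down; whether fit returns this dict or stores it on the solver is not visible here, so the keys and the return value are assumptions:

results = solver.fit(events, T)
# assumed: per-iteration parameter trajectories, averaged over the last
# 10 iterates to smooth out optimizer noise
baseline_hat = results['param_baseline'][-10:].mean(0)
alpha_hat = results['param_alpha'][-10:].mean(0)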
2 changes: 1 addition & 1 deletion examples/plot_univariate_fadin.py
@@ -56,7 +56,7 @@
                kernel_length=kernel_length,
                delta=dt, optim="RMSprop",
                params_optim={'lr': 1e-3},
-               max_iter=2000, criterion='l2'
+               max_iter=2000
                )
 solver.fit(events, T)

26 changes: 18 additions & 8 deletions experiments/benchmark/run_benchmark_exp.py
@@ -25,8 +25,13 @@ def simulate_data(baseline, alpha, decay, T, dt, seed=0):
     L = int(1 / dt)
     discretization = torch.linspace(0, 1, L)
     n_dim = decay.shape[0]
-    EXP = DiscreteKernelFiniteSupport(dt, n_dim=n_dim, kernel='truncated_exponential',
-                                      lower=0, upper=1)
+    EXP = DiscreteKernelFiniteSupport(
+        dt,
+        n_dim=n_dim,
+        kernel='truncated_exponential',
+        lower=0,
+        upper=1
+    )

     kernel_values = EXP.kernel_eval([torch.Tensor(decay)],
                                     discretization)
@@ -62,14 +67,16 @@ def simulate_data(baseline, alpha, decay, T, dt, seed=0):
 def run_fadin(events, decay_init, baseline_init, alpha_init, T, dt, seed=0):
     start = time.time()
     max_iter = 2000
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(decay_init)]
+    }
     solver = FaDIn(2,
                    "truncated_exponential",
-                   [torch.tensor(decay_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    delta=dt, optim="RMSprop",
                    step_size=1e-3, max_iter=max_iter,
-                   optimize_kernel=True, precomputations=True,
                    ztzG_approx=True, device='cpu', log=False
                    )

@@ -258,14 +265,17 @@ def run_experiment(baseline, alpha, decay, T, dt, seed=0):
     alpha_hat = results['param_alpha']
     decay_hat = results['param_kernel'][0]

-    RC = DiscreteKernelFiniteSupport(dt, n_dim=2, kernel='truncated_exponential',
+    RC = DiscreteKernelFiniteSupport(dt, n_dim=2,
+                                     kernel='truncated_exponential',
                                      lower=0, upper=1)
     intens_fadin = RC.intensity_eval(torch.tensor(baseline_hat),
                                      torch.tensor(alpha_hat),
                                      [torch.Tensor(decay_hat)],
                                      events_grid, torch.linspace(0, 1, L))

-    res['err_fadin'] = np.absolute(intens.numpy() - intens_fadin.numpy()).mean()
+    res['err_fadin'] = np.absolute(
+        intens.numpy() - intens_fadin.numpy()
+    ).mean()
     res['time_fadin'] = results['time']

     results = run_gibbs(S, size_grid, dt, seed=seed)
11 changes: 6 additions & 5 deletions experiments/benchmark/run_benchmark_rc.py
@@ -63,15 +63,16 @@ def simulate_data(baseline, alpha, mu, sigma, T, dt, seed=0):
 def run_fadin(events, u_init, sigma_init, baseline_init, alpha_init, T, dt, seed=0):
     start = time.time()
     max_iter = 2000
+    init = {
+        'kernel': [torch.tensor(u_init), torch.tensor(sigma_init)],
+        'baseline': torch.tensor(baseline_init),
+        'alpha': torch.tensor(alpha_init)
+    }
     solver = FaDIn(2,
                    "raised_cosine",
-                   [torch.tensor(u_init),
-                    torch.tensor(sigma_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    delta=dt, optim="RMSprop",
                    step_size=1e-3, max_iter=max_iter,
-                   optimize_kernel=True, precomputations=True,
                    ztzG_approx=True, device='cpu', log=False
                    )

11 changes: 6 additions & 5 deletions experiments/benchmark/run_benchmark_tg.py
@@ -62,15 +62,16 @@ def simulate_data(baseline, alpha, m, sigma, T, dt, seed=0):
 def run_fadin(events, m_init, sigma_init, baseline_init, alpha_init, T, dt, seed=0):
     start = time.time()
     max_iter = 2000
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(m_init), torch.tensor(sigma_init)]
+    }
     solver = FaDIn(2,
                    "truncated_gaussian",
-                   [torch.tensor(m_init),
-                    torch.tensor(sigma_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    delta=dt, optim="RMSprop",
                    step_size=1e-3, max_iter=max_iter,
-                   optimize_kernel=True, precomputations=True,
                    ztzG_approx=True, device='cpu', log=False
                    )

51 changes: 34 additions & 17 deletions experiments/benchmark/run_comparison_ll.py
@@ -9,7 +9,7 @@
 from tick.hawkes import SimuHawkes, HawkesKernelTimeFunc

 from fadin.kernels import DiscreteKernelFiniteSupport
-from fadin.solver import FaDIn
+from fadin.solver import FaDIn, FaDIn_loglikelihood

 # %% simulate data
 # Simulated data
@@ -56,23 +56,40 @@ def simulate_data(baseline, alpha, kernel_params,


 def run_solver(criterion, events, kernel_params_init,
-               baseline_init, alpha_init, T, dt, seed=0, kernel='raised_cosine'):
-    k_params_init = [torch.tensor(a) for a in kernel_params_init]
+               baseline_init, alpha_init, T, dt, seed=0,
+               kernel='raised_cosine'):
     max_iter = 2000
-    solver = FaDIn(1,
-                   kernel,
-                   k_params_init,
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
-                   delta=dt,
-                   optim="RMSprop",
-                   step_size=1e-3,
-                   max_iter=max_iter,
-                   log=False,
-                   random_state=seed,
-                   device="cpu",
-                   optimize_kernel=True,
-                   criterion=criterion)
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(a) for a in kernel_params_init]
+    }
+    if criterion == 'l2':
+        solver = FaDIn(
+            1,
+            kernel,
+            init=init,
+            delta=dt,
+            optim="RMSprop",
+            step_size=1e-3,
+            max_iter=max_iter,
+            log=False,
+            random_state=seed,
+            device="cpu"
+        )
+    elif criterion == 'll':
+        solver = FaDIn_loglikelihood(
+            1,
+            kernel,
+            init=init,
+            delta=dt,
+            optim="RMSprop",
+            step_size=1e-3,
+            max_iter=max_iter,
+            log=False,
+            random_state=seed,
+            device="cpu"
+        )
     results = solver.fit(events, T)
     if kernel == 'truncated_exponential':
         results_ = dict(param_baseline=results['param_baseline'][-10:].mean().item(),
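The two branches above duplicate every keyword argument. A more compact equivalent, sketched under the assumption that FaDIn and FaDIn_loglikelihood share the constructor signature shown in this diff:

# pick the solver class from the criterion, then build it once
solver_cls = {'l2': FaDIn, 'll': FaDIn_loglikelihood}[criterion]
solver = solver_cls(1, kernel, init=init,
                    delta=dt, optim="RMSprop", step_size=1e-3,
                    max_iter=max_iter, log=False,
                    random_state=seed, device="cpu")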
11 changes: 7 additions & 4 deletions experiments/benchmark/run_nonparam_exp.py
@@ -61,19 +61,22 @@ def simulate_data(baseline, alpha, decay, T, dt, seed=0):
 # @mem.cache
 def run_solver(events, decay_init, baseline_init, alpha_init, T, dt, seed=0):
     max_iter = 800
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(decay_init)]
+    }
     solver = FaDIn(1,
                    "truncated_exponential",
-                   [torch.tensor(decay_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    delta=dt,
                    optim="RMSprop",
                    step_size=1e-3,
                    max_iter=max_iter,
                    log=False,
                    random_state=seed,
                    device="cpu",
-                   optimize_kernel=True)
+                   )
     results = solver.fit(events, T)
     return results

12 changes: 7 additions & 5 deletions experiments/benchmark/run_nonparam_rc.py
@@ -64,20 +64,22 @@ def simulate_data(baseline, alpha, mu, sigma, T, dt, seed=0):
 # @mem.cache
 def run_solver(events, u_init, sigma_init, baseline_init, alpha_init, T, dt, seed=0):
     max_iter = 800
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(u_init), torch.tensor(sigma_init)]
+    }
     solver = FaDIn(1,
                    "raised_cosine",
-                   [torch.tensor(u_init),
-                    torch.tensor(sigma_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    delta=dt,
                    optim="RMSprop",
                    step_size=1e-3,
                    max_iter=max_iter,
                    log=False,
                    random_state=seed,
                    device="cpu",
-                   optimize_kernel=True)
+                   )
     results = solver.fit(events, T)
     return results

12 changes: 7 additions & 5 deletions experiments/benchmark/run_nonparam_tg.py
@@ -63,20 +63,22 @@ def simulate_data(baseline, alpha, m, sigma, T, dt, seed=0):
 # @mem.cache
 def run_solver(events, m_init, sigma_init, baseline_init, alpha_init, T, dt, seed=0):
     max_iter = 800
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(m_init), torch.tensor(sigma_init)]
+    }
     solver = FaDIn(1,
                    "truncated_gaussian",
-                   [torch.tensor(m_init),
-                    torch.tensor(sigma_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    delta=dt,
                    optim="RMSprop",
                    step_size=1e-3,
                    max_iter=max_iter,
                    log=False,
                    random_state=seed,
                    device="cpu",
-                   optimize_kernel=True)
+                   )
     results = solver.fit(events, T)
     return results

12 changes: 6 additions & 6 deletions experiments/example_multivariate.py
@@ -61,12 +61,12 @@ def run_solver(events, dt, T,
                ztzG_approx, seed=0):
     start = time.time()
     max_iter = 2000
-    solver = FaDIn(2,
-                   "raised_cosine",
-                   delta=dt, optim="RMSprop", max_iter=max_iter,
-                   optimize_kernel=True, precomputations=True,
-                   ztzG_approx=ztzG_approx, device='cpu', log=False, tol=10e-6
-                   )
+    solver = FaDIn(
+        2,
+        "raised_cosine",
+        delta=dt, optim="RMSprop", max_iter=max_iter,
+        ztzG_approx=ztzG_approx, device='cpu', log=False, tol=10e-6
+    )

     print(time.time() - start)
     solver.fit(events, T)
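Note that this call passes no init at all. Judging from the commit messages ("condense optim_mask and add moment_matching in FaDIn init", "simplify Hawkes parameter initialization"), the solver presumably falls back to a built-in initialization when init is omitted. A hypothetical sketch; the string value is an assumption from the commit messages, not a documented argument:

# hypothetical: init may also accept a strategy name such as
# "moment_matching" (suggested by commit 6448db4, not shown in these diffs)
solver = FaDIn(2, "raised_cosine", init="moment_matching",
               delta=dt, optim="RMSprop", max_iter=max_iter,
               ztzG_approx=ztzG_approx, device='cpu', log=False, tol=10e-6)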
12 changes: 7 additions & 5 deletions experiments/example_univariate.py
@@ -53,15 +53,17 @@ def run_solver(events, u_init, sigma_init, baseline_init,
                alpha_init, kernel_length, dt, T, seed=0):
     start = time.time()
     max_iter = 10000
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(u_init), torch.tensor(sigma_init)]
+    }
     solver = FaDIn(1,
                    "kumaraswamy",
-                   [torch.tensor(u_init),
-                    torch.tensor(sigma_init)],
-                   torch.tensor(baseline_init),
-                   torch.tensor(alpha_init),
+                   init=init,
                    kernel_length=kernel_length,
                    delta=dt, optim="RMSprop",
-                   max_iter=max_iter, criterion='l2'
+                   max_iter=max_iter, criterion='l2',
                    )

     print(time.time() - start)
48 changes: 27 additions & 21 deletions experiments/inference_error/run_comp_autodiff.py
@@ -10,7 +10,7 @@
 from tick.hawkes import SimuHawkes, HawkesKernelTimeFunc

 from fadin.kernels import DiscreteKernelFiniteSupport
-from fadin.solver import FaDIn
+from fadin.solver import FaDIn, FaDIn_no_precomputations

 ################################
 # Meta parameters
@@ -58,30 +58,36 @@ def simulate_data(baseline, alpha, mu, sigma, T, dt, seed=0):

 @mem.cache
 def run_solver(events, u_init, sigma_init, baseline_init, alpha_init, T, dt, seed=0):
-
     max_iter = 800
-    solver_autodiff = FaDIn(1,
-                            "raised_cosine",
-                            [torch.tensor(u_init),
-                             torch.tensor(sigma_init)],
-                            torch.tensor(baseline_init),
-                            torch.tensor(alpha_init),
-                            delta=dt, optim="RMSprop",
-                            step_size=1e-3, max_iter=max_iter,
-                            precomputations=False, random_state=0)
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(u_init), torch.tensor(sigma_init)]
+    }
+    solver_autodiff = FaDIn_no_precomputations(
+        1,
+        "raised_cosine",
+        init=init,
+        delta=dt, optim="RMSprop",
+        step_size=1e-3, max_iter=max_iter,
+        random_state=0
+    )
     start_autodiff = time.time()
     solver_autodiff.fit(events, T)
     time_autodiff = time.time() - start_autodiff

-    solver_FaDIn = FaDIn(1,
-                         "raised_cosine",
-                         [torch.tensor(u_init),
-                          torch.tensor(sigma_init)],
-                         torch.tensor(baseline_init),
-                         torch.tensor(alpha_init),
-                         delta=dt, optim="RMSprop",
-                         step_size=1e-3, max_iter=max_iter,
-                         precomputations=True, random_state=0)
+    init = {
+        'alpha': torch.tensor(alpha_init),
+        'baseline': torch.tensor(baseline_init),
+        'kernel': [torch.tensor(u_init), torch.tensor(sigma_init)]
+    }
+    solver_FaDIn = FaDIn(
+        1,
+        "raised_cosine",
+        init=init,
+        delta=dt, optim="RMSprop",
+        step_size=1e-3, max_iter=max_iter,
+        random_state=0
+    )
     start_FaDIn = time.time()
     solver_FaDIn.fit(events, T)
     time_FaDIn = time.time() - start_FaDIn
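With this change the precomputations flag disappears from FaDIn entirely: the fast solver always uses precomputations, and the autodiff baseline comes from the separate FaDIn_no_precomputations class moved to the experiments side (commit 33bc14e), so the timing comparison now contrasts two solver classes rather than two values of one constructor flag.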