Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

condense init parameters #16

Merged
merged 39 commits into from
Jul 4, 2024
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
5fbc48d
condense init parameters
vloison May 30, 2024
895b88c
compact parameters initialization in one argument in FaDIn.__init__
vloison May 30, 2024
6448db4
condense optim_mask and add moment_matching in FaDIn init
vloison May 31, 2024
98b7171
clean code
vloison May 31, 2024
fbaf0b0
remove precomputations and criterion from FaDIn init, replace with cu…
vloison May 31, 2024
09a59d2
fix unmarked moment_matching
vloison May 31, 2024
d92cfbf
fix failing test
vloison May 31, 2024
649c2db
Update experiments/benchmark/run_benchmark_exp.py
vloison Jun 12, 2024
42cecfb
Update fadin/solver.py
vloison Jun 12, 2024
bce234f
Update fadin/solver.py
vloison Jun 12, 2024
cc1e10f
comment +1 in assert torch.isclose
vloison Jun 12, 2024
33bc14e
Move FaDInLoglikelihood and FaDInNoPrecomputations to experiments
vloison Jun 12, 2024
5c11fc7
clean imports
vloison Jun 12, 2024
2ce478a
change optim_iteration to compute_gradient for clarity
vloison Jun 13, 2024
c8695c2
condensate unit parameters and precomputations
GuillaumeStaermanML Jun 17, 2024
4891015
condensate unit parameters and precomputations
GuillaumeStaermanML Jun 17, 2024
541e9f7
condensate unit parameters and precomputations
GuillaumeStaermanML Jun 17, 2024
c2fb1b5
condensate unit parameters and precomputations
GuillaumeStaermanML Jun 17, 2024
fb942ec
correct tests
GuillaumeStaermanML Jun 17, 2024
94e4032
correct tests
GuillaumeStaermanML Jun 17, 2024
562ebd8
remove tick from the simulation of experiments
GuillaumeStaermanML Jun 17, 2024
5ed638a
remove useless experiments and comparisons with tick
GuillaumeStaermanML Jun 17, 2024
68c5d89
useless files
GuillaumeStaermanML Jun 17, 2024
9942b54
simplify init parameter of FaDIn and move Hawkes initialization to a …
vloison Jun 26, 2024
1ebd4e9
Merge branch 'condense' of https://github.com/mind-inria/FaDIn
vloison Jun 26, 2024
5a453ff
replace solver.W with solver.kernel_length
vloison Jun 26, 2024
00db0a7
fix scipy.integrate.simps expired deprecation and linter
vloison Jun 26, 2024
71aa1c0
fix call to scipy.integrate.simpson
vloison Jun 26, 2024
8981c63
Improve init_hawkes_params docstring
vloison Jul 1, 2024
04e96b6
improve init_hawkes_params docstring
vloison Jul 1, 2024
a0628fd
improve docstring
vloison Jul 1, 2024
2e2cba8
improve init.py docstring
vloison Jul 1, 2024
1c3a899
improve docstring
vloison Jul 1, 2024
0390d0b
init.py PEP8
vloison Jul 1, 2024
4ed3150
add Parameters section to init.py docstring
vloison Jul 1, 2024
2f5b2a0
simplify Hawkes parameter initialization
vloison Jul 3, 2024
5de7dda
improve init assert
vloison Jul 4, 2024
a212ab1
remove unused function
vloison Jul 4, 2024
a7c924e
move plot to separate utils file
vloison Jul 4, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
208 changes: 208 additions & 0 deletions fadin/init.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,208 @@
import torch
import matplotlib.pyplot as plt


def init_hawkes_params(solver, init_mode, events, n_ground_events, end_time):
    """
    Compute the initial Hawkes parameters for the FaDIn solver.

    The function supports three modes of initialization:

    - 'random': random initialization of parameters.
    - 'moment_matching': moment matching initialization of parameters.
    - given: parameters are given by the user in a dictionary.

    Parameters
    ----------
    solver : FaDIn
        FaDIn solver. Its ``baseline_mask`` and ``alpha_mask`` are applied
        to the initial values, and ``solver.n_kernel_params`` is set as a
        side effect.
    init_mode : `str` or `dict`
        Mode of initialization. Supported values are 'random',
        'moment_matching', and a dictionary with keys 'baseline', 'alpha'
        and 'kernel'.
    events : list of array of size number of timestamps,
        list size is solver.n_dim. Only used for 'moment_matching'.
    n_ground_events : torch.tensor
        Number of ground events for each dimension. Only used for
        'moment_matching'.
    end_time : float
        End time of the events time series. Only used for
        'moment_matching'.

    Returns
    -------
    params_intens : list
        List of parameters of the Hawkes process
        [baseline, alpha, kernel_params]:

        baseline : `tensor`, shape `(solver.n_dim)`
            Baseline parameter of the Hawkes process.
        alpha : `tensor`, shape `(solver.n_dim, n_dim)`
            Weight parameter of the Hawkes process.
        kernel_params : `list` of `tensor`
            list containing tensor array of kernels parameters.
            The size of the list varies depending the number of
            parameters. The shape of each tensor is
            `(solver.n_dim, solver.n_dim)`.

    Raises
    ------
    ValueError
        If ``init_mode`` is neither a supported string nor a dict.
    """
    # Compute initial Hawkes parameters
    if init_mode == 'moment_matching':
        baseline, alpha, kernel_params_init = momentmatching_nomark(
            solver,
            events,
            n_ground_events,
            end_time,
            solver.mm_mode
        )
    elif init_mode == 'random':
        baseline, alpha, kernel_params_init = random_params(solver)
    elif isinstance(init_mode, dict):
        baseline = init_mode['baseline'].float()
        alpha = init_mode['alpha'].float()
        kernel_params_init = init_mode['kernel']
    else:
        # Previously any unrecognized value fell through to dict indexing
        # and raised a confusing TypeError (e.g. for a misspelled mode
        # string); fail loudly with a clear message instead.
        raise ValueError(
            "init_mode must be 'random', 'moment_matching' or a dict with "
            f"keys 'baseline', 'alpha' and 'kernel', got {init_mode!r}"
        )

    # Format initial parameters for optimization: apply the optimization
    # masks and mark the tensors as autograd leaves.
    baseline = (baseline * solver.baseline_mask).requires_grad_(True)
    alpha = (alpha * solver.alpha_mask).requires_grad_(True)
    params_intens = [baseline, alpha]
    solver.n_kernel_params = len(kernel_params_init)
    for i in range(solver.n_kernel_params):
        # Clip keeps kernel parameters strictly positive before optimization.
        kernel_param = kernel_params_init[i].float().clip(1e-4)
        kernel_param.requires_grad_(True)
        params_intens.append(kernel_param)

    return params_intens


def momentmatching_kernel_nomark(solver, events, n_ground_events,
                                 plot_delta=False, mode='max'):
    """Moment matching initialization of kernel parameters. Implemented for
    'truncated_gaussian' and 'raised_cosine' kernels.
    For the truncated gaussian kernel, the means $m_{i,j}$ and std
    $\\sigma_{i,j}$ are:
    $m_{i, j} =
    \\frac{1}{N_{g_i}(T)}\\sum_{t_n^i \\in \\mathscr{F}_T^i}
    \\delta t^{i, j}_n$
    $\\sigma_{i, j} =
    \\sqrt{\\dfrac{
        \\sum_{t_n^i \\in \\mathscr{F}_T^i} (\\delta t^{i, j}_n - m_{i, j})^2
    }{N_{g_i}(T) - 1}}.
    For the raised cosine kernel, the parameters $u_{i,j}$ and $s_{i,j}$ are:
    $u^{\\text{m}}_{i, j} =
    \\max{(0, m^{\\text{m}}_{i, j} - \\sigma^{\\text{m}}_{i, j})}$
    $s^{\\text{m}}_{i, j} = \\sigma_{i, j}^{\\text{m}}$

    Parameters
    ----------
    solver : `FaDIn` or `MarkedFaDIn` object
        The solver object
    events : list of torch.tensor
        List of events for each dimension
    n_ground_events : torch.tensor
        Number of ground events for each dimension
    plot_delta : bool, default=False
        Whether to plot the delta_t distribution
    mode : str, default='max'
        Mode to compute the delta_t distribution. Supported values are 'max'
        and 'mean'.

    Returns
    -------
    list of torch.tensor
        List of kernel parameters

    Raises
    ------
    ValueError
        If ``mode`` is neither 'max' nor 'mean'.
    """
    if mode not in ('max', 'mean'):
        # Previously an unknown mode left `avg`/`std` undefined and crashed
        # later with a NameError; fail early with a clear message.
        raise ValueError(
            f"Unsupported mode '{mode}', expected 'max' or 'mean'"
        )
    kernel_params_init = [torch.ones(solver.n_dim, solver.n_dim),
                          torch.ones(solver.n_dim, solver.n_dim)]
    for i in range(solver.n_dim):
        n_events_i = int(n_ground_events[i].item())
        for j in range(solver.n_dim):
            # Mean, std of time delta of [i, j] kernel
            if mode == 'max':
                # Hoisted: convert events[j] to a tensor once per (i, j)
                # pair instead of once per event of dimension i.
                events_j = torch.tensor(events[j])
                delta_t = torch.zeros(n_events_i)
                for n in range(n_events_i):
                    t_n_i = events[i][n]
                    # Latest event of dim j strictly before t_n_i; falls
                    # back to 0. when none exists (assumes event times are
                    # non-negative — TODO confirm with callers).
                    t_n_j = torch.max(
                        torch.where(events_j < t_n_i, events_j, 0.)
                    )
                    delta_t[n] = t_n_i - t_n_j
                avg = torch.mean(delta_t)
                std = torch.std(delta_t)
            else:  # mode == 'mean': average over all deltas within support
                delta_t = []
                for n in range(n_events_i):
                    t_n_i = events[i][n]
                    for t_n_j in events[j]:
                        if t_n_j < t_n_i - solver.kernel_length:
                            continue
                        if t_n_j >= t_n_i:
                            break
                        delta_t.append(t_n_i - t_n_j)
                avg = torch.mean(torch.tensor(delta_t))
                std = torch.std(torch.tensor(delta_t))
            # Plot delta_t distribution
            if plot_delta:
                fig_delta, ax_delta = plt.subplots()
                ax_delta.hist(delta_t, bins=20)
                ax_delta.set_xlim([0, solver.kernel_length])
                ax_delta.set_xlabel('Time')
                ax_delta.set_ylabel('Histogram')
                fig_delta.suptitle('Moment Matching delta_t')
                fig_delta.show()
            # Parameters of [i, j] kernel
            if solver.kernel == 'truncated_gaussian':
                kernel_params_init[0][i, j] = avg
            if solver.kernel == 'raised_cosine':
                u = max(avg - std, 0)
                kernel_params_init[0][i, j] = u
            # BUGFIX: std is the second parameter for both supported
            # kernels (see the docstring's sigma_{i,j}); it was previously
            # assigned only in the 'raised_cosine' branch, leaving the
            # truncated gaussian std at its init value of 1.
            kernel_params_init[1][i, j] = std
    return kernel_params_init


def momentmatching_nomark(solver, events, n_ground_events, end_time,
                          mode='max'):
    """Moment matching initialization of baseline, alpha and kernel parameters.
    $\\mu_i^s = \\frac{\\#\\mathscr{F}^i_T}{(D+1)T} \\forall i \\in [1, D]$
    $\\alpha_{i, j}^s = \\frac{1}{D+1} \\forall i,j \\in [1, D]$
    Kernel parameters are initialized by `momentmatching_kernel_nomark`.

    Parameters
    ----------
    solver : `FaDIn` or `MarkedFaDIn` object
        The solver object; its ``kernel`` and ``n_dim`` attributes are used.
    events : list of torch.tensor
        List of events for each dimension.
    n_ground_events : torch.tensor
        Number of ground events for each dimension.
    end_time : float
        End time of the events time series.
    mode : str, default='max'
        Mode used to compute the delta_t distribution in the kernel
        initialization. Supported values are 'max' and 'mean'.

    Returns
    -------
    baseline : torch.tensor, shape (solver.n_dim,)
        Initial baseline parameter of the Hawkes process.
    alpha : torch.tensor, shape (solver.n_dim, solver.n_dim)
        Initial weight parameter of the Hawkes process.
    kernel_params_init : list of torch.tensor
        Initial kernel parameters.
    """
    # Moment matching is only derived for these two kernels.
    # NOTE(review): `assert` is stripped under `python -O`; kept as-is here
    # to preserve the exception type seen by existing callers.
    assert solver.kernel in ['truncated_gaussian', 'raised_cosine'], (
        f"Smart initialization not implemented for kernel {solver.kernel}"
    )
    # Baseline init: events of each dimension spread uniformly over (D+1)*T
    baseline = n_ground_events / (end_time * (solver.n_dim + 1))

    # Alpha init: uniform weight 1/(D+1) for every (i, j) pair
    alpha = torch.ones(solver.n_dim, solver.n_dim) / (solver.n_dim + 1)

    # Kernel parameters init
    kernel_params_init = momentmatching_kernel_nomark(
        solver, events, n_ground_events, mode=mode
    )
    return baseline, alpha, kernel_params_init


def random_params(solver):
    """Draw random initial values for baseline, alpha and kernel parameters.

    Baseline and alpha entries are sampled uniformly on [0, 1); kernel
    parameters are sampled uniformly on kernel-dependent ranges scaled by
    the kernel support length.

    Parameters
    ----------
    solver : `FaDIn` object
        Solver whose ``n_dim``, ``kernel`` and ``kernel_length`` attributes
        drive the sampling.

    Returns
    -------
    baseline : torch.tensor, shape (solver.n_dim,)
        Random baseline parameter of the Hawkes process.
    alpha : torch.tensor, shape (solver.n_dim, solver.n_dim)
        Random weight parameter of the Hawkes process.
    kernel_params_init : list of torch.tensor
        Random kernel parameters; list length depends on the kernel.
    """
    dim = solver.n_dim

    # Baseline and alpha init
    baseline = torch.rand(dim)
    alpha = torch.rand(dim, dim)

    # Per-parameter uniform scale factors (relative to the kernel support
    # length) for the kernels parameterized by two matrices.
    two_param_scales = {
        'raised_cosine': (0.5, 0.5),
        'truncated_gaussian': (0.25, 0.5),
    }
    if solver.kernel in two_param_scales:
        kernel_params_init = [
            scale * solver.kernel_length * torch.rand(dim, dim)
            for scale in two_param_scales[solver.kernel]
        ]
    elif solver.kernel == 'truncated_exponential':
        kernel_params_init = [2 * torch.rand(dim, dim)]
    else:
        raise NotImplementedError(
            'kernel initial parameters of not \
            implemented kernel have to be given'
        )
    return baseline, alpha, kernel_params_init
Loading
Loading