From 7afc3ad4373ac27b844c6aab4e4e8f4352d1d687 Mon Sep 17 00:00:00 2001
From: Colladito
Date: Fri, 10 Oct 2025 12:10:23 +0200
Subject: [PATCH 01/12] [VCF] Add function to compute block maxima keeping the
 independence hypothesis between observations

---
 bluemath_tk/distributions/pot.py | 82 ++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

diff --git a/bluemath_tk/distributions/pot.py b/bluemath_tk/distributions/pot.py
index e69de29..31039f3 100644
--- a/bluemath_tk/distributions/pot.py
+++ b/bluemath_tk/distributions/pot.py
@@ -0,0 +1,82 @@
+import numpy as np
+
+def block_maxima(
+    x: np.ndarray,
+    block_size: int | float = 365.25,
+    min_sep=2,
+    peak_kwargs=None,
+):
+    """
+    Function to obtain the Block Maxima of given size taking into account 
+    minimum independence hypothesis
+
+    Parameters
+    ----------
+    x : np.ndarray
+        Data used to compute the Block Maxima
+    block_size : int or float, default=365.25
+        Size of each block in index units (number of days for daily data), by default 365.25
+    min_sep : int, optional
+        Minimum separation between maxima in index units, by default 2
+
+    Returns
+    -------
+    idx : np.ndarray
+        Indices of BM 
+    bmaxs : np.ndarray
+        BM values
+
+    Raises
+    ------
+    ValueError
+        Minimum separation must be smaller than (block_size+1)/2
+    """
+    block_size = int(np.ceil(block_size))
+
+    if min_sep > (block_size+1) / 2:
+        raise ValueError("min_sep must be smaller than (block_size+1)/2")
+
+    x = np.asarray(x)
+    n = x.size
+
+    # Partition into non-overlapping blocks
+    n_blocks = int(np.ceil(n / block_size))
+    segments_idx = []
+    segments = []
+    blocks = []
+    # For each block, keep a *ranked* list of (idx, value) candidates (desc by value)
+    for b in range(n_blocks):
+        start = b * block_size
+        stop = min((b + 1) * block_size, n)
+        segment = x[start:stop]
+
+        candidates_idx = np.argsort(segment)[::-1]
+
+        segments_idx.append(np.arange(start, stop))
+        segments.append(segment)
+        blocks.append(candidates_idx)
+
+    def violates(i_left, i_right):
+            if i_left is None or i_right is None:
+                return False
+            return i_right - i_left < min_sep
+
+    changed = True
+    while changed:
+        changed = False
+
+        for b in range(n_blocks-1):
+            idx_left = segments_idx[b][blocks[b][0]]
+            idx_right = segments_idx[b+1][blocks[b+1][0]]
+            if not violates(idx_left, idx_right):
+                continue
+            else:
+                idx_block_adjust = 0 if segments[b][blocks[b][0]] < segments[b+1][blocks[b+1][0]] else 1
+                blocks[b + idx_block_adjust] = np.delete(blocks[b + idx_block_adjust], 0)
+                changed=True
+                break
+
+    bmaxs = np.asarray([segments[b][blocks[b][0]] for b in range(n_blocks)])
+    idx = np.asarray([segments_idx[b][blocks[b][0]] for b in range(n_blocks)])
+
+    return idx, bmaxs
\ No newline at end of file
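A usage sketch of the routine patch 01 adds (not part of the diff). It assumes the module is importable as `bluemath_tk.distributions.pot`, the path shown in the patch; the data values are invented:

    import numpy as np

    from bluemath_tk.distributions.pot import block_maxima

    # Ten years of synthetic daily data
    rng = np.random.default_rng(42)
    x = rng.lognormal(mean=1.0, sigma=1.2, size=10 * 365)

    # Annual block maxima, forcing at least 2 index units (here, days)
    # between the maxima of consecutive blocks
    idx, bmaxs = block_maxima(x, block_size=365.25, min_sep=2)

    # One maximum per block (block_size is ceiled to 366 internally),
    # and consecutive maxima respect the minimum separation
    assert len(bmaxs) == int(np.ceil(x.size / 366))
    assert np.all(np.diff(idx) >= 2)

    # The independence rule in action: the raw maxima of the two blocks
    # below sit on adjacent indices across the block boundary, so the
    # smaller one (y[2] = 5.0) is demoted to its block's next-best
    # candidate (y[1] = 1.0)
    y = np.array([0.0, 1.0, 5.0, 6.0, 2.0, 0.5])
    idx2, bm2 = block_maxima(y, block_size=3, min_sep=2)
    assert list(idx2) == [1, 3] and list(bm2) == [1.0, 6.0]

The second example is the whole point of the candidate-demotion loop: rather than dropping a block, the function walks down that block's ranked candidate list until adjacent block maxima are at least `min_sep` apart.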
From 5c88336ae0b29714ed32c72f04e35631129a4abd Mon Sep 17 00:00:00 2001
From: Colladito
Date: Fri, 10 Oct 2025 12:10:52 +0200
Subject: [PATCH 02/12] [VCF] Ruff pot.py

---
 bluemath_tk/distributions/pot.py | 32 +++++++++++++++++++++----------
 1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/bluemath_tk/distributions/pot.py b/bluemath_tk/distributions/pot.py
index 31039f3..780259f 100644
--- a/bluemath_tk/distributions/pot.py
+++ b/bluemath_tk/distributions/pot.py
@@ -1,13 +1,13 @@
 import numpy as np
 
+
 def block_maxima(
     x: np.ndarray,
     block_size: int | float = 365.25,
     min_sep=2,
-    peak_kwargs=None,
 ):
     """
-    Function to obtain the Block Maxima of given size taking into account 
+    Function to obtain the Block Maxima of given size taking into account
     minimum independence hypothesis
 
     Parameters
     ----------
     x : np.ndarray
         Data used to compute the Block Maxima
     block_size : int or float, default=365.25
         Size of each block in index units (number of days for daily data), by default 365.25
     min_sep : int, optional
         Minimum separation between maxima in index units, by default 2
 
     Returns
     -------
     idx : np.ndarray
-        Indices of BM 
+        Indices of BM
     bmaxs : np.ndarray
         BM values
@@ -33,7 +33,7 @@
     """
     block_size = int(np.ceil(block_size))
 
-    if min_sep > (block_size+1) / 2:
+    if min_sep > (block_size + 1) / 2:
         raise ValueError("min_sep must be smaller than (block_size+1)/2")
 
     x = np.asarray(x)
     n = x.size
@@ -57,26 +57,32 @@
     blocks.append(candidates_idx)
 
     def violates(i_left, i_right):
-            if i_left is None or i_right is None:
-                return False
-            return i_right - i_left < min_sep
+        if i_left is None or i_right is None:
+            return False
+        return i_right - i_left < min_sep
 
     changed = True
     while changed:
         changed = False
 
-        for b in range(n_blocks-1):
+        for b in range(n_blocks - 1):
             idx_left = segments_idx[b][blocks[b][0]]
-            idx_right = segments_idx[b+1][blocks[b+1][0]]
+            idx_right = segments_idx[b + 1][blocks[b + 1][0]]
             if not violates(idx_left, idx_right):
                 continue
             else:
-                idx_block_adjust = 0 if segments[b][blocks[b][0]] < segments[b+1][blocks[b+1][0]] else 1
-                blocks[b + idx_block_adjust] = np.delete(blocks[b + idx_block_adjust], 0)
-                changed=True
+                idx_block_adjust = (
+                    0
+                    if segments[b][blocks[b][0]] < segments[b + 1][blocks[b + 1][0]]
+                    else 1
+                )
+                blocks[b + idx_block_adjust] = np.delete(
+                    blocks[b + idx_block_adjust], 0
+                )
+                changed = True
                 break
 
     bmaxs = np.asarray([segments[b][blocks[b][0]] for b in range(n_blocks)])
     idx = np.asarray([segments_idx[b][blocks[b][0]] for b in range(n_blocks)])
 
-    return idx, bmaxs
\ No newline at end of file
+    return idx, bmaxs

From 474856a9a7d0176ae0823e6729007bed6da2f34c Mon Sep 17 00:00:00 2001
From: Colladito
Date: Fri, 10 Oct 2025 12:14:23 +0200
Subject: [PATCH 03/12] [VCF] Add example in block_maxima

---
 bluemath_tk/distributions/pot.py | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/bluemath_tk/distributions/pot.py b/bluemath_tk/distributions/pot.py
index 780259f..7ad6a37 100644
--- a/bluemath_tk/distributions/pot.py
+++ b/bluemath_tk/distributions/pot.py
@@ -4,7 +4,7 @@ def block_maxima(
     x: np.ndarray,
     block_size: int | float = 365.25,
-    min_sep=2,
+    min_sep: int = 2,
 ):
     """
     Function to obtain the Block Maxima of given size taking into account
@@ -30,6 +30,18 @@ def block_maxima(
     ------
     ValueError
         Minimum separation must be smaller than (block_size+1)/2
+
+    Example
+    -------
+    >>> # One year of daily values
+    >>> x = np.random.lognormal(1, 1.2, size=365)
+
+    >>> # 5-day Block Maxima with 72 h of independence
+    >>> idx, bmaxs = block_maxima(
+    ...     x,
+    ...     block_size=5,
+    ...     min_sep=3
+    ... )
     """
     block_size = int(np.ceil(block_size))

From da26136ccc08a26279d43ba2e34a64033517f1a3 Mon Sep 17 00:00:00 2001
From: Colladito
Date: Fri, 10 Oct 2025 12:41:45 +0200
Subject: [PATCH 04/12] [VCF] Add return hint in block_maxima

---
 bluemath_tk/distributions/pot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bluemath_tk/distributions/pot.py b/bluemath_tk/distributions/pot.py
index 7ad6a37..89de828 100644
--- a/bluemath_tk/distributions/pot.py
+++ b/bluemath_tk/distributions/pot.py
@@ -5,7 +5,7 @@ def block_maxima(
     x: np.ndarray,
     block_size: int | float = 365.25,
     min_sep: int = 2,
-):
+) -> tuple[np.ndarray, np.ndarray]:
     """
     Function to obtain the Block Maxima of given size taking into account
     minimum independence hypothesis

From 167b2b814d984444a2d3ef09f67122c118c0b531 Mon Sep 17 00:00:00 2001
From: Colladito
Date: Fri, 10 Oct 2025 17:21:40 +0200
Subject: [PATCH 05/12] [VCF] Update summary and change errors in optimizer

---
 bluemath_tk/distributions/nonstat_gev.py | 12 ++++++------
 1 
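The nonstat_gev.py patches that follow draw 10/50/100-year return-level curves from `self._quantile(1 - 1/T)` and monthly aggregates from `self._aggquantile(1 - 1/T, t0, t1)`. As a reference for what a per-time-step quantile means here, a minimal sketch using scipy's `genextreme` with the same `c = -xi` sign convention the diff adopts; the parameter values below are invented:

    import numpy as np
    from scipy.stats import genextreme

    # Time-varying GEV parameters at a few time steps (illustrative values)
    mu_t = np.array([2.0, 2.3, 2.1])    # location
    psi_t = np.array([0.5, 0.6, 0.55])  # scale
    xi_t = np.array([0.1, 0.1, 0.1])    # shape

    # T-year return level at each time step: the (1 - 1/T) quantile.
    # scipy's genextreme takes c = -xi relative to the Coles (2001)
    # convention, matching the genextreme.sf/pdf calls added below.
    T = 100
    rp100_t = genextreme.ppf(1 - 1 / T, c=-xi_t, loc=mu_t, scale=psi_t)

    # Exceedance probability of a fixed level, as used for the contour
    # shading in the plot: sf = 1 - CDF
    sf = genextreme.sf(4.0, c=-xi_t, loc=mu_t, scale=psi_t)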
file changed, 6 insertions(+), 6 deletions(-) diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py index 03d40ac..01e8a99 100644 --- a/bluemath_tk/distributions/nonstat_gev.py +++ b/bluemath_tk/distributions/nonstat_gev.py @@ -1282,9 +1282,9 @@ def _fit( # Fitting options if options is None: options = { - "gtol": 1e-5, - "xtol": 1e-5, - "barrier_tol": 1e-4, + "gtol": 1e-8, + "xtol": 1e-8, + "barrier_tol": 1e-6, "maxiter": 500, } @@ -7319,7 +7319,7 @@ def format_line(name, value, std_err): for i in range(self.nind_loc): print( - format_line(f"Beta_cov{i + 1}", self.beta_cov[i], std_params[param_idx]) + format_line(f"{self.covariates.columns[self.list_loc[i]]}", self.beta_cov[i], std_params[param_idx]) ) param_idx += 1 @@ -7352,7 +7352,7 @@ def format_line(name, value, std_err): for i in range(self.nind_sc): print( format_line( - f"Alpha_cov{i + 1}", self.alpha_cov[i], std_params[param_idx] + f"{self.covariates.columns[self.list_sc[i]]}", self.alpha_cov[i], std_params[param_idx] ) ) param_idx += 1 @@ -7387,7 +7387,7 @@ def format_line(name, value, std_err): for i in range(self.nind_sh): print( format_line( - f"Gamma_cov{i + 1}", self.gamma_cov[i], std_params[param_idx] + f"{self.covariates.columns[self.list_sh[i]]}", self.gamma_cov[i], std_params[param_idx] ) ) param_idx += 1 From 25d24ede8aa98772509468369b752b603a9c4847 Mon Sep 17 00:00:00 2001 From: Colladito Date: Fri, 10 Oct 2025 18:33:16 +0200 Subject: [PATCH 06/12] [VCF] Plot improvements with code of Tomas Carlotto --- bluemath_tk/distributions/nonstat_gev.py | 105 ++++++++++++++++++----- 1 file changed, 83 insertions(+), 22 deletions(-) diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py index 01e8a99..01ede5c 100644 --- a/bluemath_tk/distributions/nonstat_gev.py +++ b/bluemath_tk/distributions/nonstat_gev.py @@ -6,7 +6,7 @@ import pandas as pd from scipy.integrate import quad from scipy.optimize import minimize, root_scalar -from scipy.stats import norm +from scipy.stats import norm, genextreme from ..core.models import BlueMathModel from ..core.plotting.colors import default_colors @@ -5012,23 +5012,23 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 rp100 = self._quantile(1 - 1 / 100) if ( - self.betaT.size == 0 - and self.alphaT.size == 0 - and self.gammaT.size == 0 - and self.beta_cov.size == 0 - and self.alpha_cov.size == 0 - and self.gamma_cov.size == 0 + self.ntrend_loc == 0 + and self.ntrend_sc == 0 + and self.ntrend_sh == 0 + and self.nind_loc == 0 + and self.nind_sc == 0 + and self.nind_sh == 0 ): fig, ax1 = plt.subplots(figsize=(10, 6)) ############# - month_initials = ["J", "A", "S", "O", "N", "D", "J", "F", "M", "A", "M", "J"] - month_positions = [(i + 0.5) / 12 for i in range(12)] + month_initials = ["Jul", "Aug", "Sep", "Oct", "Nov", "Dec", "Jan", "Feb", "Mar", "Apr", "May", "Jun"] + month_positions = [i / 12 for i in range(12)] # Reorder the return periods to start from July - rt_10 = np.zeros(12) - rt_50 = np.zeros(12) - rt_100 = np.zeros(12) + rt_10 = np.zeros(13) + rt_50 = np.zeros(13) + rt_100 = np.zeros(13) for i in range(12): # Map i to the correct month index (July = 0, June = 11) month_idx = (i + 6) % 12 @@ -5036,21 +5036,49 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 rt_50[i] = self._aggquantile(1-1/50, month_idx/12, (month_idx+1)/12) rt_100[i] = self._aggquantile(1-1/100, month_idx/12, (month_idx+1)/12) + rt_10[12] = rt_10[0] + rt_50[12] = 
rt_50[0] + rt_100[12] = rt_100[0] + # For the data points, shift the time values by 0.5 years t_shifted = t_anual.copy() t_shifted[t_anual < 0.5] += 0.5 t_shifted[t_anual >= 0.5] -= 0.5 t_ord = np.argsort(t_shifted) + + ### Added by Tomas Carlotto + # Creating variables to store the evolution of pdf and sf over time + var_grid_resolution = 50 + # t_anual_ord = t_anual[t_ord] + mu_t = mut[t_ord] + psi_t = psit[t_ord] + xi_t = epst[t_ord] + # Definition of the Hs value grid + lim_max = np.max(self.xt)+1 + lim_min = np.min(self.xt) + hvar = np.linspace(lim_min, lim_max, var_grid_resolution) + t_grid, x_grid = np.meshgrid(t_shifted[t_ord], hvar) + + # Calculating the 1-CDF for each grid point (Exceedance Probabilities) + sf = np.array([genextreme.sf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) for i in range(len(t_shifted[t_ord]))]).T + # Calculating the Probability Density Function (pdf) for each grid point + pdf = np.array([genextreme.pdf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) for i in range(len(t_shifted[t_ord]))]).T + + cf = ax1.contourf(t_shifted[t_ord], hvar, sf, levels=50, cmap="viridis_r") + cbar = fig.colorbar(cf, ax=ax1) + cbar.set_label("Exceedance probability", fontsize=12) + #=============== # Use t_shifted for plotting data points ax1.plot( t_shifted[t_ord], self.xt[t_ord], - marker="+", + marker="o", linestyle="None", color="black", markersize=5, label="Data", + alpha=0.9 ) # Use t_shifted for other lines as well @@ -5072,23 +5100,24 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 alpha=0.3, ) + month_positions_aux = [i/12 for i in range(13)] ax1.plot( - month_positions, rt_10, linestyle="-", linewidth=1, label="10 years", color="tab:red" + month_positions_aux, rt_10, linestyle="-", linewidth=1, label="10 years", color="tab:red" ) ax1.plot( - month_positions, rt_50, linestyle="-", linewidth=1, label="50 years", color="tab:purple" + month_positions_aux, rt_50, linestyle="-", linewidth=1, label="50 years", color="tab:purple" ) ax1.plot( - month_positions, rt_100, linestyle="-", linewidth=1, label="100 years", color="tab:green" + month_positions_aux, rt_100, linestyle="-", linewidth=1, label="100 years", color="tab:green" ) ax1.set_title(f"Parameters Evolution ({self.var_name})") - ax1.set_xlabel("Time") - ax1.set_ylabel(r"$\mu_t$") + ax1.set_xlabel("Time (Months)") + ax1.set_ylabel(f"{self.var_name}") ax1.grid(True) ax1.legend(loc="best") - ax1.margins(x=0.01) - ax1.set_xticks(month_positions, month_initials) + ax1.set_xlim(0,1) + ax1.set_xticks(month_positions, month_initials, rotation=45) if save: plt.savefig( f"Figures/Adjustment_Evolution_{self.var_name}.png", @@ -5197,8 +5226,8 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 ###### 1st Year PLOT - month_initials = ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"] - month_positions = [(i + 0.5) / 12 for i in range(12)] + # month_initials = ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"] + # month_positions = [(i + 0.5) / 12 for i in range(12)] #### Creating the first year plot mask_year = (self.t >= 0) & (self.t <= 1) @@ -5510,10 +5539,42 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 #### Parameters Heatmap plot # self.paramplot(save=save) + #### 3D plot of pdf + if ( + self.ntrend_loc == 0 + and self.ntrend_sc == 0 + and self.ntrend_sh == 0 + and self.nind_loc == 0 + and self.nind_sc == 0 + and self.nind_sh == 0 + ): + mu_t = mut[t_ord] + psi_t = psit[t_ord] + xi_t = 
epst[t_ord] + # Definition of the Hs value grid + lim_max = np.max(self.xt)+1 + lim_min = np.min(self.xt) + hvar = np.linspace(lim_min, lim_max, var_grid_resolution) + t_grid, x_grid = np.meshgrid(t_shifted[t_ord], hvar) + # Calculating the Probability Density Function (pdf) for each grid point + pdf = np.array([genextreme.pdf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) for i in range(len(t_shifted[t_ord]))]).T + + fig = plt.figure(figsize=(15, 6)) + ax = fig.add_subplot(projection='3d') + ax.plot_surface(t_grid, x_grid, pdf, cmap="viridis_r") + ax.set_zlim(0, 1) + ax.set_xlabel("Time (Months)") + ax.set_ylabel(f"{self.var_name}") + ax.set_zlabel("PDF") + ax.set_xticks(month_positions, month_initials) + ax.view_init(elev=40, azim=-25) + plt.show() + #### Return periods if ( self.ntrend_loc == 0 and self.ntrend_sc == 0 + and self.ntrend_sh == 0 and self.nind_loc == 0 and self.nind_sc == 0 and self.nind_sh == 0 From 76d553c43f7e4d39b008995a8168105eca3fe02c Mon Sep 17 00:00:00 2001 From: Colladito Date: Fri, 10 Oct 2025 22:54:02 +0200 Subject: [PATCH 07/12] [VCF] Remove error in .fit method --- bluemath_tk/distributions/nonstat_gev.py | 48 ++++++++++++------------ 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py index 01ede5c..8d426d4 100644 --- a/bluemath_tk/distributions/nonstat_gev.py +++ b/bluemath_tk/distributions/nonstat_gev.py @@ -2918,9 +2918,6 @@ def fit( """ self.logger.debug("Fixed fit (.fit())") - if nmu % 2 != 0 or npsi % 2 != 0 or ngamma % 2 != 0: - raise ValueError("Check the input of harmonics, must be even!") - if list_loc == "all": list_loc = list(range(self.covariates.shape[1])) elif list_loc is None: @@ -5187,26 +5184,27 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 ) # Aggregated return period lines - # n_years = int(np.ceil(self.t[-1])) - # rt_10 = np.zeros(n_years) - # for year in range(n_years): - # rt_10[year] = self._aggquantile(1-1/10, year, year+1) # 10-year return level at each year - # rt_50 = np.zeros(n_years) - # for year in range(n_years): - # rt_50[year] = self._aggquantile(1-1/50, year, year+1) # 50-year return level at each year - # rt_100 = np.zeros(n_years) - # for year in range(n_years): - # rt_100[year] = self._aggquantile(1-1/100, year, year+1) # 100-year return level at each year - - # ax1.plot( - # np.arange(init_year,init_year+n_years), rt_10, linestyle="-", linewidth=1, label="10 years", color="tab:red" - # ) - # ax1.plot( - # np.arange(init_year,init_year+n_years), rt_50, linestyle="-", linewidth=1, label="50 years", color="tab:purple" - # ) - # ax1.plot( - # np.arange(init_year, init_year+n_years), rt_100, linestyle="-", linewidth=1, label="100 years", color="tab:green" - # ) + if return_plot: + n_years = int(np.ceil(self.t[-1])) + rt_10 = np.zeros(n_years) + for year in range(n_years): + rt_10[year] = self._aggquantile(1-1/10, year, year+1) # 10-year return level at each year + rt_50 = np.zeros(n_years) + for year in range(n_years): + rt_50[year] = self._aggquantile(1-1/50, year, year+1) # 50-year return level at each year + rt_100 = np.zeros(n_years) + for year in range(n_years): + rt_100[year] = self._aggquantile(1-1/100, year, year+1) # 100-year return level at each year + + ax1.plot( + np.arange(init_year,init_year+n_years), rt_10, linestyle="-", linewidth=1, label="10 years", color="tab:red" + ) + ax1.plot( + np.arange(init_year,init_year+n_years), rt_50, linestyle="-", linewidth=1, label="50 
years", color="tab:purple" + ) + ax1.plot( + np.arange(init_year, init_year+n_years), rt_100, linestyle="-", linewidth=1, label="100 years", color="tab:green" + ) ax1.set_xlabel("Time (years)") ax1.set_ylabel(rf"$\mu_t(m)$, {self.var_name}") @@ -5226,8 +5224,8 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 ###### 1st Year PLOT - # month_initials = ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"] - # month_positions = [(i + 0.5) / 12 for i in range(12)] + month_initials = ["Jul", "Aug", "Sep", "Oct", "Nov", "Dec", "Jan", "Feb", "Mar", "Apr", "May", "Jun"] + month_positions = [i / 12 for i in range(12)] #### Creating the first year plot mask_year = (self.t >= 0) & (self.t <= 1) From c1ea159442de9acf555517d151c1cb39f95b8106 Mon Sep 17 00:00:00 2001 From: Colladito Date: Sat, 11 Oct 2025 14:07:05 +0200 Subject: [PATCH 08/12] [VCF] Fix errors in NonStatGEV Hessian --- bluemath_tk/distributions/nonstat_gev.py | 32 ++++++++++++------------ 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py index 8d426d4..9457a56 100644 --- a/bluemath_tk/distributions/nonstat_gev.py +++ b/bluemath_tk/distributions/nonstat_gev.py @@ -3384,23 +3384,23 @@ def _loglikelihood( + 2 + (-2 - epst * (3 + epst) * xn) * z ** (1 / epst) ) - + (z / (epst * epst)) + + (z / (epst ** 2)) * np.log(z) * ( 2 * epst * (-xn * (1 + epst) - 1 + z ** (1 + 1 / epst)) + z * np.log(z) ) ) - / (epst * epst * z**2) + / ((epst * z)**2) ) Dmutpsit = -(1 + epst - (1 - xn) * zn) / ((z * psit) ** 2) Dmutepst = ( -zn * ( - epst * (-(1 + epst) * xn - epst * (1 - xn) * z ** (1 / epst)) + epst * (-(1 + epst) * xn - epst * (1 - xn) / zn) + z * np.log(z) ) - / (epst * epst * psit * z**2) + / (psit * epst**2 * z**2) ) Dpsitepst = xn * Dmutepst @@ -3655,7 +3655,7 @@ def _loglikelihood( + ntrend_sc + nind_sc, 2 + nmu + ntrend_loc + nind_loc + npsi, - ] = np.sum(Dpsitepst * psit * self.t**2) + ] = np.sum(Dmutepst * self.t**2) if ntrend_sh > 0 and ntrend_sc > 0: # Sub-block number, alphaT*gammaT Hxx[ @@ -3669,7 +3669,7 @@ def _loglikelihood( + ntrend_sc + nind_sc, 1 + nmu, - ] = np.sum(Dmutepst * self.t**2) + ] = np.sum(Dpsitepst * self.t**2 * psit) # Sub-block number 13, beta0*beta_cov_i if nind_loc > 0: for i in range(nind_loc): @@ -3738,7 +3738,7 @@ def _loglikelihood( + nind_sc, 1 + nmu + ntrend_loc + i, ] = np.sum(Dmutepst * covariates_loc[:, i] * self.t) - # Sub-block number (Scale exponential involved), gamma_cov_i*gammaT + # Sub-block number, gamma_cov_i*gammaT if nind_sh > 0 and ntrend_sh > 0: for i in range(nind_sh): Hxx[ @@ -4351,13 +4351,13 @@ def _loglikelihood( + j, 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + i, ] = np.sum( - Dmutepst * covariates_sc[:, i] * covariates_sh[:, j] * psit + Dpsitepst * covariates_sc[:, i] * covariates_sh[:, j] * psit ) if nind_sh > 0 and ntrend_loc > 0: for i in range(nind_sh): - aux = 0 - for k, tt in enumerate(self.t): - aux += Dmutepst[k] * tt * covariates_sh[k, i] + # aux = 0 + # for k, tt in enumerate(self.t): + # aux += Dmutepst[k] * tt * covariates_sh[k, i] # Sub-block added by Victor, betaT*gamma_cov_i Hxx[ 2 @@ -4372,7 +4372,7 @@ def _loglikelihood( + ntrend_sh + i, 1 + nmu, - ] = aux + ] = np.sum(Dmutepst * self.t * covariates_sh[:, i]) if ntrend_sc > 0: for i in range(npsi): aux = 0 @@ -4498,9 +4498,9 @@ def _loglikelihood( 1 + nmu + ntrend_loc + nind_loc, ] = np.sum(Dpsitepst * psit * covariates_sh[:, i]) - aux = 0 - for k, tt in enumerate(self.t): - 
aux += Dpsitepst[k] * tt * covariates_sh[k, i] * psit[k] + # aux = 0 + # for k, tt in enumerate(self.t): + # aux += Dpsitepst[k] * tt * covariates_sh[k, i] * psit[k] # Sub-bloc added by Victor (scale exponential involved), alphaT*gamma_cov_i Hxx[ 2 @@ -4515,7 +4515,7 @@ def _loglikelihood( + ntrend_sh + i, 1 + nmu + npsi + ntrend_loc + nind_loc, - ] = aux + ] = np.sum(Dpsitepst * self.t * covariates_sh[:, i] * psit) # Simmetric part of the Hessian Hxx = Hxx + np.tril(Hxx, -1).T From 27f78d63ad8d011c459ade9d4a6e34404158afd9 Mon Sep 17 00:00:00 2001 From: Colladito Date: Sat, 11 Oct 2025 15:09:48 +0200 Subject: [PATCH 09/12] [VCF] Add analytical hessian in the optimizer --- bluemath_tk/distributions/nonstat_gev.py | 1827 +++++++++++++++++++++- 1 file changed, 1824 insertions(+), 3 deletions(-) diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py index 9457a56..d39238f 100644 --- a/bluemath_tk/distributions/nonstat_gev.py +++ b/bluemath_tk/distributions/nonstat_gev.py @@ -1285,7 +1285,7 @@ def _fit( "gtol": 1e-8, "xtol": 1e-8, "barrier_tol": 1e-6, - "maxiter": 500, + "maxiter": 1000, } # Total number of parameters to be estimated @@ -1485,7 +1485,7 @@ def _fit( result = minimize( fun=self._auxmin_loglikelihood, jac=self._auxmin_loglikelihood_grad, # Gradient information - hess="2-point", # Hessian information, if applicable + hess=self._auxmin_loglikelihood_hess, # Hessian information, if applicable x0=x_ini, bounds=bounds, args=( @@ -1530,7 +1530,7 @@ def _fit( result = minimize( fun=self._auxmin_loglikelihood, jac=self._auxmin_loglikelihood_grad, # Gradient information - hess="2-point", # Hessian information, if applicable + hess=self._auxmin_loglikelihood_hess, # Hessian information, if applicable x0=fit_result["x"], bounds=bounds, args=( @@ -2858,6 +2858,1827 @@ def _auxmin_loglikelihood_grad( Jx = -Jx return Jx + def _auxmin_loglikelihood_hess( + self, + x, + nmu, + npsi, + ngamma, + ntrend_loc=0, + list_loc=[], + ntrend_sc=0, + list_sc=[], + ntrend_sh=0, + list_sh=[], + ) -> np.ndarray: + """ + Function used for minimizing in the 'self._optimize_parameters' where the Negative loglikelihood of the GEV will be minimized + + Parameters + ---------- + x : np.ndarray + Parameter vector to optimize + nmu : int, default=0 + Number of harmonics included in location + npsi : int, default=0 + Number of harmonics included in scale + ngamma : int, default=0 + Number of harmonics included in shape + ntrend_loc : int, default=0 + Whether to include trends in location + list_loc : list, default=[] + List of covariates indices to include in location + ntrend_sc : int, default=0 + Whether to include trends in scale + list_sc : list, default=[] + List of covariates indices to include in scale + ntrend_sh : int, default=0 + Whether to include trends in shape + list_sh : list, default=[] + List of covariates indices to include in shape + + Return + ------ + Jx : np.ndarray + Gradient of negative loglikelihood value of the Non-stationary GEV + """ + # Cheking the inputs + covariates_loc = self.covariates.iloc[:, list_loc].values + covariates_sc = self.covariates.iloc[:, list_sc].values + covariates_sh = self.covariates.iloc[:, list_sh].values + + # Check consistency of the data + na1, nind_loc = covariates_loc.shape + if nind_loc > 0 and na1 > 0: + if na1 != len(self.xt) or na1 != len(self.t) or len(self.xt) != len(self.t): + ValueError("Check data x, t, indices: funcion aux loglikelihood") + + na2, nind_sc = covariates_sc.shape + if nind_sc > 0 and na2 > 0: + if 
na2 != len(self.xt) or na2 != len(self.t) or len(self.xt) != len(self.t): + ValueError("Check data x, t, indices: funcion aux loglikelihood") + + na3, nind_sh = covariates_sh.shape + if nind_sh > 0 and na3 > 0: + if na3 != len(self.xt) or na3 != len(self.t) or len(self.xt) != len(self.t): + ValueError("Check data x, t, indices: funcion aux loglikelihood") + + # Evaluate the location parameter at each time t as a function of the actual values of the parameters given by x + if ntrend_loc == 0 and nind_loc == 0: + mut1 = self._parametro(x[0], x[1 : 1 + nmu]) # beta0, beta + elif ntrend_loc == 0 and nind_loc != 0: + mut1 = self._parametro( + x[0], + x[1 : 1 + nmu], + None, + x[1 + nmu + ntrend_loc : 1 + nmu + ntrend_loc + nind_loc], + covariates_loc, + ) # beta0, beta, beta_cov + elif ntrend_loc != 0 and nind_loc == 0: + mut1 = self._parametro( + x[0], x[1 : 1 + nmu], x[1 + nmu] + ) # beta0, beta, betaT + else: + mut1 = self._parametro( + x[0], + x[1 : 1 + nmu], + x[1 + nmu], + x[1 + nmu + ntrend_loc : 1 + nmu + ntrend_loc + nind_loc], + covariates_loc, + ) # beta0, beta, betaT, beta_cov + + # Evaluate the scale parameter at each time t as a function of the actual values of the parameters given by x + if ntrend_sc == 0 and nind_sc == 0: + psit1 = np.exp( + self._parametro( + x[1 + nmu + ntrend_loc + nind_loc], + x[ + 2 + nmu + ntrend_loc + nind_loc : 2 + + nmu + + ntrend_loc + + nind_loc + + npsi + ], + ) + ) # alpha0, alpha + elif ntrend_sc == 0 and nind_sc != 0: + psit1 = np.exp( + self._parametro( + x[1 + nmu + ntrend_loc + nind_loc], + x[ + 2 + nmu + ntrend_loc + nind_loc : 2 + + nmu + + ntrend_loc + + nind_loc + + npsi + ], + None, + x[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc : 2 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + ], + covariates_sc, + ) + ) # alpha0, alpha, alpha_cov + elif ntrend_sc != 0 and nind_sc == 0: + psit1 = np.exp( + self._parametro( + x[1 + nmu + ntrend_loc + nind_loc], + x[ + 2 + nmu + ntrend_loc + nind_loc : 2 + + nmu + + ntrend_loc + + nind_loc + + npsi + ], + x[2 + nmu + ntrend_loc + nind_loc + npsi], + ) + ) # alpha0, alpha, alphaT + else: + psit1 = np.exp( + self._parametro( + x[1 + nmu + ntrend_loc + nind_loc], + x[ + 2 + nmu + ntrend_loc + nind_loc : 2 + + nmu + + ntrend_loc + + nind_loc + + npsi + ], + x[2 + nmu + ntrend_loc + nind_loc + npsi], + x[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc : 2 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + ], + covariates_sc, + ) + ) # alpha0, alpha, alphaT, alpha_cov + + # Evaluate the shape parameter at each time t as a function of the actual values of the parameters given by x + if self.ngamma0 != 0: + if ntrend_sh == 0 and nind_sh == 0: + epst = self._parametro( + x[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + nind_sc], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + ) # gamma0, gamma + elif ntrend_sh == 0 and nind_sh != 0: + epst = self._parametro( + x[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + nind_sc], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + None, + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + 
nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + + nind_sh + ], + covariates_sh, + ) # gamma0, gamma, gamma_cov + elif ntrend_sh != 0 and nind_sh == 0: + epst = self._parametro( + x[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + nind_sc], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + ) # gamma0, gamma, gammaT + else: + epst = self._parametro( + x[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + nind_sc], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + + nind_sh + ], + covariates_sh, + ) # gamma0, gamma, gammaT, gamma_cov + # If intercept in shape is not included + else: + if ntrend_sh == 0 and nind_sh == 0: + epst = self._parametro( + None, + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + ) # gamma + elif ntrend_sh == 0 and nind_sh != 0: + epst = self._parametro( + None, + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + None, + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + + nind_sh + ], + covariates_sh, + ) # gamma, gamma_cov + elif ntrend_sh != 0 and nind_sh == 0: + epst = self._parametro( + None, + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + ) # gamma, gammaT + else: + epst = self._parametro( + None, + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ], + x[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma : 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + + nind_sh + ], + covariates_sh, + ) # gamma, gammaT, gamma_cov + + # The values whose shape parameter is almost 0 correspond to the Gumbel distribution + posG = np.where(np.abs(epst) <= 1e-8)[0] + # The remaining values correspond to Weibull or Frechet + pos = np.where(np.abs(epst) > 1e-8)[0] + + # The corresponding Gumbel values are set to 1 to avoid numerical problems, 
note that in this case, the Gumbel expressions are used + epst[posG] = 1 + + # Modify the parameters to include the length of the data + mut = mut1 + psit = psit1 + mut[pos] = mut1[pos] + psit1[pos] * (self.kt[pos] ** epst[pos] - 1) / epst[pos] + psit[pos] = psit1[pos] * self.kt[pos] ** epst[pos] + # Modify the parameters to include the length of the data in Gumbel + mut[posG] = mut[posG] + psit[posG] * np.log(self.kt[posG]) + + # Evaluate auxiliarly variables + xn = (self.xt - mut) / psit + z = 1 + epst * xn + + # Since z-values must be greater than 0 in order to avoid numerical problems, their values are set to be greater than 1e-8 + z = np.maximum(1e-8, z) + zn = z ** (-1 / epst) + + # Evaluate the loglikelihood function, not that the general and Gumbel expressions are used + f = -np.sum( + -np.log(self.kt[pos]) + + np.log(psit[pos]) + + (1 + 1 / epst[pos]) * np.log(z[pos]) + + self.kt[pos] * zn[pos] + ) - np.sum( + -np.log(self.kt[posG]) + + np.log(psit[posG]) + + xn[posG] + + self.kt[posG] * np.exp(-xn[posG]) + ) + + ### Gradient of the loglikelihood + # Derivatives given by equations (A.1)-(A.3) in the paper + Dmut = (1 + epst - self.kt * zn) / (psit * z) + Dpsit = -(1 - xn * (1 - self.kt * zn)) / (psit * z) + Depst = ( + zn + * ( + xn * (self.kt - (1 + epst) / zn) + + z * (-self.kt + 1 / zn) * np.log(z) / epst + ) + / (epst * z) + ) + + # Gumbel derivatives given by equations (A.4)-(A.5) in the paper + Dmut[posG] = (1 - self.kt[posG] * np.exp(-xn[posG])) / psit[posG] + Dpsit[posG] = (xn[posG] - 1 - self.kt[posG] * xn[posG] * np.exp(-xn[posG])) / ( + psit[posG] + ) + Depst[posG] = 0 + + ## New Derivatives + Dmutastmut = np.ones_like(self.kt) + Dmutastpsit = (-1 + self.kt**epst) / epst + Dmutastepst = ( + psit1 * (1 + (self.kt**epst) * (epst * np.log(self.kt) - 1)) / (epst**2) + ) + + Dpsitastpsit = self.kt**epst + Dpsitastepst = np.log(self.kt) * psit1 * (self.kt**epst) + + Dmutastpsit[posG] = np.log(self.kt[posG]) + Dmutastepst[posG] = 0 + + Dpsitastpsit[posG] = 1 + Dpsitastepst[posG] = 0 + + # Set the Jacobian to zero + Jx = np.zeros( + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + nind_sh + ) + # Jacobian elements related to the location parameters beta0 and beta, equation (A.6) in the paper + Jx[0] = np.dot(Dmut, Dmutastmut) + + # If location harmonics are included + if nmu > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmut[k] * Dmutastmut[k] * self._Dparam(tt, i + 1) + Jx[i + 1] = aux + + # Jacobian elements related to the location parameters betaT, beta_cov (equation A.9) + if ntrend_loc > 0: + Jx[1 + nmu] = np.sum(Dmut * self.t * Dmutastmut) # betaT + if nind_loc > 0: + for i in range(nind_loc): + Jx[1 + nmu + ntrend_loc + i] = np.sum( + Dmut * covariates_loc[:, i] * Dmutastmut + ) # beta_cov_i + + # Jacobian elements related to the scale parameters alpha0, alpha (equation A.7) + Jx[1 + nmu + ntrend_loc + nind_loc] = np.sum( + psit1 * (Dpsit * Dpsitastpsit + Dmut * Dmutastpsit) + ) # alpha0 + # If scale harmonic are included + if npsi > 0: + for i in range(npsi): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + self._Dparam(tt, i + 1) + * psit1[k] + * (Dpsit[k] * Dpsitastpsit[k] + Dmut[k] * Dmutastpsit[k]) + ) + Jx[2 + nmu + ntrend_loc + nind_loc + i] = aux # alpha + # Jacobian elements related to the scale parameters alphaT and beta_cov (equation A.10) + if ntrend_sc > 0: + Jx[2 + nmu + ntrend_loc + nind_loc + npsi] = np.sum( + (Dpsit * Dpsitastpsit + Dmut * 
Dmutastpsit) * self.t * psit1 + ) # alphaT + if nind_sc > 0: + for i in range(nind_sc): + Jx[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i] = np.sum( + (Dpsit * Dpsitastpsit + Dmut * Dmutastpsit) + * covariates_sc[:, i] + * psit1 + ) # alpha_cov + + # Jacobian elements related to the shape parameters gamma0 and gamma (equation A.10) + if self.ngamma0 == 1: + Jx[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + nind_sc] = np.sum( + Depst + Dpsit * Dpsitastepst + Dmut * Dmutastepst + ) + # If shape harmonics are included + if ngamma > 0: + for i in range(ngamma): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Depst[k] + Dpsit[k] * Dpsitastepst[k] + Dmut[k] * Dmutastepst[k] + ) * self._Dparam(tt, i + 1) + Jx[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + i + ] = aux + + # Jacobian elements related to the shape parameters trend (defined by Victor) + if ntrend_sh > 0: + Jx[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + ] = np.sum(Depst * self.t) + + # Jacobian elements related to the shape parameters gamma_cov (defined by Victor) + if nind_sh > 0: + for i in range(nind_sh): + Jx[ + 2 + + self.ngamma0 + + nmu + + ntrend_loc + + nind_loc + + npsi + + ntrend_sc + + nind_sc + + ngamma + + ntrend_sh + + i + ] = np.sum(Depst * covariates_sh[:, i]) + + ### Hessian matrix + # Derivatives given by equations (A.13)-(A.17) in the paper + D2mut = (1 + epst) * zn * (-1 + epst * z ** (1 / epst)) / ((z * psit) ** 2) + D2psit = ( + -zn * xn * ((1 - epst) * xn - 2) + ((1 - 2 * xn) - epst * (xn**2)) + ) / ((z * psit) ** 2) + D2epst = ( + -zn + * ( + xn + * ( + xn * (1 + 3 * epst) + + 2 + + (-2 - epst * (3 + epst) * xn) * z ** (1 / epst) + ) + + (z / (epst ** 2)) + * np.log(z) + * ( + 2 * epst * (-xn * (1 + epst) - 1 + z ** (1 + 1 / epst)) + + z * np.log(z) + ) + ) + / ((epst * z)**2) + ) + Dmutpsit = -(1 + epst - (1 - xn) * zn) / ((z * psit) ** 2) + Dmutepst = ( + -zn + * ( + epst * (-(1 + epst) * xn - epst * (1 - xn) / zn) + + z * np.log(z) + ) + / (psit * epst**2 * z**2) + ) + Dpsitepst = xn * Dmutepst + + # Corresponding Gumbel derivatives given by equations (A.18)-(A.20) + D2mut[posG] = -(np.exp(-xn[posG])) / (psit[posG] ** 2) + D2psit[posG] = ( + (1 - 2 * xn[posG]) + np.exp(-xn[posG]) * (2 - xn[posG]) * xn[posG] + ) / (psit[posG] ** 2) + D2epst[posG] = 0 + Dmutpsit[posG] = (-1 + np.exp(-xn[posG]) * (1 - xn[posG])) / (psit[posG] ** 2) + Dmutepst[posG] = 0 + Dpsitepst[posG] = 0 + + # Initialize the Hessian matrix + Hxx = np.zeros( + ( + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + nind_sh, + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + nind_sh, + ) + ) + # Elements of the Hessian matrix + # Sub-blocks following the order shown in Table 4 of the paper + + ## DIAGONAL SUB-BLOCKS + # Sub-block number 1, beta0^2 + Hxx[0, 0] = np.sum(D2mut) + # Sub-block number 2, betaT^2 + if ntrend_loc > 0: + Hxx[1 + nmu, 1 + nmu] = np.sum(D2mut * (self.t**2)) + # Sub-block number 3, beta_cov_i*beta_cov_j + if nind_loc > 0: + for i in range(nind_loc): + for j in range(i + 1): + Hxx[1 + nmu + ntrend_loc + i, 1 + nmu + ntrend_loc + j] = np.sum( + D2mut * covariates_loc[:, i] * covariates_loc[:, j] + ) + # Sub-block number 4, alphaT^2 + if ntrend_sc > 0: + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc, + 2 + nmu + npsi + ntrend_loc + nind_loc, + ] = 
np.sum((D2psit * psit + Dpsit) * psit * (self.t**2)) + # Sub-block number 5, alpha_cov_i*alpha_cov_j + if nind_sc > 0: + for i in range(nind_sc): + for j in range(i + 1): + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + i, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + j, + ] = np.sum( + (D2psit * psit + Dpsit) + * psit + * covariates_sc[:, i] + * covariates_sc[:, j] + ) + # Sub-block number 6, alpha0^2 + Hxx[1 + nmu + ntrend_loc + nind_loc, 1 + nmu + ntrend_loc + nind_loc] = np.sum( + (D2psit * psit + Dpsit) * psit + ) + # Sub-block number 7, gamma0^2 + if self.ngamma0 == 1: + # If the shape parameter is added but later the result is GUMBEL + if len(posG) == len(self.xt): + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + ] = -1 + else: + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + ] = np.sum(D2epst) + # Sub-block added by Victor, gamma_cov_i*gamma_cov_j + if nind_sh > 0: + for i in range(nind_sh): + for j in range(i + 1): + # Add -1 if the model is GUMBEL in all the values + if len(posG) == len(self.xt) and i == j: + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + i, + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + ] = -1 + else: + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + i, + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + ] = np.sum(D2epst * covariates_sh[:, i] * covariates_sh[:, j]) + # Sub-block number , gammaT^2 + if ntrend_sh > 0: + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + ] = np.sum(D2epst * self.t**2) + + # Sub-block number 8 (Scale exponential involved), beta0*alpha0 + Hxx[1 + nmu + ntrend_loc + nind_loc, 0] = np.sum(Dmutpsit * psit) + + if self.ngamma0 == 1: + # Sub-block number 9, beta0*gamma0 + Hxx[2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, 0] = ( + np.sum(Dmutepst) + ) + # Sub-block number 10 (Scale exponential involved), alpha0*gamma0 + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 1 + nmu + ntrend_loc + nind_loc, + ] = np.sum(Dpsitepst * psit) + # Sub-block number 11, beta0*betaT + if ntrend_loc > 0: + Hxx[1 + nmu, 0] = np.sum(D2mut * self.t) + # Sub-block number 12 (Scale exponential involved), beta0*alphaT + if ntrend_sc > 0: + Hxx[2 + nmu + ntrend_loc + nind_loc + npsi, 0] = np.sum( + Dmutpsit * self.t * psit + ) + # Sub-block number 52 (Scale exponential involved), alphaT*alpha0 + if ntrend_sc > 0: + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi, 1 + nmu + ntrend_loc + nind_loc + ] = np.sum((D2psit * psit + Dpsit) * self.t * psit) + # Sub-block number 48 (Scale exponential involved), betaT*alphaT + if ntrend_loc > 0 and ntrend_sc > 0: + Hxx[2 + nmu + ntrend_loc + nind_loc + npsi, 1 + nmu] = np.sum( + Dmutpsit * self.t * self.t * psit + ) + if ntrend_sh > 0: + # Sub-block number, beta0*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 0, + ] = np.sum(Dmutepst * self.t) + # Sub-block 
number (Scale exponential involved), alpha0*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 1 + nmu + ntrend_loc + nind_loc, + ] = np.sum(Dpsitepst * psit * self.t) + # Sub-block number, gamma0*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + ] = np.sum(D2epst * self.t) + if ntrend_sh > 0 and ntrend_loc > 0: + # Sub-block number, betaT*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 2 + nmu + ntrend_loc + nind_loc + npsi, + ] = np.sum(Dmutepst * self.t**2) + if ntrend_sh > 0 and ntrend_sc > 0: + # Sub-block number, alphaT*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 1 + nmu, + ] = np.sum(Dpsitepst * self.t**2 * psit) + # Sub-block number 13, beta0*beta_cov_i + if nind_loc > 0: + for i in range(nind_loc): + Hxx[1 + nmu + ntrend_loc + i, 0] = np.sum(D2mut * covariates_loc[:, i]) + # Sub-block number 14 (Scale exponential involved), beta0*alpha_cov_i + if nind_sc > 0: + for i in range(nind_sc): + Hxx[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i, 0] = np.sum( + Dmutpsit * covariates_sc[:, i] * psit + ) + # Sub-block number 53 (Scale exponential involved), alpha0*alpha_cov_i + if nind_sc > 0: + for i in range(nind_sc): + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i, + 1 + nmu + ntrend_loc + nind_loc, + ] = np.sum((D2psit * psit + Dpsit) * covariates_sc[:, i] * psit) + # Sub-block number 49 (Scale exponential involved), betaT*alpha_cov_i + if ntrend_loc > 0 and nind_sc > 0: + for i in range(nind_sc): + Hxx[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i, 1 + nmu] = ( + np.sum(Dmutpsit * self.t * covariates_sc[:, i] * psit) + ) + # Sub-block number 15, betaT*beta_cov_i + if nind_loc > 0 and ntrend_loc > 0: + for i in range(nind_loc): + Hxx[1 + nmu + ntrend_loc + i, 1 + nmu] = np.sum( + D2mut * self.t * covariates_loc[:, i] + ) + # Sub-block number 16, alphaT*alpha_cov_i + if nind_sc > 0 and ntrend_sc > 0: + for i in range(nind_sc): + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i, + 2 + nmu + ntrend_loc + nind_loc + npsi, + ] = np.sum( + (D2psit * psit + Dpsit) * self.t * covariates_sc[:, i] * psit + ) + # Sub-block number (Scale exponential involved), alpha_cov_i*gammaT + if nind_sc > 0 and ntrend_sh > 0: + for i in range(nind_sc): + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i, + ] = np.sum(Dpsitepst * psit * covariates_sc[:, i] * self.t) + # Sub-block number (Scale exponential involved), beta_cov_i*gammaT + if nind_loc > 0 and ntrend_sh > 0: + for i in range(nind_loc): + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 1 + nmu + ntrend_loc + i, + ] = np.sum(Dmutepst * covariates_loc[:, i] * self.t) + # Sub-block number, gamma_cov_i*gammaT + if nind_sh > 0 and ntrend_sh > 0: + for i in range(nind_sh): + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + i, + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + ] = np.sum(D2epst * covariates_sh[:, i] * self.t) + + # Sub-block number 17, 
alpha0*betaT + if ntrend_loc > 0: + Hxx[1 + nmu + ntrend_loc + nind_loc, 1 + nmu] = np.sum( + Dmutpsit * self.t * psit + ) + # Sub-block number 18, gamma0*betaT + if ntrend_loc > 0 and self.ngamma0 == 1: + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, 1 + nmu + ] = np.sum(Dmutepst * self.t) + # Sub-block number 19 (Scale exponential involved), gamma0*alphaT + if ntrend_sc > 0 and self.ngamma0 == 1: + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 2 + nmu + ntrend_loc + nind_loc + npsi, + ] = np.sum(Dpsitepst * self.t * psit) + # Sub-block number 20 (Scale exponential involved), alpha0*beta_cov_i + if nind_loc > 0: + for i in range(nind_loc): + Hxx[1 + nmu + ntrend_loc + nind_loc, 1 + nmu + ntrend_loc + i] = np.sum( + Dmutpsit * covariates_loc[:, i] * psit + ) + # Sub-block number 21, gamma0*beta_cov_i + if nind_loc > 0 and self.ngamma0 == 1: + for i in range(nind_loc): + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 1 + nmu + ntrend_loc + i, + ] = np.sum(Dmutepst * covariates_loc[:, i]) + # Sub-block number 22 (Scale exponential involved), gamma0*alpha_cov_i + if nind_sc > 0 and self.ngamma0 == 1: + for i in range(nind_sc): + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + i, + ] = np.sum(Dpsitepst * covariates_sc[:, i] * psit) + # Sub-block added by Victor, gamma0*gamma_cov_i + if nind_sh > 0 and self.ngamma0 == 1: + for i in range(nind_sh): + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + i, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + ] = np.sum(D2epst * covariates_sh[:, i]) + + if nmu > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += D2mut[k] * self._Dparam(tt, i + 1) + # Sub-block number 23, beta_i*beta0 + Hxx[1 + i, 0] = aux + for j in range(i + 1): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + D2mut[k] * self._Dparam(tt, i + 1) * self._Dparam(tt, j + 1) + ) + # Sub-block number 24, beta_i,beta_j + Hxx[1 + i, 1 + j] = aux + + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutpsit[k] * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 25 (Scale exponential involved), beta_i*alpha0 + Hxx[1 + nmu + ntrend_loc + nind_loc, 1 + i] = aux + + if self.ngamma0 == 1: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutepst[k] * self._Dparam(tt, i + 1) + # Sub-block number 26 (Scale exponential involved), beta_i*gamma0 + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 1 + i, + ] = aux + if ntrend_loc > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += D2mut[k] * tt * self._Dparam(tt, i + 1) + # Sub-block number 27, betaT*beta_i + Hxx[1 + nmu, 1 + i] = aux + + if ntrend_sc > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutpsit[k] * tt * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 46 (Scale exponential involved), alphaT*beta_i + Hxx[2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc, 1 + i] = aux + if ntrend_sh > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutepst[k] * tt * self._Dparam(tt, i + 1) + # Sub-block number 46 (Scale exponential involved), beta_i*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 1 + i, + ] = aux + + if nind_loc > 0: + for i 
in range(nmu): + for j in range(nind_loc): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + D2mut[k] + * covariates_loc[k, j] + * self._Dparam(tt, i + 1) + ) + # Sub-block number 28, beta_i*beta_cov_j + Hxx[1 + nmu + ntrend_loc + j, 1 + i] = aux + if nind_sc > 0: + for i in range(nmu): + for j in range(nind_sc): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Dmutpsit[k] + * covariates_sc[k, j] + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block number 47 (Scale exponential involved), beta_i*alpha_cov_j + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + j, + 1 + i, + ] = aux + if nind_sh > 0: + for i in range(nmu): + for j in range(nind_sh): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Dmutepst[k] + * covariates_sh[k, j] + * self._Dparam(tt, i + 1) + ) + # Sub-block added by Victor, beta_j*gamma_cov_i + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + 1 + i, + ] = aux + if npsi > 0: + for i in range(npsi): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + (D2psit[k] * psit[k] + Dpsit[k]) + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block number 29 (Scale exponential involved), alpha_i*alpha_0 + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + i, 1 + ntrend_loc + nind_loc + nmu + ] = aux + for j in range(i + 1): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + (D2psit[k] * psit[k] + Dpsit[k]) + * self._Dparam(tt, i + 1) + * self._Dparam(tt, j + 1) + * psit[k] + ) + # Sub-block 30 (Scale exponential involved), alpha_i*alpha_j + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + i, + 2 + nmu + ntrend_loc + nind_loc + j, + ] = aux + if self.ngamma0 == 1: + for i in range(npsi): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dpsitepst[k] * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 31 (Scale exponential involved), alpha_i*gamma0 + Hxx[ + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + 2 + nmu + ntrend_loc + nind_loc + i, + ] = aux + for i in range(npsi): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutpsit[k] * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 32 (Scale exponential involved), beta0*alpha_i + Hxx[2 + nmu + ntrend_loc + nind_loc + i, 0] = aux + if ntrend_loc > 0: + for i in range(npsi): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutpsit[k] * tt * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 33 (Scale exponential involved), alpha_i*betaT + Hxx[2 + nmu + ntrend_loc + nind_loc + i, 1 + nmu] = aux + if nind_loc > 0: + for i in range(npsi): + for j in range(nind_loc): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Dmutpsit[k] + * covariates_loc[k, j] + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block number 34 (Scale exponential involved), alpha_i*beta_cov_j + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + i, + 1 + nmu + ntrend_loc + j, + ] = aux + if nind_sh > 0: + for i in range(npsi): + for j in range(nind_sh): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Dpsitepst[k] + * covariates_sh[k, j] + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block added by Victor (scale exponential involved), alpha_i*gamma_cov_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + 2 + nmu + ntrend_loc + nind_loc + i, + ] = aux + if ntrend_sh > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dpsitepst[k] * tt * psit[k] * self._Dparam(tt, i + 1) + # Sub-block number 46 
(Scale exponential involved), alpha_i*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 2 + nmu + ntrend_loc + nind_loc + i, + ] = aux + if ngamma > 0: + for i in range(ngamma): + # First element associated to the constant value (first column) + aux = 0 + for k, tt in enumerate(self.t): + aux += D2epst[k] * self._Dparam(tt, i + 1) + # Sub-block number 35, gamma_i*gamma0 + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc, + ] = aux + for j in range(i + 1): + # If shape parameters included but later everything is GUMBEL + if j == i and len(posG) == len(self.xt): + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + j, + ] = -1 + else: + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + D2epst[k] + * self._Dparam(tt, i + 1) + * self._Dparam(tt, j + 1) + ) + # Sub-block number 36, gamma_i*gamma_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + j, + ] = aux + for i in range(ngamma): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dpsitepst[k] * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 37 (Scale exponential involved) gamma_i*alpha0 + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 1 + nmu + ntrend_loc + nind_loc, + ] = aux + for i in range(ngamma): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutepst[k] * self._Dparam(tt, i + 1) + # Sub-block number 38, gamma_i*beta0 + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 0, + ] = aux + if ntrend_loc > 0: + for i in range(ngamma): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutepst[k] * tt * self._Dparam(tt, i + 1) + # Sub-block number 39, gamma_i*betaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 1 + nmu, + ] = aux + if ntrend_sc > 0: + for i in range(ngamma): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dpsitepst[k] * tt * self._Dparam(tt, i + 1) * psit[k] + # Sub-block number 44 (Scale exponential involved), gamma_i*alphaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 2 + nmu + npsi + ntrend_loc + nind_loc, + ] = aux + if ntrend_sh > 0: + for i in range(nmu): + aux = 0 + for k, tt in enumerate(self.t): + aux += D2epst[k] * tt * self._Dparam(tt, i + 1) + # Sub-block number 46 (Scale exponential involved), gamma_i*gammaT + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc, + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + ] = aux + if nind_loc > 0: + for i in range(ngamma): + for j in range(nind_loc): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Dmutepst[k] + * covariates_loc[k, j] + * self._Dparam(tt, i + 1) + ) + # Sub-block number 40, gamma_i*beta_cov_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 1 + nmu + ntrend_loc + j, + ] = aux + if nind_sc > 0: + for i in range(ngamma): + for j in 
range(nind_sc): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + Dpsitepst[k] + * covariates_sc[k, j] + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block number 45 (Scale exponential involved), gamma_i*alpha_cov_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + j, + ] = aux + if nind_sh > 0: + for i in range(ngamma): + for j in range(nind_sh): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + D2psit[k] + * covariates_sh[k, j] + * self._Dparam(tt, i + 1) + ) + # Sub-block added by Victor, gamma_i*gamma_cov_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + 2 + + self.ngamma0 + + nmu + + npsi + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + i, + ] = aux + + if nind_loc > 0 and ntrend_sc > 0: + for i in range(nind_loc): + aux = 0 + for k, tt in enumerate(self.t): + aux += Dmutpsit[k] * tt * covariates_loc[k, i] * psit[k] + # Sub-block number 50 (Scale exponential involved), beta_cov_i*alphaT + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi, 1 + nmu + ntrend_loc + i + ] = aux + if nind_loc > 0 and nind_sc > 0: + for i in range(nind_loc): + for j in range(nind_sc): + # Sub-block number 51 (Scale exponential involved), beta_cov_i*alpha_cov_j + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + j, + 1 + nmu + ntrend_loc + i, + ] = np.sum( + Dmutpsit * covariates_sc[:, j] * covariates_loc[:, i] * psit + ) + if nind_loc > 0 and nind_sh > 0: + for i in range(nind_loc): + for j in range(nind_sh): + # Sub-block added by Victor, beta_cov_i*gamma_cov_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + 1 + nmu + ntrend_loc + i, + ] = np.sum(Dmutepst * covariates_loc[:, i] * covariates_sh[:, j]) + if nind_sc > 0 and nind_sh > 0: + for i in range(nind_sc): + for j in range(nind_sh): + # Sub-block added by Victor (scale exponential involved), alpha_cov_i*gamma_cov_j + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + j, + 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + i, + ] = np.sum( + Dpsitepst * covariates_sc[:, i] * covariates_sh[:, j] * psit + ) + if nind_sh > 0 and ntrend_loc > 0: + for i in range(nind_sh): + # aux = 0 + # for k, tt in enumerate(self.t): + # aux += Dmutepst[k] * tt * covariates_sh[k, i] + # Sub-block added by Victor, betaT*gamma_cov_i + Hxx[ + 2 + + self.ngamma0 + + nmu + + npsi + + ngamma + + ntrend_loc + + nind_loc + + ntrend_sc + + nind_sc + + ntrend_sh + + i, + 1 + nmu, + ] = np.sum(Dmutepst * self.t * covariates_sh[:, i]) + if ntrend_sc > 0: + for i in range(npsi): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + (D2psit[k] * psit[k] + Dpsit[k]) + * tt + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block number 54 (Scale exponential involved), alpha_i*alphaT + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc, + 2 + nmu + ntrend_loc + nind_loc + i, + ] = aux + if nind_sc > 0: + for i in range(npsi): + for j in range(nind_sc): + aux = 0 + for k, tt in enumerate(self.t): + aux += ( + (D2psit[k] * psit[k] + Dpsit[k]) + * covariates_sc[k, j] + * self._Dparam(tt, i + 1) + * psit[k] + ) + # Sub-block number 55 (Scale exponential involved), alpha_i*alpha_cov_j + Hxx[ + 2 + nmu + ntrend_loc + nind_loc + npsi + ntrend_sc + j, + 2 + nmu + ntrend_loc + nind_loc + i, + ] = aux + 
if nmu > 0 and npsi > 0:
+ for j in range(nmu):
+ for i in range(npsi):
+ aux = 0
+ for k, tt in enumerate(self.t):
+ aux += (
+ Dmutpsit[k]
+ * self._Dparam(tt, i + 1)
+ * self._Dparam(tt, j + 1)
+ * psit[k]
+ )
+ # Sub-block number 41 (Scale exponential involved), beta_j*alpha_i
+ Hxx[2 + nmu + ntrend_loc + nind_loc + i, 1 + j] = aux
+ if nmu > 0 and ngamma > 0:
+ for j in range(nmu):
+ for i in range(ngamma):
+ aux = 0
+ for k, tt in enumerate(self.t):
+ aux += (
+ Dmutepst[k]
+ * self._Dparam(tt, i + 1)
+ * self._Dparam(tt, j + 1)
+ )
+ # Sub-block number 42, beta_j*gamma_i
+ Hxx[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + i,
+ 1 + j,
+ ] = aux
+ if npsi > 0 and ngamma > 0:
+ for j in range(npsi):
+ for i in range(ngamma):
+ aux = 0
+ for k, tt in enumerate(self.t):
+ aux += (
+ Dpsitepst[k]
+ * self._Dparam(tt, i + 1)
+ * self._Dparam(tt, j + 1)
+ * psit[k]
+ )
+ # Sub-block number 43 (Scale exponential involved), alpha_j*gamma_i
+ Hxx[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + i,
+ 2 + nmu + ntrend_loc + nind_loc + j,
+ ] = aux
+
+ if nind_sh > 0:
+ for i in range(nind_sh):
+ # Sub-block added by Victor, beta0*gamma_cov_i
+ Hxx[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ngamma
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + ntrend_sh
+ + i,
+ 0,
+ ] = np.sum(Dmutepst * covariates_sh[:, i])
+ # Sub-block added by Victor (scale exponential involved), alpha0*gamma_cov_i
+ Hxx[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ngamma
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + ntrend_sh
+ + i,
+ 1 + nmu + ntrend_loc + nind_loc,
+ ] = np.sum(Dpsitepst * psit * covariates_sh[:, i])
+
+ # aux = 0
+ # for k, tt in enumerate(self.t):
+ # aux += Dpsitepst[k] * tt * covariates_sh[k, i] * psit[k]
+ # Sub-block added by Victor (scale exponential involved), alphaT*gamma_cov_i
+ Hxx[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ngamma
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + ntrend_sh
+ + i,
+ 1 + nmu + npsi + ntrend_loc + nind_loc,
+ ] = np.sum(Dpsitepst * self.t * covariates_sh[:, i] * psit)
+
+ # Symmetric part of the Hessian
+ Hxx = Hxx + np.tril(Hxx, -1).T
+ Hxx = -Hxx
+
+ return Hxx
 
 def fit(
 self,

From 08152836706a23663d1d44348309e80d843d7a04 Mon Sep 17 00:00:00 2001
From: Colladito
Date: Sat, 11 Oct 2025 17:50:17 +0200
Subject: [PATCH 10/12] [VCF] Reset commit and fix auto_adjust error

---
 bluemath_tk/distributions/nonstat_gev.py | 34 ++++++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)

diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py
index d39238f..e4832f9 100644
--- a/bluemath_tk/distributions/nonstat_gev.py
+++ b/bluemath_tk/distributions/nonstat_gev.py
@@ -816,6 +816,7 @@ def auto_adjust(self, max_iter: int = 1000, plot: bool = False) -> dict:
 
 if self.AIC_iter[iter] <= self.AIC_iter[iter - 1]:
 # Update the parameters
+ final_fit_result = fit_result
 self.AICini = self.AIC_iter[iter]
 self._update_params(**fit_result)
 beta_cov = fit_result.get("beta_cov")
@@ -841,6 +842,7 @@ def auto_adjust(self, max_iter: int = 1000, plot: bool = False) -> dict:
 gamma_cov = gamma_cov[:-1]
 nind_sh -= 1
 
+ fit_result = final_fit_result
 self.niter_cov = iter - self.niter_harm
 self.nit = iter
 
@@ -1430,6 +1432,30 @@ def _fit(
 + ngamma
 ] = 0.15
 
+ if ntrend_sh > 0:
+ lb[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + ngamma
+ ] = -0.15
+ ub[
+ 2
+ + self.ngamma0
+ + nmu
+ + npsi
+ + ntrend_loc
+ + nind_loc
+ + ntrend_sc
+ + nind_sc
+ + ngamma
+ ] = 0.15
+
 if nind_sh > 0:
 lb[
 2
@@ -1440,7 +1466,8 @@ def _fit(
 + nind_loc
 + ntrend_sc
 + nind_sc
- + ngamma : 2
+ + ngamma
+ + ntrend_sh : 2
 + self.ngamma0
 + nmu
 + npsi
@@ -1449,6 +1476,7 @@ def _fit(
 + ntrend_sc
 + nind_sc
 + ngamma
+ + ntrend_sh
 + nind_sh
 ] = -0.15
 ub[
@@ -1460,7 +1488,8 @@ def _fit(
 + nind_loc
 + ntrend_sc
 + nind_sc
- + ngamma : 2
+ + ngamma
+ + ntrend_sh : 2
 + self.ngamma0
 + nmu
 + npsi
@@ -1469,6 +1498,7 @@ def _fit(
 + ntrend_sc
 + nind_sc
 + ngamma
+ + ntrend_sh
 + nind_sh
 ] = 0.15
 
From 5e07be15cee106b09621465454f5364c73d4d28a Mon Sep 17 00:00:00 2001
From: Colladito
Date: Mon, 13 Oct 2025 13:05:31 +0200
Subject: [PATCH 11/12] [VCF] Error fixes in the Hessian and changes in the evolution plot of the aggregated return periods

---
 bluemath_tk/distributions/nonstat_gev.py | 66 ++++++++++++------------
 1 file changed, 33 insertions(+), 33 deletions(-)

diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py
index e4832f9..3ac0888 100644
--- a/bluemath_tk/distributions/nonstat_gev.py
+++ b/bluemath_tk/distributions/nonstat_gev.py
@@ -1284,8 +1284,8 @@ def _fit(
 # Fitting options
 if options is None:
 options = {
- "gtol": 1e-8,
- "xtol": 1e-8,
+ "gtol": 1e-6,
+ "xtol": 1e-10,
 "barrier_tol": 1e-6,
 "maxiter": 1000,
 }
@@ -3390,17 +3390,17 @@ def _auxmin_loglikelihood_hess(
 zn = z ** (-1 / epst)
 
 # Evaluate the loglikelihood function, note that the general and Gumbel expressions are used
- f = -np.sum(
- -np.log(self.kt[pos])
- + np.log(psit[pos])
- + (1 + 1 / epst[pos]) * np.log(z[pos])
- + self.kt[pos] * zn[pos]
- ) - np.sum(
- -np.log(self.kt[posG])
- + np.log(psit[posG])
- + xn[posG]
- + self.kt[posG] * np.exp(-xn[posG])
- )
+ # f = -np.sum(
+ # -np.log(self.kt[pos])
+ # + np.log(psit[pos])
+ # + (1 + 1 / epst[pos]) * np.log(z[pos])
+ # + self.kt[pos] * zn[pos]
+ # ) - np.sum(
+ # -np.log(self.kt[posG])
+ # + np.log(psit[posG])
+ # + xn[posG]
+ # + self.kt[posG] * np.exp(-xn[posG])
+ # )
 
 ### Gradient of the loglikelihood
 # Derivatives given by equations (A.1)-(A.3) in the paper
@@ -3829,8 +3829,8 @@ def _auxmin_loglikelihood_hess(
 + nind_sc,
 2 + nmu + npsi + ntrend_loc + nind_loc + ntrend_sc + nind_sc,
 ] = np.sum(D2epst * self.t)
- if ntrend_sh > 0 and ntrend_loc > 0:
- # Sub-block number, betaT*gammaT
+ if ntrend_sh > 0 and ntrend_sc > 0:
+ # Sub-block number, alphaT*gammaT
 Hxx[
 2
 + self.ngamma0
@@ -3842,9 +3842,9 @@ def _auxmin_loglikelihood_hess(
 + ntrend_sc
 + nind_sc,
 2 + nmu + ntrend_loc + nind_loc + npsi,
- ] = np.sum(Dmutepst * self.t**2)
- if ntrend_sh > 0 and ntrend_sc > 0:
- # Sub-block number, alphaT*gammaT
+ ] = np.sum(Dpsitepst * psit * self.t**2)
+ if ntrend_sh > 0 and ntrend_loc > 0:
+ # Sub-block number, betaT*gammaT
 Hxx[
 2
 + self.ngamma0
@@ -3856,7 +3856,7 @@ def _auxmin_loglikelihood_hess(
 + ntrend_sc
 + nind_sc,
 1 + nmu,
- ] = np.sum(Dpsitepst * self.t**2 * psit)
+ ] = np.sum(Dmutepst * self.t**2)
 # Sub-block number 13, beta0*beta_cov_i
 if nind_loc > 0:
 for i in range(nind_loc):
@@ -6949,14 +6949,14 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0
 )
 
 month_positions_aux = [i/12 for i in range(13)]
- ax1.plot(
- month_positions_aux, rt_10, linestyle="-", linewidth=1, label="10 years", color="tab:red"
+ ax1.step(
+ month_positions_aux, rt_10, where="post", linestyle="-", linewidth=1, label="10 years", color="tab:red"
 )
- ax1.plot(
- month_positions_aux, rt_50, linestyle="-", linewidth=1, label="50 years", 
color="tab:purple" + ax1.step( + month_positions_aux, rt_50, where="post", linestyle="-", linewidth=1, label="50 years", color="tab:purple" ) - ax1.plot( - month_positions_aux, rt_100, linestyle="-", linewidth=1, label="100 years", color="tab:green" + ax1.step( + month_positions_aux, rt_100, where="post", linestyle="-", linewidth=1, label="100 years", color="tab:green" ) ax1.set_title(f"Parameters Evolution ({self.var_name})") @@ -7047,18 +7047,18 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 for year in range(n_years): rt_100[year] = self._aggquantile(1-1/100, year, year+1) # 100-year return level at each year - ax1.plot( - np.arange(init_year,init_year+n_years), rt_10, linestyle="-", linewidth=1, label="10 years", color="tab:red" + ax1.step( + np.arange(init_year,init_year+n_years), rt_10, where="post", linestyle="-", linewidth=1, label="10 years", color="tab:red" ) - ax1.plot( - np.arange(init_year,init_year+n_years), rt_50, linestyle="-", linewidth=1, label="50 years", color="tab:purple" + ax1.step( + np.arange(init_year,init_year+n_years), rt_50, where="post", linestyle="-", linewidth=1, label="50 years", color="tab:purple" ) - ax1.plot( - np.arange(init_year, init_year+n_years), rt_100, linestyle="-", linewidth=1, label="100 years", color="tab:green" + ax1.step( + np.arange(init_year, init_year+n_years), rt_100, where="post", linestyle="-", linewidth=1, label="100 years", color="tab:green" ) ax1.set_xlabel("Time (years)") - ax1.set_ylabel(rf"$\mu_t(m)$, {self.var_name}") + ax1.set_ylabel(f"{self.var_name}") # ax1.set_title(f"Evolution of location and scale parameters ({self.var_name})") ax1.set_title(f"Evolution of parameters ({self.var_name})") ax1.grid(True) @@ -8704,7 +8704,7 @@ def _aggquantile( ) a = media - 10 - b = media + 10 + b = media + 20 for il in range(m): # function of z whose root we want From 7e9f064586d3295dcfb877bc8cf44c2fb1000849 Mon Sep 17 00:00:00 2001 From: Colladito Date: Mon, 13 Oct 2025 15:46:35 +0200 Subject: [PATCH 12/12] [VCF] Format nonstat_gev.py --- bluemath_tk/distributions/nonstat_gev.py | 266 ++++++++++++++++------- 1 file changed, 182 insertions(+), 84 deletions(-) diff --git a/bluemath_tk/distributions/nonstat_gev.py b/bluemath_tk/distributions/nonstat_gev.py index 3ac0888..a1484bc 100644 --- a/bluemath_tk/distributions/nonstat_gev.py +++ b/bluemath_tk/distributions/nonstat_gev.py @@ -4,14 +4,13 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd +from numba import njit, prange from scipy.integrate import quad from scipy.optimize import minimize, root_scalar -from scipy.stats import norm, genextreme +from scipy.stats import genextreme, norm from ..core.models import BlueMathModel from ..core.plotting.colors import default_colors -from numba import njit, prange - # @njit(fastmath=True) # def search(times: np.ndarray, values: np.ndarray, xs) -> np.ndarray: @@ -106,7 +105,7 @@ def __init__( """ super().__init__() - debug=1 + debug = 1 self.set_logger_name( name=self.__class__.__name__, level="DEBUG" if debug else "INFO" ) @@ -1488,7 +1487,7 @@ def _fit( + nind_loc + ntrend_sc + nind_sc - + ngamma + + ngamma + ntrend_sh : 2 + self.ngamma0 + nmu @@ -2888,6 +2887,7 @@ def _auxmin_loglikelihood_grad( Jx = -Jx return Jx + def _auxmin_loglikelihood_hess( self, x, @@ -3571,22 +3571,19 @@ def _auxmin_loglikelihood_hess( + 2 + (-2 - epst * (3 + epst) * xn) * z ** (1 / epst) ) - + (z / (epst ** 2)) + + (z / (epst**2)) * np.log(z) * ( 2 * epst * (-xn * (1 + epst) - 1 + z ** (1 + 1 / epst)) + z * 
np.log(z) ) ) - / ((epst * z)**2) + / ((epst * z) ** 2) ) Dmutpsit = -(1 + epst - (1 - xn) * zn) / ((z * psit) ** 2) Dmutepst = ( -zn - * ( - epst * (-(1 + epst) * xn - epst * (1 - xn) / zn) - + z * np.log(z) - ) + * (epst * (-(1 + epst) * xn - epst * (1 - xn) / zn) + z * np.log(z)) / (psit * epst**2 * z**2) ) Dpsitepst = xn * Dmutepst @@ -4837,7 +4834,6 @@ def fit( self.invI0 = np.linalg.inv(-Hxx) fit_result["invI0"] = self.invI0 - std_params = np.sqrt(np.diag(self.invI0)) self.std_params = std_params fit_result["std_param"] = std_params @@ -5235,22 +5231,19 @@ def _loglikelihood( + 2 + (-2 - epst * (3 + epst) * xn) * z ** (1 / epst) ) - + (z / (epst ** 2)) + + (z / (epst**2)) * np.log(z) * ( 2 * epst * (-xn * (1 + epst) - 1 + z ** (1 + 1 / epst)) + z * np.log(z) ) ) - / ((epst * z)**2) + / ((epst * z) ** 2) ) Dmutpsit = -(1 + epst - (1 - xn) * zn) / ((z * psit) ** 2) Dmutepst = ( -zn - * ( - epst * (-(1 + epst) * xn - epst * (1 - xn) / zn) - + z * np.log(z) - ) + * (epst * (-(1 + epst) * xn - epst * (1 - xn) / zn) + z * np.log(z)) / (psit * epst**2 * z**2) ) Dpsitepst = xn * Dmutepst @@ -6400,7 +6393,7 @@ def _update_params(self, **kwargs: dict) -> None: self.gamma_cov = kwargs.get("gamma_cov", np.empty(0)) self.xopt = kwargs.get("x", None) - + def _parametro( self, beta0: Optional[float] = None, @@ -6411,8 +6404,7 @@ def _parametro( indicesint: Optional[np.ndarray] = None, times: Optional[np.ndarray] = None, x: Optional[np.ndarray] = None, - ) -> np.ndarray: - + ) -> np.ndarray: if beta is None: beta = np.empty(0) if betaT is None or betaT.size == 0: @@ -6433,7 +6425,9 @@ def _parametro( else: x = self.t - return self.parametro(beta0, beta, betaT, beta_cov, covariates, indicesint, times,x,ntend) + return self.parametro( + beta0, beta, betaT, beta_cov, covariates, indicesint, times, x, ntend + ) @staticmethod @njit(fastmath=True) @@ -6448,7 +6442,7 @@ def parametro( x: Optional[np.ndarray] = None, ntend: Optional[int] = None, ) -> np.ndarray: - """ This function computes the location, scale and shape parameters for given parameters. Expressions by (2)-(3) in the paper + """This function computes the location, scale and shape parameters for given parameters. 
Expressions by (2)-(3) in the paper Parameters ---------- @@ -6474,7 +6468,7 @@ def parametro( y : np.ndarray Values of the parameter """ - + m = len(x) na, nind = covariates.shape @@ -6515,14 +6509,14 @@ def parametro( # times, covariates[:, i], x.flatten() # ) # y = y + beta_cov[i] * indicesintaux - idx = np.searchsorted(times, x, side='right') + idx = np.searchsorted(times, x, side="right") valid = idx < times.size y_add = np.zeros_like(x, dtype=np.float64) if np.any(valid): # pick rows from covariates and do one matvec - A = covariates[idx[valid], :] # (k, nind) - y_add[valid] = A @ beta_cov # (k,) + A = covariates[idx[valid], :] # (k, nind) + y_add[valid] = A @ beta_cov # (k,) y = y + y_add else: @@ -6530,9 +6524,8 @@ def parametro( # y = y + beta_cov[i] * covariates[:, i] y = y + covariates @ beta_cov - return y - + def _evaluate_params( self, beta0: Optional[float] = None, @@ -6870,19 +6863,38 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 fig, ax1 = plt.subplots(figsize=(10, 6)) ############# - month_initials = ["Jul", "Aug", "Sep", "Oct", "Nov", "Dec", "Jan", "Feb", "Mar", "Apr", "May", "Jun"] + month_initials = [ + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + ] month_positions = [i / 12 for i in range(12)] # Reorder the return periods to start from July - rt_10 = np.zeros(13) - rt_50 = np.zeros(13) + rt_10 = np.zeros(13) + rt_50 = np.zeros(13) rt_100 = np.zeros(13) for i in range(12): # Map i to the correct month index (July = 0, June = 11) month_idx = (i + 6) % 12 - rt_10[i] = self._aggquantile(1-1/10, month_idx/12, (month_idx+1)/12) - rt_50[i] = self._aggquantile(1-1/50, month_idx/12, (month_idx+1)/12) - rt_100[i] = self._aggquantile(1-1/100, month_idx/12, (month_idx+1)/12) + rt_10[i] = self._aggquantile( + 1 - 1 / 10, month_idx / 12, (month_idx + 1) / 12 + ) + rt_50[i] = self._aggquantile( + 1 - 1 / 50, month_idx / 12, (month_idx + 1) / 12 + ) + rt_100[i] = self._aggquantile( + 1 - 1 / 100, month_idx / 12, (month_idx + 1) / 12 + ) rt_10[12] = rt_10[0] rt_50[12] = rt_50[0] @@ -6890,32 +6902,44 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 # For the data points, shift the time values by 0.5 years t_shifted = t_anual.copy() - t_shifted[t_anual < 0.5] += 0.5 + t_shifted[t_anual < 0.5] += 0.5 t_shifted[t_anual >= 0.5] -= 0.5 t_ord = np.argsort(t_shifted) - + ### Added by Tomas Carlotto # Creating variables to store the evolution of pdf and sf over time - var_grid_resolution = 50 + var_grid_resolution = 50 # t_anual_ord = t_anual[t_ord] - mu_t = mut[t_ord] + mu_t = mut[t_ord] psi_t = psit[t_ord] - xi_t = epst[t_ord] + xi_t = epst[t_ord] # Definition of the Hs value grid - lim_max = np.max(self.xt)+1 + lim_max = np.max(self.xt) + 1 lim_min = np.min(self.xt) - hvar = np.linspace(lim_min, lim_max, var_grid_resolution) + hvar = np.linspace(lim_min, lim_max, var_grid_resolution) t_grid, x_grid = np.meshgrid(t_shifted[t_ord], hvar) # Calculating the 1-CDF for each grid point (Exceedance Probabilities) - sf = np.array([genextreme.sf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) for i in range(len(t_shifted[t_ord]))]).T + sf = np.array( + [ + genextreme.sf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) + for i in range(len(t_shifted[t_ord])) + ] + ).T # Calculating the Probability Density Function (pdf) for each grid point - pdf = np.array([genextreme.pdf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) for i in 
range(len(t_shifted[t_ord]))]).T + pdf = np.array( + [ + genextreme.pdf( + x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i] + ) + for i in range(len(t_shifted[t_ord])) + ] + ).T - cf = ax1.contourf(t_shifted[t_ord], hvar, sf, levels=50, cmap="viridis_r") - cbar = fig.colorbar(cf, ax=ax1) + cf = ax1.contourf(t_shifted[t_ord], hvar, sf, levels=50, cmap="viridis_r") + cbar = fig.colorbar(cf, ax=ax1) cbar.set_label("Exceedance probability", fontsize=12) - #=============== + # =============== # Use t_shifted for plotting data points ax1.plot( @@ -6926,7 +6950,7 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 color="black", markersize=5, label="Data", - alpha=0.9 + alpha=0.9, ) # Use t_shifted for other lines as well @@ -6948,15 +6972,33 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 alpha=0.3, ) - month_positions_aux = [i/12 for i in range(13)] + month_positions_aux = [i / 12 for i in range(13)] ax1.step( - month_positions_aux, rt_10, where="post", linestyle="-", linewidth=1, label="10 years", color="tab:red" + month_positions_aux, + rt_10, + where="post", + linestyle="-", + linewidth=1, + label="10 years", + color="tab:red", ) ax1.step( - month_positions_aux, rt_50, where="post", linestyle="-", linewidth=1, label="50 years", color="tab:purple" + month_positions_aux, + rt_50, + where="post", + linestyle="-", + linewidth=1, + label="50 years", + color="tab:purple", ) ax1.step( - month_positions_aux, rt_100, where="post", linestyle="-", linewidth=1, label="100 years", color="tab:green" + month_positions_aux, + rt_100, + where="post", + linestyle="-", + linewidth=1, + label="100 years", + color="tab:green", ) ax1.set_title(f"Parameters Evolution ({self.var_name})") @@ -6964,7 +7006,7 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 ax1.set_ylabel(f"{self.var_name}") ax1.grid(True) ax1.legend(loc="best") - ax1.set_xlim(0,1) + ax1.set_xlim(0, 1) ax1.set_xticks(month_positions, month_initials, rotation=45) if save: plt.savefig( @@ -6973,9 +7015,6 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 ) plt.show() - - - else: fig, ax1 = plt.subplots(figsize=(20, 6)) ax1.plot( @@ -7039,22 +7078,46 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 n_years = int(np.ceil(self.t[-1])) rt_10 = np.zeros(n_years) for year in range(n_years): - rt_10[year] = self._aggquantile(1-1/10, year, year+1) # 10-year return level at each year + rt_10[year] = self._aggquantile( + 1 - 1 / 10, year, year + 1 + ) # 10-year return level at each year rt_50 = np.zeros(n_years) for year in range(n_years): - rt_50[year] = self._aggquantile(1-1/50, year, year+1) # 50-year return level at each year + rt_50[year] = self._aggquantile( + 1 - 1 / 50, year, year + 1 + ) # 50-year return level at each year rt_100 = np.zeros(n_years) for year in range(n_years): - rt_100[year] = self._aggquantile(1-1/100, year, year+1) # 100-year return level at each year + rt_100[year] = self._aggquantile( + 1 - 1 / 100, year, year + 1 + ) # 100-year return level at each year ax1.step( - np.arange(init_year,init_year+n_years), rt_10, where="post", linestyle="-", linewidth=1, label="10 years", color="tab:red" + np.arange(init_year, init_year + n_years), + rt_10, + where="post", + linestyle="-", + linewidth=1, + label="10 years", + color="tab:red", ) ax1.step( - np.arange(init_year,init_year+n_years), rt_50, where="post", linestyle="-", linewidth=1, label="50 years", 
color="tab:purple" + np.arange(init_year, init_year + n_years), + rt_50, + where="post", + linestyle="-", + linewidth=1, + label="50 years", + color="tab:purple", ) ax1.step( - np.arange(init_year, init_year+n_years), rt_100, where="post", linestyle="-", linewidth=1, label="100 years", color="tab:green" + np.arange(init_year, init_year + n_years), + rt_100, + where="post", + linestyle="-", + linewidth=1, + label="100 years", + color="tab:green", ) ax1.set_xlabel("Time (years)") @@ -7073,9 +7136,21 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 plt.savefig(f"Figures/Adjustment_Evolution_{self.var_name}.png", dpi=300) plt.show() - ###### 1st Year PLOT - month_initials = ["Jul", "Aug", "Sep", "Oct", "Nov", "Dec", "Jan", "Feb", "Mar", "Apr", "May", "Jun"] + month_initials = [ + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + ] month_positions = [i / 12 for i in range(12)] #### Creating the first year plot @@ -7164,7 +7239,7 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 # alpha=0.3, # label="Location +- scale", # ) - + # # ax1.plot( # # self.t[mask_month], # # rt_10[mask_month], @@ -7397,19 +7472,26 @@ def plot(self, return_plot: bool = False, save: bool = False, init_year: int = 0 and self.nind_sc == 0 and self.nind_sh == 0 ): - mu_t = mut[t_ord] + mu_t = mut[t_ord] psi_t = psit[t_ord] - xi_t = epst[t_ord] + xi_t = epst[t_ord] # Definition of the Hs value grid - lim_max = np.max(self.xt)+1 + lim_max = np.max(self.xt) + 1 lim_min = np.min(self.xt) - hvar = np.linspace(lim_min, lim_max, var_grid_resolution) + hvar = np.linspace(lim_min, lim_max, var_grid_resolution) t_grid, x_grid = np.meshgrid(t_shifted[t_ord], hvar) # Calculating the Probability Density Function (pdf) for each grid point - pdf = np.array([genextreme.pdf(x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i]) for i in range(len(t_shifted[t_ord]))]).T + pdf = np.array( + [ + genextreme.pdf( + x_grid[:, i], c=-xi_t[i], loc=mu_t[i], scale=psi_t[i] + ) + for i in range(len(t_shifted[t_ord])) + ] + ).T fig = plt.figure(figsize=(15, 6)) - ax = fig.add_subplot(projection='3d') + ax = fig.add_subplot(projection="3d") ax.plot_surface(t_grid, x_grid, pdf, cmap="viridis_r") ax.set_zlim(0, 1) ax.set_xlabel("Time (Months)") @@ -8434,7 +8516,9 @@ def returnperiod_plot( self.logger.debug("ReturnPeriodPlot: Annual return period.") for j in range(nts): quanaggrA[j] = self._aggquantile(1 - 1 / Ts[j], 0, 1)[0] - self.logger.debug(f"ReturnPeriodPlot: Annual return period {j} finished.") + self.logger.debug( + f"ReturnPeriodPlot: Annual return period {j} finished." 
+ )
 # Confidence intervals
 if conf_int:
 stdup = np.zeros(nts)
@@ -8507,10 +8591,15 @@ def returnperiod_plot(
 # Create mask for each year's data
 year_masks = [(self.t >= j) & (self.t < j + 1) for j in range(ny)]
 # Calculate max values using masks and broadcasting
- hmax1 = np.array([np.max(self.xt[mask]) if np.any(mask) else np.nan for mask in year_masks])
+ hmax1 = np.array(
+ [
+ np.max(self.xt[mask]) if np.any(mask) else np.nan
+ for mask in year_masks
+ ]
+ )
 # Remove any NaN values if present
 hmax1 = hmax1[~np.isnan(hmax1)]
- 
+
 hmaxsort = np.sort(hmax1)
 ProHsmaxsort = np.arange(1, len(hmaxsort) + 1) / (len(hmaxsort) + 1)
 Tapprox = 1 / (1 - ProHsmaxsort)
@@ -8735,11 +8824,12 @@ def F(z):
 ),
 float(t0[il]),
 float(t1[il]),
- epsabs=1e-5, epsrel=1e-5
+ epsabs=1e-5,
+ epsrel=1e-5,
 )
 self.logger.debug("quad() finished")
 return integ + np.log(q[il]) / 12.0
- 
+
 try:
 # sol = root_scalar(
 # F,
 # bracket=(a, b),
 # method="brentq",
 # x0=media,
 # xtol=1e-6,
 # rtol=1e-6,
 # maxiter=200,
 # )
- 
+
 sol = root_scalar(
 F,
 bracket=(a, b),
- method="toms748", 
- xtol=1e-6, 
- rtol=1e-6, 
- maxiter=100
+ method="toms748",
+ xtol=1e-6,
+ rtol=1e-6,
+ maxiter=100,
 )
 if sol.converged:
 if abs(F(sol.root)) < 1e-2:
@@ -9229,7 +9319,11 @@ def format_line(name, value, std_err):
 
 for i in range(self.nind_loc):
 print(
- format_line(f"{self.covariates.columns[self.list_loc[i]]}", self.beta_cov[i], std_params[param_idx])
+ format_line(
+ f"{self.covariates.columns[self.list_loc[i]]}",
+ self.beta_cov[i],
+ std_params[param_idx],
+ )
 )
 param_idx += 1
 
@@ -9262,7 +9356,9 @@ def format_line(name, value, std_err):
 for i in range(self.nind_sc):
 print(
 format_line(
- f"{self.covariates.columns[self.list_sc[i]]}", self.alpha_cov[i], std_params[param_idx]
+ f"{self.covariates.columns[self.list_sc[i]]}",
+ self.alpha_cov[i],
+ std_params[param_idx],
 )
 )
 param_idx += 1
@@ -9297,7 +9393,9 @@ def format_line(name, value, std_err):
 for i in range(self.nind_sh):
 print(
 format_line(
- f"{self.covariates.columns[self.list_sh[i]]}", self.gamma_cov[i], std_params[param_idx]
+ f"{self.covariates.columns[self.list_sh[i]]}",
+ self.gamma_cov[i],
+ std_params[param_idx],
 )
 )
 param_idx += 1
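
A minimal usage sketch (an editor's illustration, not part of the patch series): the hunks above touch auto_adjust, fit, plot, returnperiod_plot and _aggquantile of the non-stationary GEV model in bluemath_tk/distributions/nonstat_gev.py. The class name NonStatGEV, its constructor arguments, and the synthetic data below are assumptions for illustration only; auto_adjust(max_iter=..., plot=...) and plot(return_plot=..., save=..., init_year=...) match signatures visible in the diffs, while returnperiod_plot(conf_int=True) is inferred from the `if conf_int:` branch.

    import numpy as np
    import pandas as pd

    # Module path taken from the diffs; class name and constructor are assumed.
    from bluemath_tk.distributions.nonstat_gev import NonStatGEV

    # Hypothetical 20-year series: observation times (years), values, one covariate.
    rng = np.random.default_rng(0)
    t = np.sort(rng.uniform(0, 20, size=2000))
    x = rng.gumbel(loc=2.0, scale=0.5, size=2000)
    covariates = pd.DataFrame({"AWT": rng.standard_normal(2000)})

    model = NonStatGEV(xt=x, t=t, covariates=covariates, var_name="Hs")
    model.auto_adjust(max_iter=1000, plot=False)  # stepwise covariate selection by AIC (fixed in PATCH 10)
    model.plot()  # stepped 10/50/100-year aggregated return levels (PATCH 11)
    model.returnperiod_plot(conf_int=True)  # annual aggregated return-period curve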