From 45a84d239c8a40a7f61e696c1eaf9beb182bc234 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:03:28 +0200
Subject: [PATCH 01/25] ruff rules

---
 pyproject.toml | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index 2a62fe5..cde78c4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -92,3 +92,56 @@ markers = [
 # Helpful for pytest-debugging (leave commented out on commit):
 # log_cli=true
 # log_level=DEBUG
+
+# ----- Dev Tools Configuration ----- #
+
+[tool.ruff]
+exclude = [
+    ".eggs",
+    ".git",
+    ".mypy_cache",
+    ".venv",
+    "_build",
+    "build",
+    "dist",
+]
+
+# Assume Python 3.10+
+target-version = "py310"
+
+line-length = 100
+indent-width = 4
+
+[tool.ruff.lint]
+# Allow unused variables when underscore-prefixed.
+dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
+ignore = [
+    "E501",    # line too long
+    "FBT001",  # boolean-type-hint-positional-argument
+    "FBT002",  # boolean-default-value-positional-argument
+    "PT019",   # pytest-fixture-param-without-value (but suggested solution fails)
+]
+extend-select = [
+    "F",    # Pyflakes rules
+    "W",    # PyCodeStyle warnings
+    "E",    # PyCodeStyle errors
+    "I",    # Sort imports properly
+    "A",    # Detect shadowed builtins
+    "N",    # enforce naming conventions, e.g. ClassName vs function_name
+    "UP",   # Warn if certain things can be changed due to newer Python versions
+    "C4",   # Catch incorrect use of comprehensions, dict, list, etc
+    "FA",   # Enforce from __future__ import annotations
+    "FBT",  # detect boolean traps
+    "ISC",  # Good use of string concatenation
+    "BLE",  # disallow catch-all exceptions
+    "ICN",  # Use common import conventions
+    "RET",  # Good return practices
+    "SIM",  # Common simplification rules
+    "TID",  # Some good import practices
+    "TC",   # Enforce importing certain types in a TYPE_CHECKING block
+    "PTH",  # Use pathlib instead of os.path
+    "NPY",  # Some numpy-specific things
+]
+# Allow fix for all enabled rules (when `--fix` is provided).
+fixable = ["ALL"]
+unfixable = []
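Patch 01 only adds configuration; nothing in it shows the tooling being run. A minimal sketch of how these settings would typically be exercised (assuming ruff is installed in the environment; the target path is illustrative):

    import subprocess

    # Lints with the rules selected in [tool.ruff.lint] above; --fix applies
    # the autofixes permitted by `fixable = ["ALL"]` / `unfixable = []`.
    subprocess.run(["ruff", "check", "--fix", "."], check=True)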
From bff289681b40b24a72b539b6e7e10affe1d90568 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:06:10 +0200
Subject: [PATCH 02/25] formatting

---
 doc/conf.py                              |   2 +-
 pylhc/__init__.py                        |   1 +
 pylhc/bpm_calibration.py                 |  15 +-
 pylhc/bsrt_analysis.py                   |  11 +-
 pylhc/bsrt_logger.py                     |   3 +-
 pylhc/calibration/beta.py                |  21 +-
 pylhc/calibration/dispersion.py          |  34 +--
 pylhc/constants/calibration.py           | 234 ++++++++++---------
 pylhc/constants/forced_da_analysis.py    |   1 +
 pylhc/constants/general.py               |   3 +-
 pylhc/constants/kickgroups.py            |  25 ++-
 pylhc/constants/machine_settings_info.py |   1 +
 pylhc/data_extract/lsa.py                |  75 +++++--
 pylhc/data_extract/timber.py             |   1 +
 pylhc/forced_da_analysis.py              |  44 ++--
 pylhc/kickgroups.py                      |  39 ++--
 pylhc/lsa_to_madx.py                     |  56 +++--
 pylhc/machine_settings_info.py           | 273 +++++++++++++----------
 tests/unit/test_bpm_calibration.py       | 141 ++++++------
 tests/unit/test_bsrt_analysis.py         |   4 +-
 tests/unit/test_forced_da_analysis.py    |   2 +-
 tests/unit/test_lsa_to_madx.py           |  23 +-
 22 files changed, 586 insertions(+), 423 deletions(-)

diff --git a/doc/conf.py b/doc/conf.py
index 30a55b9..2bb2e4f 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -97,7 +97,7 @@ def about_package(init_posixpath: pathlib.Path) -> dict:
 
 # Override link in 'Edit on Github'
 rst_prolog = f"""
-:github_url: {ABOUT_PYLHC['__url__']}
+:github_url: {ABOUT_PYLHC["__url__"]}
 """
 
 # The version info for the project you're documenting, acts as replacement for
diff --git a/pylhc/__init__.py b/pylhc/__init__.py
index 8c76dc1..bc95d96 100644
--- a/pylhc/__init__.py
+++ b/pylhc/__init__.py
@@ -7,6 +7,7 @@
 :copyright: pyLHC/OMC-Team working group.
 :license: MIT, see the LICENSE.md file for details.
 """
+
 __title__ = "pylhc"
 __description__ = "An accelerator physics script collection for the OMC team at CERN."
 __url__ = "https://github.com/pylhc/pylhc"
diff --git a/pylhc/bpm_calibration.py b/pylhc/bpm_calibration.py
index 8e98fb4..cafbd85 100644
--- a/pylhc/bpm_calibration.py
+++ b/pylhc/bpm_calibration.py
@@ -51,6 +51,7 @@
     default: ``beta``
 
 """
+
 from pathlib import Path
 
 import tfs
@@ -74,11 +75,7 @@ def _get_params() -> dict:
     """
     return EntryPointParameters(
-        inputdir=dict(
-            type=Path,
-            required=True,
-            help="Measurements path."
-        ),
+        inputdir=dict(type=Path, required=True, help="Measurements path."),
         outputdir=dict(
             type=Path,
             required=True,
@@ -120,9 +117,11 @@ def main(opt):
     # Write the TFS file to the desired output directory
     opt.outputdir.mkdir(parents=True, exist_ok=True)
     for plane in factors.keys():
-        tfs.write(opt.outputdir / f"{CALIBRATION_NAME[opt.method]}{plane.lower()}{EXT}",
-                  factors[plane].reset_index(),
-                  save_index=False)
+        tfs.write(
+            opt.outputdir / f"{CALIBRATION_NAME[opt.method]}{plane.lower()}{EXT}",
+            factors[plane].reset_index(),
+            save_index=False,
+        )
 
     return factors
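The reformatted entrypoint keeps its call signature; a usage sketch mirroring the tests further down in this patch (the input directory is illustrative and must contain the optics-measurement tfs files):

    from pathlib import Path
    from pylhc import bpm_calibration as calibration

    # method is "beta" (default) or "dispersion"; ips must be among [1, 4, 5]
    factors = calibration.main(
        inputdir=Path("measurements/for_beta"),
        outputdir=Path("calibration_out"),
        method="beta",
        ips=[1, 4, 5],
    )
    # One DataFrame of calibration factors per plane ("X"/"Y"), also written
    # to calibration_beta_{x,y}.tfs in outputdir.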
""" + import datetime import glob import gzip @@ -224,7 +225,6 @@ def _add_kick_lines(ax, df): def _fit_var(ax, bsrt_df, plot_dict, opt): - ax[plot_dict["idx"]].plot( bsrt_df.index, [entry[plot_dict["fitidx"]] for entry in bsrt_df["lastFitResults"]] ) @@ -234,7 +234,6 @@ def _fit_var(ax, bsrt_df, plot_dict, opt): def plot_fit_variables(opt, bsrt_df): - fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(20, 9), sharex=True, constrained_layout=True) plot_dicts = [ @@ -292,8 +291,8 @@ def _full_crossection(ax, bsrt_df, plot_dict, opt): ax, bsrt_df.reset_index(), "TimeIndex", - f'projPositionSet{plot_dict["idx"]}', - f'projDataSet{plot_dict["idx"]}', + f"projPositionSet{plot_dict['idx']}", + f"projDataSet{plot_dict['idx']}", ) ax.plot( bsrt_df.index, @@ -326,7 +325,6 @@ def _full_crossection(ax, bsrt_df, plot_dict, opt): def plot_full_crosssection(opt, bsrt_df): - plot_dicts = [ {"idx": 1, "fitresult": 3, "fiterror": 4, "title": "Horizontal Cross section"}, {"idx": 2, "fitresult": 8, "fiterror": 9, "title": "Vertical Cross section"}, @@ -344,7 +342,7 @@ def plot_full_crosssection(opt, bsrt_df): def _gauss(x, *p): a, b, c = p - return a * np.exp(-((x - b) ** 2) / (2.0 * c ** 2.0)) + return a * np.exp(-((x - b) ** 2) / (2.0 * c**2.0)) def _reshaped_imageset(df): @@ -408,7 +406,6 @@ def plot_crosssection_for_timesteps(opt, bsrt_df): def _aux_variables(ax, bsrt_df, plot_dict, opt): - ax.plot( bsrt_df.index, bsrt_df[plot_dict["variable1"]], color="red", label=plot_dict["variable1"] ) diff --git a/pylhc/bsrt_logger.py b/pylhc/bsrt_logger.py index 32ab95f..8884470 100644 --- a/pylhc/bsrt_logger.py +++ b/pylhc/bsrt_logger.py @@ -9,6 +9,7 @@ Original authors: E. H. Maclean, T. Persson and G. Trad. """ + import datetime as dt import os import pickle @@ -60,7 +61,7 @@ def convert_to_data_output_format(dtobject): ########################################## -if __name__ == '__main__': +if __name__ == "__main__": # Create a PyJapc instance with selector SCT.USER.ALL # INCA is automatically configured based on the timing domain you specify here diff --git a/pylhc/calibration/beta.py b/pylhc/calibration/beta.py index 427760d..6897a4d 100644 --- a/pylhc/calibration/beta.py +++ b/pylhc/calibration/beta.py @@ -7,6 +7,7 @@ to be used with the script `bpm_calibration.py`. 
""" + from pathlib import Path from typing import Dict, Sequence, Tuple @@ -50,9 +51,9 @@ def err_function(x, popt, pcov): sa, sb, sab = pcov[0, 0], pcov[1, 1], pcov[0, 1] a, b = popt[0], popt[1] - beta_err = ((a ** 2 - (x - b) ** 2) / a ** 2) ** 2 * sa + beta_err = ((a**2 - (x - b) ** 2) / a**2) ** 2 * sa beta_err += 4 * ((x - b) / a) ** 2 * sb - beta_err -= 4 * (x - b) * (a ** 2 - (x - b) ** 2) / a ** 3 * sab + beta_err -= 4 * (x - b) * (a**2 - (x - b) ** 2) / a**3 * sab return beta_err positions = beta_phase_tfs.reindex(bpms)[f"{S}"] @@ -118,8 +119,8 @@ def _get_factors_from_phase( factors = np.sqrt(beta_phase / beta_amp) # Now compute the errors - calibration_error = (beta_phase_err ** 2) / (4 * beta_amp * beta_phase) - calibration_error += (beta_phase * (beta_amp_err ** 2)) / (4 * (beta_amp ** 3)) + calibration_error = (beta_phase_err**2) / (4 * beta_amp * beta_phase) + calibration_error += (beta_phase * (beta_amp_err**2)) / (4 * (beta_amp**3)) calibration_error = np.sqrt(calibration_error) return pd.DataFrame({LABELS[1]: factors, LABELS[2]: calibration_error}) @@ -183,7 +184,9 @@ def _get_factors_from_phase_fit( return calibration_phase_fit -def get_calibration_factors_from_beta(ips: Sequence[int], input_path: Path) -> Dict[str, pd.DataFrame]: +def get_calibration_factors_from_beta( + ips: Sequence[int], input_path: Path +) -> Dict[str, pd.DataFrame]: """ This function is the main function to compute the calibration factors for the beta method. @@ -220,14 +223,18 @@ def get_calibration_factors_from_beta(ips: Sequence[int], input_path: Path) -> D # Load the tfs files for beta from phase and beta from amp beta_phase_tfs = tfs.read(input_path / f"{BETA_NAME}{plane.lower()}{EXT}", index=TFS_INDEX) - beta_amp_tfs = tfs.read(input_path / f"{AMP_BETA_NAME}{plane.lower()}{EXT}", index=TFS_INDEX) + beta_amp_tfs = tfs.read( + input_path / f"{AMP_BETA_NAME}{plane.lower()}{EXT}", index=TFS_INDEX + ) # Get the calibration factors from phase calibration_phase = _get_factors_from_phase(beta_phase_tfs, beta_amp_tfs, plane) # Calibration from phase fit can only be obtained via ballistic optics if ips is not None: - calibration_phase_fit = _get_factors_from_phase_fit(beta_phase_tfs, beta_amp_tfs, ips, plane) + calibration_phase_fit = _get_factors_from_phase_fit( + beta_phase_tfs, beta_amp_tfs, ips, plane + ) else: calibration_phase_fit = pd.DataFrame(columns=(LABELS[3], LABELS[4])) diff --git a/pylhc/calibration/dispersion.py b/pylhc/calibration/dispersion.py index b641dd8..925b1c5 100644 --- a/pylhc/calibration/dispersion.py +++ b/pylhc/calibration/dispersion.py @@ -7,6 +7,7 @@ intended to be used with the script `bpm_calibration.py`. 
""" + from pathlib import Path from scipy.optimize import curve_fit import numpy as np @@ -68,19 +69,17 @@ def dispersion_function(x, a, b): # Get the fitted beta and add the errors to get min/max values dispersion_fit = dispersion_function(positions, fit[0], fit[1]) - dispersion_max_fit = dispersion_function( - positions, fit[0] + fit_err[0], fit[1] + fit_err[1] - ) - dispersion_min_fit = dispersion_function( - positions, fit[0] - fit_err[0], fit[1] - fit_err[1] - ) + dispersion_max_fit = dispersion_function(positions, fit[0] + fit_err[0], fit[1] + fit_err[1]) + dispersion_min_fit = dispersion_function(positions, fit[0] - fit_err[0], fit[1] - fit_err[1]) dispersion_fit_err = (dispersion_max_fit - dispersion_min_fit) / 2 return dispersion_fit, dispersion_fit_err def _get_factors_from_dispersion( - dispersion: Dict[str, pd.Series], phase: str, phase_err: str, + dispersion: Dict[str, pd.Series], + phase: str, + phase_err: str, ) -> Tuple[pd.Series, pd.Series]: """ This function computes the calibration factors for the dispersion method @@ -148,9 +147,7 @@ def get_calibration_factors_from_dispersion( """ LOG.info("Computing the calibration factors via dispersion") # Load the normalized dispersion tfs file - norm_dispersion_tfs = tfs.read( - input_path / f"{NORM_DISP_NAME}x{EXT}", index=TFS_INDEX - ) + norm_dispersion_tfs = tfs.read(input_path / f"{NORM_DISP_NAME}x{EXT}", index=TFS_INDEX) dispersion_tfs = tfs.read(input_path / f"{DISPERSION_NAME}x{EXT}", index=TFS_INDEX) # Get the beam concerned by those tfs files @@ -163,14 +160,16 @@ def get_calibration_factors_from_dispersion( # Filter our TFS files to only keep the BPMs for the selected IR bpms = dispersion_tfs.reindex(BPMS[ip][beam]) d_bpms = dispersion_tfs.reindex(D_BPMS[ip][beam]) - + # Check for possible missing bpms for bpm_set in [bpms, d_bpms]: missing = set(bpm_set.loc[bpm_set.isnull().values].index) if missing: - LOG.warning(" One or several BPMs are missing in the input" - " DataFrame, the calibration factors calculation" - f"from fit may not be accurate: {missing}") + LOG.warning( + " One or several BPMs are missing in the input" + " DataFrame, the calibration factors calculation" + f"from fit may not be accurate: {missing}" + ) # Get the positions of the BPMs and the subset used for the fit bpms = bpms.index @@ -190,7 +189,8 @@ def get_calibration_factors_from_dispersion( # Compute the calibration factors using the dispersion from phase and amp calibration, calibration_err = _get_factors_from_dispersion( - dispersion, "phase", "phase_err") + dispersion, "phase", "phase_err" + ) # Fit the dispersion from phase dispersion["phase_fit"], dispersion["phase_fit_err"] = _get_dispersion_fit( @@ -219,6 +219,8 @@ def get_calibration_factors_from_dispersion( if "X" not in calibration_factors.keys(): calibration_factors = {"X": factors_for_ip} else: - calibration_factors["X"] = pd.concat([calibration_factors["X"], factors_for_ip], axis="index") + calibration_factors["X"] = pd.concat( + [calibration_factors["X"], factors_for_ip], axis="index" + ) return calibration_factors diff --git a/pylhc/constants/calibration.py b/pylhc/constants/calibration.py index ca075a4..996eb58 100644 --- a/pylhc/constants/calibration.py +++ b/pylhc/constants/calibration.py @@ -9,125 +9,141 @@ IPS = [1, 4, 5] # Constants for TFS files -LABELS = ['S', - 'CALIBRATION', - 'ERROR_CALIBRATION', - 'CALIBRATION_FIT', - 'ERROR_CALIBRATION_FIT'] -TFS_INDEX = 'NAME' -D = 'D' -ND = 'ND' +LABELS = ["S", "CALIBRATION", "ERROR_CALIBRATION", "CALIBRATION_FIT", 
"ERROR_CALIBRATION_FIT"] +TFS_INDEX = "NAME" +D = "D" +ND = "ND" # Estimation for the curve fit BETA_STAR_ESTIMATION = 200 # Methods to be used to compulte the calibration factors -METHODS = ('beta', 'dispersion') +METHODS = ("beta", "dispersion") # File name prefix for calibration output # end result example: {'beta': 'calibration_beta_.tfs', 'dispersion' ... } -CALIBRATION_NAME = {m: f'calibration_{m}_' for m in METHODS} +CALIBRATION_NAME = {m: f"calibration_{m}_" for m in METHODS} # Define BPMs to be used for a combination of IP and Beam -BPMS = {1: {1: ['BPMR.5L1.B1', - 'BPMYA.4L1.B1', - 'BPMWB.4L1.B1', - 'BPMSY.4L1.B1', - 'BPMS.2L1.B1', - 'BPMSW.1L1.B1', - 'BPMSW.1R1.B1', - 'BPMS.2R1.B1', - 'BPMSY.4R1.B1', - 'BPMWB.4R1.B1', - 'BPMYA.4R1.B1'], - 2: ['BPM.5L1.B2', - 'BPMYA.4L1.B2', - 'BPMWB.4L1.B2', - 'BPMSY.4L1.B2', - 'BPMS.2L1.B2', - 'BPMSW.1L1.B2', - 'BPMSW.1R1.B2', - 'BPMS.2R1.B2', - 'BPMSY.4R1.B2', - 'BPMWB.4R1.B2', - 'BPMYA.4R1.B2'] - }, - 4: {1: [ - 'BPMYA.5L4.B1', - 'BPMWI.A5L4.B1', - 'BPMWA.B5L4.B1', - 'BPMWA.A5L4.B1', - 'BPMWA.A5R4.B1', - 'BPMWA.B5R4.B1', - 'BPMYB.5R4.B1', - 'BPMYA.6R4.B1', - ], - 2: [ - 'BPMYB.5L4.B2', - 'BPMWA.B5L4.B2', - 'BPMWA.A5L4.B2', - 'BPMWA.A5R4.B2', - 'BPMWA.B5R4.B2', - 'BPMWI.A5R4.B2', - 'BPMYA.5R4.B2', - 'BPMYB.6R4.B2' - ] - }, - 5: {1: ['BPMYA.4L5.B1', - 'BPMWB.4L5.B1', - 'BPMSY.4L5.B1', - 'BPMS.2L5.B1', - 'BPMSW.1L5.B1', - 'BPMSW.1R5.B1', - 'BPMS.2R5.B1', - 'BPMSY.4R5.B1', - 'BPMWB.4R5.B1', - 'BPMYA.4R5.B1', - 'BPM.5R5.B1'], - 2: ['BPMYA.4L5.B2', - 'BPMWB.4L5.B2', - 'BPMSY.4L5.B2', - 'BPMS.2L5.B2', - 'BPMSW.1L5.B2', - 'BPMSW.1R5.B2', - 'BPMS.2R5.B2', - 'BPMSY.4R5.B2', - 'BPMWB.4R5.B2', - 'BPMYA.4R5.B2', - 'BPMR.5R5.B2'] - } - } +BPMS = { + 1: { + 1: [ + "BPMR.5L1.B1", + "BPMYA.4L1.B1", + "BPMWB.4L1.B1", + "BPMSY.4L1.B1", + "BPMS.2L1.B1", + "BPMSW.1L1.B1", + "BPMSW.1R1.B1", + "BPMS.2R1.B1", + "BPMSY.4R1.B1", + "BPMWB.4R1.B1", + "BPMYA.4R1.B1", + ], + 2: [ + "BPM.5L1.B2", + "BPMYA.4L1.B2", + "BPMWB.4L1.B2", + "BPMSY.4L1.B2", + "BPMS.2L1.B2", + "BPMSW.1L1.B2", + "BPMSW.1R1.B2", + "BPMS.2R1.B2", + "BPMSY.4R1.B2", + "BPMWB.4R1.B2", + "BPMYA.4R1.B2", + ], + }, + 4: { + 1: [ + "BPMYA.5L4.B1", + "BPMWI.A5L4.B1", + "BPMWA.B5L4.B1", + "BPMWA.A5L4.B1", + "BPMWA.A5R4.B1", + "BPMWA.B5R4.B1", + "BPMYB.5R4.B1", + "BPMYA.6R4.B1", + ], + 2: [ + "BPMYB.5L4.B2", + "BPMWA.B5L4.B2", + "BPMWA.A5L4.B2", + "BPMWA.A5R4.B2", + "BPMWA.B5R4.B2", + "BPMWI.A5R4.B2", + "BPMYA.5R4.B2", + "BPMYB.6R4.B2", + ], + }, + 5: { + 1: [ + "BPMYA.4L5.B1", + "BPMWB.4L5.B1", + "BPMSY.4L5.B1", + "BPMS.2L5.B1", + "BPMSW.1L5.B1", + "BPMSW.1R5.B1", + "BPMS.2R5.B1", + "BPMSY.4R5.B1", + "BPMWB.4R5.B1", + "BPMYA.4R5.B1", + "BPM.5R5.B1", + ], + 2: [ + "BPMYA.4L5.B2", + "BPMWB.4L5.B2", + "BPMSY.4L5.B2", + "BPMS.2L5.B2", + "BPMSW.1L5.B2", + "BPMSW.1R5.B2", + "BPMS.2R5.B2", + "BPMSY.4R5.B2", + "BPMWB.4R5.B2", + "BPMYA.4R5.B2", + "BPMR.5R5.B2", + ], + }, +} # For the dispersion method, only a subject of the BPMs is used # Same as BPM: IP and then beam -D_BPMS = {1: {1: ['BPMSY.4L1.B1', - 'BPMS.2L1.B1', - 'BPMSW.1L1.B1', - 'BPMSW.1R1.B1', - 'BPMS.2R1.B1', - 'BPMSY.4R1.B1'], - 2: ['BPMSY.4L1.B2', - 'BPMS.2L1.B2', - 'BPMSW.1L1.B2', - 'BPMSW.1R1.B2', - 'BPMS.2R1.B2', - 'BPMSY.4R1.B2'] - }, - 4: {1: [], - 2: [] - }, - 5: {1: ['BPMSY.4L5.B1', - 'BPMS.2L5.B1', - 'BPMSW.1L5.B1', - 'BPMSW.1R5.B1', - 'BPMS.2R5.B1', - 'BPMSY.4R5.B1', - ], - 2: ['BPMSY.4L5.B2', - 'BPMS.2L5.B2', - 'BPMSW.1L5.B2', - 'BPMSW.1R5.B2', - 'BPMS.2R5.B2', - 'BPMSY.4R5.B2'] - } - } +D_BPMS = { + 1: { + 1: [ + "BPMSY.4L1.B1", + 
"BPMS.2L1.B1", + "BPMSW.1L1.B1", + "BPMSW.1R1.B1", + "BPMS.2R1.B1", + "BPMSY.4R1.B1", + ], + 2: [ + "BPMSY.4L1.B2", + "BPMS.2L1.B2", + "BPMSW.1L1.B2", + "BPMSW.1R1.B2", + "BPMS.2R1.B2", + "BPMSY.4R1.B2", + ], + }, + 4: {1: [], 2: []}, + 5: { + 1: [ + "BPMSY.4L5.B1", + "BPMS.2L5.B1", + "BPMSW.1L5.B1", + "BPMSW.1R5.B1", + "BPMS.2R5.B1", + "BPMSY.4R5.B1", + ], + 2: [ + "BPMSY.4L5.B2", + "BPMS.2L5.B2", + "BPMSW.1L5.B2", + "BPMSW.1R5.B2", + "BPMS.2R5.B2", + "BPMSY.4R5.B2", + ], + }, +} diff --git a/pylhc/constants/forced_da_analysis.py b/pylhc/constants/forced_da_analysis.py index 6f909a4..f2dd3f8 100644 --- a/pylhc/constants/forced_da_analysis.py +++ b/pylhc/constants/forced_da_analysis.py @@ -5,6 +5,7 @@ Specific constants relating to the forced DA analysis to be used in ``PyLHC``, to help with consistency. """ + from pylhc.constants.general import PLANE_TO_HV, TFS_SUFFIX RESULTS_DIR = "forced_da_analysis" diff --git a/pylhc/constants/general.py b/pylhc/constants/general.py index 483afd0..83425b6 100644 --- a/pylhc/constants/general.py +++ b/pylhc/constants/general.py @@ -4,6 +4,7 @@ General constants to be used in ``PyLHC``, to help with consistency. """ + import numpy as np BEAMS = (1, 2) @@ -26,5 +27,5 @@ def get_proton_gamma(energy): def get_proton_beta(energy): - """ Returns relativistic beta for protons """ + """Returns relativistic beta for protons""" return np.sqrt(1 - (1 / get_proton_gamma(energy) ** 2)) diff --git a/pylhc/constants/kickgroups.py b/pylhc/constants/kickgroups.py index 6047cf2..5688225 100644 --- a/pylhc/constants/kickgroups.py +++ b/pylhc/constants/kickgroups.py @@ -4,6 +4,7 @@ Constants used in the KickGroups """ + from pathlib import Path KICKGROUPS_ROOT = Path("/user/slops/data/LHC_DATA/OP_DATA/Betabeat/KickGroups/MULTITURN_ACQ_GROUPS") @@ -31,6 +32,26 @@ BEAMPROCESS = "BEAMPROCESS" BEAM = "BEAM" -KICK_COLUMNS = [UTCTIME, LOCALTIME, TUNEX, TUNEY, DRIVEN_TUNEX, DRIVEN_TUNEY, DRIVEN_TUNEZ, AMPX, AMPY, AMPZ, TURNS, BUNCH, SDDS, JSON_FILE, BEAM, FILL, OPTICS, OPTICS_URI, BEAMPROCESS] +KICK_COLUMNS = [ + UTCTIME, + LOCALTIME, + TUNEX, + TUNEY, + DRIVEN_TUNEX, + DRIVEN_TUNEY, + DRIVEN_TUNEZ, + AMPX, + AMPY, + AMPZ, + TURNS, + BUNCH, + SDDS, + JSON_FILE, + BEAM, + FILL, + OPTICS, + OPTICS_URI, + BEAMPROCESS, +] COLUMNS_TO_HEADERS = [BEAM, FILL, BUNCH, TURNS, BEAMPROCESS, OPTICS, OPTICS_URI] -KICK_GROUP_COLUMNS = [UTCTIME, LOCALTIME, KICKGROUP, TIMESTAMP] \ No newline at end of file +KICK_GROUP_COLUMNS = [UTCTIME, LOCALTIME, KICKGROUP, TIMESTAMP] diff --git a/pylhc/constants/machine_settings_info.py b/pylhc/constants/machine_settings_info.py index acd6378..f42a0fb 100644 --- a/pylhc/constants/machine_settings_info.py +++ b/pylhc/constants/machine_settings_info.py @@ -5,6 +5,7 @@ Specific constants relating to the retrieval of machine settings information to be used in ``PyLHC``, to help with consistency. """ + from pylhc.constants.general import TFS_SUFFIX # TFS-File Conventions ######################################################### diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py index dff88dd..c810669 100644 --- a/pylhc/data_extract/lsa.py +++ b/pylhc/data_extract/lsa.py @@ -4,6 +4,7 @@ This module provides useful functions to conveniently wrap the functionality of ``pjlsa``. 
""" + import jpype import logging import re @@ -39,7 +40,7 @@ class LSAClient(pjLSAClient): """Extension of the LSAClient.""" def __getattr__(self, item): - """ Overwrite __getattr__ to raise the proper import errors at the proper time.""" + """Overwrite __getattr__ to raise the proper import errors at the proper time.""" try: super().__getattr__(item) except AttributeError as e: @@ -93,7 +94,7 @@ def find_existing_knobs(self, knobs: List[str]) -> List[str]: return knobs def find_last_fill( - self, acc_time: AccDatetime, accelerator: str = "lhc", source: str = "nxcals" + self, acc_time: AccDatetime, accelerator: str = "lhc", source: str = "nxcals" ) -> Tuple[str, list]: """ Return last fill name and BeamProcesses. @@ -105,11 +106,12 @@ def find_last_fill( Returns: tuple: Last fill name (str), Beamprocesses of last fill (list). - """ + """ start_time = acc_time.sub(days=1) # assumes a fill is not longer than a day try: fills = self.find_beamprocess_history( - t_start=start_time, t_end=acc_time, + t_start=start_time, + t_end=acc_time, accelerator=accelerator, source=source, ) @@ -121,7 +123,11 @@ def find_last_fill( return last_fill, fills[last_fill] def find_beamprocess_history( - self, t_start: AccDatetime, t_end: AccDatetime, accelerator: str = "lhc", source: str = "nxcals" + self, + t_start: AccDatetime, + t_end: AccDatetime, + accelerator: str = "lhc", + source: str = "nxcals", ) -> Dict: """ Finds the BeamProcesses between t_start and t_end and sorts then by fills. @@ -138,7 +144,9 @@ def find_beamprocess_history( Dictionary of fills (keys) with a list of Timestamps and BeamProcesses. """ - cts = self.findUserContextMappingHistory(t_start.timestamp(), t_end.timestamp(), accelerator=accelerator) + cts = self.findUserContextMappingHistory( + t_start.timestamp(), t_end.timestamp(), accelerator=accelerator + ) db = pytimber.LoggingDB(source=source, loglevel=logging.WARNING) fillnts, fillnv = try_to_acquire_data( @@ -146,7 +154,9 @@ def find_beamprocess_history( )["HX:FILLN"] if not len(fillnv): - raise ValueError(f"No beamprocesses for {accelerator} ({source}) found between {t_start} - {t_end}.") + raise ValueError( + f"No beamprocesses for {accelerator} ({source}) found between {t_start} - {t_end}." + ) LOG.debug(f"{len(fillnts)} fills aqcuired.") # map beam-processes to fills @@ -159,9 +169,12 @@ def find_beamprocess_history( return fills def get_trim_history( - self, beamprocess: str, knobs: list, - start_time: AccDatetime = None, end_time: AccDatetime = None, - accelerator: str = "lhc" + self, + beamprocess: str, + knobs: list, + start_time: AccDatetime = None, + end_time: AccDatetime = None, + accelerator: str = "lhc", ) -> dict: """ Get trim history for knobs between specified times. 
@@ -194,18 +207,23 @@ def get_trim_history(
 
         LOG.debug(f"Getting trims for {len(knobs)} knobs.")
         try:
-            trims = self.getTrims(parameter=knobs, beamprocess=beamprocess, start=start_time, end=end_time)
+            trims = self.getTrims(
+                parameter=knobs, beamprocess=beamprocess, start=start_time, end=end_time
+            )
         except jpype.java.lang.NullPointerException as e:
             # In the past this happened, when a knob was not defined, but
             # this should have been caught by the filter_existing_knobs above
-            raise ValueError(f"Something went wrong when extracting trims for the knobs: {knobs}") from e
+            raise ValueError(
+                f"Something went wrong when extracting trims for the knobs: {knobs}"
+            ) from e
 
         LOG.debug(f"{len(trims)} trims extracted.")
         trims_not_found = [k for k in knobs if k not in trims.keys()]
         if len(trims_not_found):
             LOG.warning(
                 f"The following knobs were not found in '{beamprocess}' "
-                f"or had no trims during the given time: {trims_not_found}")
+                f"or had no trims during the given time: {trims_not_found}"
+            )
         return trims
 
     def get_beamprocess_info(self, beamprocess: Union[str, object]) -> Dict:
@@ -226,8 +244,10 @@ def get_beamprocess_info(self, beamprocess: Union[str, object]) -> Dict:
         return bp_dict
 
     def find_active_beamprocess_at_time(
-        self, acc_time: AccDatetime, accelerator: str = "lhc",
-        bp_group: str = "POWERCONVERTERS"  # the Beamprocesses relevant for OMC,
+        self,
+        acc_time: AccDatetime,
+        accelerator: str = "lhc",
+        bp_group: str = "POWERCONVERTERS",  # the Beamprocesses relevant for OMC,
     ):
         """
         Find the active beam process at the time given.
@@ -250,8 +270,10 @@ def find_active_beamprocess_at_time(
         )
         beamprocess = beamprocessmap.get(bp_group)
         if beamprocess is None:
-            raise ValueError(f"No active BeamProcess found for group '{bp_group}' "
-                             f"at time {acc_time.utc_string} UTC.")
+            raise ValueError(
+                f"No active BeamProcess found for group '{bp_group}' "
+                f"at time {acc_time.utc_string} UTC."
+            )
         LOG.debug(f"Active Beamprocess at time '{acc_time.cern_utc_string()}': {str(beamprocess)}")
         return beamprocess
 
@@ -303,7 +325,9 @@ def get_madx_name_from_circuit(self, circuit: str):
         slist = jpype.java.util.Collections.singletonList(  # python lists did not work (jdilly)
             logical_name
         )
-        madx_name_map = self._deviceService.findMadStrengthNamesByLogicalNames(slist)  # returns a map
+        madx_name_map = self._deviceService.findMadStrengthNamesByLogicalNames(
+            slist
+        )  # returns a map
         madx_name = madx_name_map[logical_name]
         LOG.debug(f"Name conversion: {circuit} -> {logical_name} -> {madx_name}")
         return madx_name
@@ -347,6 +371,7 @@ class LSA(metaclass=LSAMeta):
     """Import this class to use LSA like the client without the need to instantiate it.
     Disadvantage: It will always use the default Server.
     """
+
     pass
 
 
@@ -355,10 +380,14 @@
 
 def _beamprocess_to_dict(bp):
     """Converts some fields of the beamprocess (java) to a dictionary."""
-    bp_dict = {'Name': bp.toString(), "Object": bp}
-    bp_dict.update({getter[3:]: str(bp.__getattribute__(getter)())  # __getattr__ does not exist
-                    for getter in dir(bp)
-                    if getter.startswith('get') and "Attribute" not in getter})
+    bp_dict = {"Name": bp.toString(), "Object": bp}
+    bp_dict.update(
+        {
+            getter[3:]: str(bp.__getattribute__(getter)())  # __getattr__ does not exist
+            for getter in dir(bp)
+            if getter.startswith("get") and "Attribute" not in getter
+        }
+    )
     return bp_dict
 
 
@@ -373,7 +402,7 @@ def try_to_acquire_data(function: Callable, *args, **kwargs):
 
     Returns:
         Return arguments of ``function``
-    """ 
+    """
     retries = MAX_RETRIES
     for tries in range(retries + 1):
         try:
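The client methods above chain naturally: find the active Beamprocess, resolve its info, then pull the trims. A hedged sketch (only works on the CERN network with `pjlsa` available; the knob name is illustrative):

    from pylhc.data_extract.lsa import LSA

    # acc_time: an AccDatetime instance, as used throughout this module
    beamprocess = LSA.find_active_beamprocess_at_time(acc_time, accelerator="lhc")
    bp_info = LSA.get_beamprocess_info(beamprocess)
    trims = LSA.get_trim_history(
        bp_info["Name"], ["LHCBEAM/IP5-XING-H-MURAD"], accelerator="lhc"
    )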
""" + pass @@ -355,10 +380,14 @@ class LSA(metaclass=LSAMeta): def _beamprocess_to_dict(bp): """Converts some fields of the beamprocess (java) to a dictionary.""" - bp_dict = {'Name': bp.toString(), "Object": bp} - bp_dict.update({getter[3:]: str(bp.__getattribute__(getter)()) # __getattr__ does not exist - for getter in dir(bp) - if getter.startswith('get') and "Attribute" not in getter}) + bp_dict = {"Name": bp.toString(), "Object": bp} + bp_dict.update( + { + getter[3:]: str(bp.__getattribute__(getter)()) # __getattr__ does not exist + for getter in dir(bp) + if getter.startswith("get") and "Attribute" not in getter + } + ) return bp_dict @@ -373,7 +402,7 @@ def try_to_acquire_data(function: Callable, *args, **kwargs): Returns: Return arguments of ``function`` - """ + """ retries = MAX_RETRIES for tries in range(retries + 1): try: diff --git a/pylhc/data_extract/timber.py b/pylhc/data_extract/timber.py index 1593d33..294de0d 100644 --- a/pylhc/data_extract/timber.py +++ b/pylhc/data_extract/timber.py @@ -4,6 +4,7 @@ This module provides useful functions to conveniently wrap the functionality of ``pytimber``. """ + import logging from omc3.utils.mock import cern_network_import diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 32017e1..788f3d2 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -87,6 +87,7 @@ .. _CarlierForcedDA2019: https://journals.aps.org/prab/pdf/10.1103/PhysRevAccelBeams.22.031002 """ + import os from collections import defaultdict from contextlib import suppress @@ -124,8 +125,8 @@ from tfs import TfsDataFrame from tfs.tools import significant_digits -pytimber = cern_network_import('pytimber') -PageStore = cern_network_import('pytimber.pagestore.PageStore') +pytimber = cern_network_import("pytimber") +PageStore = cern_network_import("pytimber.pagestore.PageStore") from pylhc.constants.forced_da_analysis import ( @@ -300,10 +301,12 @@ def get_params(): help="Assumed NORMALIZED nominal emittance for the machine.", ), emittance_tfs=dict( - type=PathOrDataframe, help="Dataframe or Path of pre-saved emittance tfs.", + type=PathOrDataframe, + help="Dataframe or Path of pre-saved emittance tfs.", ), intensity_tfs=dict( - type=PathOrDataframe, help="Dataframe or Path of pre-saved intensity tfs.", + type=PathOrDataframe, + help="Dataframe or Path of pre-saved intensity tfs.", ), show_wirescan_emittance=dict( default=False, @@ -342,7 +345,10 @@ def get_params(): choices=["fit_sigma", "average"], help="Which BSRT data to use (from database).", ), - show=dict(action="store_true", help="Show plots.",), + show=dict( + action="store_true", + help="Show plots.", + ), plot_styles=dict( type=str, nargs="+", @@ -682,7 +688,9 @@ def _get_bsrt_bunch_emittances_from_timber(beam, planes, db, timespan, key_type, x, y, y_std = x[y != 0], y[y != 0], y_std[y != 0] df = tfs.TfsDataFrame( - index=_timestamp_to_cerntime_index(x), columns=all_columns, dtype=float, + index=_timestamp_to_cerntime_index(x), + columns=all_columns, + dtype=float, ) df[col_nemittance] = y df[err_col(col_nemittance)] = y_std @@ -895,7 +903,7 @@ def fun_exp_decay(p, x): # fit and plot def fun_exp_sigma(p, x): # only used for plotting """p = DA_sigma, x = action (J_sigma)""" - return np.exp(-0.5 * (p ** 2 - x ** 2)) + return np.exp(-0.5 * (p**2 - x**2)) def fun_linear(p, x): # fit and plot @@ -981,7 +989,12 @@ def _fit_odr(fun, x, y, sx, sy, init): """ODR Fit (includes errors).""" # fill zero errors with the minimum error - otherwise fit will not work fit_model_sigma = 
diff --git a/pylhc/kickgroups.py b/pylhc/kickgroups.py
index 51e80f8..27e2de3 100644
--- a/pylhc/kickgroups.py
+++ b/pylhc/kickgroups.py
@@ -56,6 +56,7 @@
     values for the first ones. A value of zero means showing all files
     in the group.
 """
+
 import argparse
 import json
 
@@ -106,7 +107,9 @@
 # List Kickgroups --------------------------------------------------------------
 
 
-def list_available_kickgroups(by: str = TIMESTAMP, root: Path | str = KICKGROUPS_ROOT, printout: bool = True) -> DataFrame:
+def list_available_kickgroups(
+    by: str = TIMESTAMP, root: Path | str = KICKGROUPS_ROOT, printout: bool = True
+) -> DataFrame:
     """
     List all available KickGroups in `root` with optional sorting..
@@ -174,7 +177,9 @@ def get_kickgroup_info(kick_group: str, root: Path | str = KICKGROUPS_ROOT) -> T
     LOG.debug(f"Loading info from all KickFiles in KickGroup '{kick_group}'")
     kick_group_data = _load_json(Path(root) / f"{kick_group}.json")
     kicks_files = kick_group_data["jsonFiles"]
-    df_info = TfsDataFrame(index=range(len(kicks_files)), columns=KICK_COLUMNS, headers={KICKGROUP: kick_group})
+    df_info = TfsDataFrame(
+        index=range(len(kicks_files)), columns=KICK_COLUMNS, headers={KICKGROUP: kick_group}
+    )
 
     if not len(kicks_files):
         raise ValueError(f"KickGroup {kick_group} contains no kicks.")
@@ -232,8 +237,12 @@ def load_kickfile(kickfile: Path | str) -> pd.Series:
 
         data[TUNEX] = kick["excitationSettings"][0]["acDipoleSettings"][idx]["measuredTune"]
         data[TUNEY] = kick["excitationSettings"][0]["acDipoleSettings"][idy]["measuredTune"]
-        data[DRIVEN_TUNEX] = data[TUNEX] + kick["excitationSettings"][0]["acDipoleSettings"][idx]["deltaTuneStart"]
-        data[DRIVEN_TUNEY] = data[TUNEY] + kick["excitationSettings"][0]["acDipoleSettings"][idy]["deltaTuneStart"]
+        data[DRIVEN_TUNEX] = (
+            data[TUNEX] + kick["excitationSettings"][0]["acDipoleSettings"][idx]["deltaTuneStart"]
+        )
+        data[DRIVEN_TUNEY] = (
+            data[TUNEY] + kick["excitationSettings"][0]["acDipoleSettings"][idy]["deltaTuneStart"]
+        )
         data[DRIVEN_TUNEZ] = kick["excitationData"][0]["rfdata"]["excitationFrequency"]
         data[AMPX] = kick["excitationSettings"][0]["acDipoleSettings"][idx]["amplitude"]
         data[AMPY] = kick["excitationSettings"][0]["acDipoleSettings"][idy]["amplitude"]
@@ -254,7 +263,9 @@ def load_kickfile(kickfile: Path | str) -> pd.Series:
                 LOG.warning(f"{str(e)} in {kickfile}")
                 continue
 
-            if "measuredTune" not in kick["excitationSettings"][idx]:  # Happens in very early files in 2022
+            if (
+                "measuredTune" not in kick["excitationSettings"][idx]
+            ):  # Happens in very early files in 2022
                 LOG.warning(f"No measured tune {plane} in the kick file: {kickfile}")
                 continue
 
@@ -267,9 +278,10 @@ def load_kickfile(kickfile: Path | str) -> pd.Series:
 
     return data
 
+
 def _get_delta_tune(kick: dict, idx_plane: int) -> float:
-    """ Return the delta from the tune for the kicks.
-    For some reason, there are multiple different keys where this can be stored. """
+    """Return the delta from the tune for the kicks.
+    For some reason, there are multiple different keys where this can be stored."""
 
     # Default key for ACDipole ---
     # There is also "deltaTuneEnd", but we usually don't change the delta during kick
@@ -293,8 +305,8 @@ def _get_delta_tune(kick: dict, idx_plane: int) -> float:
     raise KeyError(f"Could not find delta tune for plane-entry {idx_plane}")
 
 
-def _find_existing_file_path(path: str|Path) -> Path:
-    """ Find the existing kick file for the kick group. """
+def _find_existing_file_path(path: str | Path) -> Path:
+    """Find the existing kick file for the kick group."""
     path = Path(path)
     if path.is_file():
         return path
@@ -304,7 +316,7 @@ def _find_existing_file_path(path: str | Path) -> Path:
 
     if fill_data in path.parts:
         # Fills are moved at the end of year
-        idx = path.parts.index(fill_data)+1
+        idx = path.parts.index(fill_data) + 1
         new_path = Path(*path.parts[:idx], all_fill_data, *path.parts[idx:])
         if new_path.exists():
             return new_path
 
     raise FileNotFoundError(f"Could not find kick file at {path}")
 
-
 # Functions with console output ---
 
 # Full Info -
@@ -353,7 +364,9 @@ def _print_kickgroup_info(kicks_info: TfsDataFrame) -> None:
 
 # Files only -
 
-def show_kickgroup_files(kick_group: str, nfiles: int = None, root: Path | str = KICKGROUPS_ROOT) -> None:
+def show_kickgroup_files(
+    kick_group: str, nfiles: int = None, root: Path | str = KICKGROUPS_ROOT
+) -> None:
     """
     Wrapper around `pylhc.kickgroups.get_kickgroup_info`, gathering the relevant
     information from all kickfiles in the KickGroup and printing only the sdds-filepaths
@@ -454,7 +467,7 @@ def _get_plane_index(data: list[dict], plane: str) -> str:
 
 
 def _get_fill_from_path(sdds_path: str | Path) -> str:
-    """ Get the fill number from the path to the sdds file.
+    """Get the fill number from the path to the sdds file.
     Note: Not sure why the fill is not saved automatically into the .json file.
     Maybe we should ask OP to include this. """
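A short usage sketch for the kickgroup helpers reformatted above (the group name is made up, and `KICKGROUPS_ROOT` only resolves on the CERN filesystem):

    from pylhc import kickgroups

    # Overview of all groups (sorted by TIMESTAMP unless `by` says otherwise)
    overview = kickgroups.list_available_kickgroups(printout=False)
    # Per-kick details for one group, as a TfsDataFrame
    info = kickgroups.get_kickgroup_info("SOME_KICKGROUP_2022")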
""" +def _find_existing_file_path(path: str | Path) -> Path: + """Find the existing kick file for the kick group.""" path = Path(path) if path.is_file(): return path @@ -304,7 +316,7 @@ def _find_existing_file_path(path: str|Path) -> Path: if fill_data in path.parts: # Fills are moved at the end of year - idx = path.parts.index(fill_data)+1 + idx = path.parts.index(fill_data) + 1 new_path = Path(*path.parts[:idx], all_fill_data, *path.parts[idx:]) if new_path.exists(): return new_path @@ -312,7 +324,6 @@ def _find_existing_file_path(path: str|Path) -> Path: raise FileNotFoundError(f"Could not find kick file at {path}") - # Functions with console output --- # Full Info - @@ -353,7 +364,9 @@ def _print_kickgroup_info(kicks_info: TfsDataFrame) -> None: # Files only - -def show_kickgroup_files(kick_group: str, nfiles: int = None, root: Path | str = KICKGROUPS_ROOT) -> None: +def show_kickgroup_files( + kick_group: str, nfiles: int = None, root: Path | str = KICKGROUPS_ROOT +) -> None: """ Wrapper around `pylhc.kickgroups.get_kickgroup_info`, gathering the relevant information from all kickfiles in the KickGroup and printing only the sdds-filepaths @@ -454,7 +467,7 @@ def _get_plane_index(data: list[dict], plane: str) -> str: def _get_fill_from_path(sdds_path: str | Path) -> str: - """ Get the fill number from the path to the sdds file. + """Get the fill number from the path to the sdds file. Note: Not sure why the fill is not saved automatically into the .json file. Maybe we should ask OP to include this. """ diff --git a/pylhc/lsa_to_madx.py b/pylhc/lsa_to_madx.py index c4b4fc1..78a7665 100644 --- a/pylhc/lsa_to_madx.py +++ b/pylhc/lsa_to_madx.py @@ -92,6 +92,7 @@ Hint: the knobs active at a given time can be retrieved with the `~pylhc.machine_settings_info` script. """ + import argparse import re import string @@ -126,7 +127,9 @@ def parse_knobs_and_trim_values_from_file(knobs_file: Path) -> Dict[str, float]: Returns: A `dict` with as keys the parsed knob names and as values their associated trims. """ - knob_lines = [line for line in Path(knobs_file).read_text().splitlines() if not line.startswith("#")] + knob_lines = [ + line for line in Path(knobs_file).read_text().splitlines() if not line.startswith("#") + ] results = {} for line in knob_lines: @@ -169,9 +172,13 @@ def get_sign_madx_vs_lsa(madx_name: str) -> int: return 1 -def get_madx_script_from_definition_dataframe(deltas_df: tfs.TfsDataFrame, lsa_knob: str, trim: float = 1.0, - by_reference: bool = True, verbose: bool = False - ) -> str: +def get_madx_script_from_definition_dataframe( + deltas_df: tfs.TfsDataFrame, + lsa_knob: str, + trim: float = 1.0, + by_reference: bool = True, + verbose: bool = False, +) -> str: """ Given the extracted definition dataframe of an LSA knob - as returned by `~pylhc.data_extract.lsa.LSAClient.get_knob_circuits` - this function will generate the @@ -218,9 +225,13 @@ def get_madx_script_from_definition_dataframe(deltas_df: tfs.TfsDataFrame, lsa_k # mess up parsing of "var = var + -value" if delta_k is negative if by_reference: variable_init = f"{variable}_init" - change_commands.append(f"{variable:<12} := {variable_init:^19} + ({delta:^25}) * {trim_variable};") + change_commands.append( + f"{variable:<12} := {variable_init:^19} + ({delta:^25}) * {trim_variable};" + ) else: - change_commands.append(f"{variable:<12} = {variable:^15} + ({delta:^25}) * {trim_variable};") + change_commands.append( + f"{variable:<12} = {variable:^15} + ({delta:^25}) * {trim_variable};" + ) change_commands.append(f"! 
diff --git a/pylhc/machine_settings_info.py b/pylhc/machine_settings_info.py
index 5a2dd5f..f4a249a 100644
--- a/pylhc/machine_settings_info.py
+++ b/pylhc/machine_settings_info.py
@@ -7,9 +7,9 @@
 If an output path is given, all info will be written into tfs files,
 otherwise a summary is logged into console.
 
-Knob values can be extracted and the knob definition gathered. 
+Knob values can be extracted and the knob definition gathered.
 For brevity reasons, this data is not logged into the summary in the console.
-If a start time is given, the trim history for the given knobs can be written out as well. 
+If a start time is given, the trim history for the given knobs can be written out as well.
 This data is also not logged.
 Can be run from command line, parameters as given in :meth:`pylhc.machine_settings_info.get_info`.
@@ -47,6 +47,7 @@
 
 :author: jdilly
 """
+
 from collections import OrderedDict, namedtuple
 
 import tfs
@@ -67,9 +68,12 @@
 
 class AccDatetimeOrStr(metaclass=get_instance_faker_meta(AccDatetime, str)):
     """A class that accepts AccDateTime and strings."""
+
     def __new__(cls, value):
         if isinstance(value, str):
-            value = value.strip("\'\"")  # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER
+            value = value.strip(
+                "'\""
+            )  # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER
         return value
 
 
@@ -82,137 +86,130 @@ def _get_params() -> dict:
         time=dict(
             default=None,
             type=AccDatetimeOrStr,
-            help=("UTC Time as 'Y-m-d H:M:S.f' or ISO format or AccDatetime object."
-                  " Acts as point in time or end time (if ``start_time`` is given).")
+            help=(
+                "UTC Time as 'Y-m-d H:M:S.f' or ISO format or AccDatetime object."
+                " Acts as point in time or end time (if ``start_time`` is given)."
             ),
+        ),
         start_time=dict(
             default=None,
             type=AccDatetimeOrStr,
-            help=("UTC Time as 'Y-m-d H:M:S.f' or ISO format or AccDatetime object."
-                  " Defines the beginning of the time-range.")
-        ),
+            help=(
+                "UTC Time as 'Y-m-d H:M:S.f' or ISO format or AccDatetime object."
+                " Defines the beginning of the time-range."
+            ),
+        ),
         knobs=dict(
             default=None,
             nargs="+",
             type=str,
             help="List of knobnames. "
-                 "If `None` (or omitted) no knobs will be extracted. "
-                 "If it is just the string ``'all'``, "
-                 "all knobs will be extracted (can be slow). "
-                 "Use the string ``'default'`` for pre-defined knobs of interest."
+            "If `None` (or omitted) no knobs will be extracted. "
+            "If it is just the string ``'all'``, "
+            "all knobs will be extracted (can be slow). "
+            "Use the string ``'default'`` for pre-defined knobs of interest.",
         ),
-        accel=dict(
-            default='lhc',
-            type=str,
-            help="Accelerator name."),
+        accel=dict(default="lhc", type=str, help="Accelerator name."),
         beamprocess=dict(
             type=str,
-            help=("Manual override for the Beamprocess "
-                  "(otherwise taken at the given ``time``)")
+            help=("Manual override for the Beamprocess (otherwise taken at the given ``time``)"),
        ),
-        output_dir=dict(
-            default=None,
-            type=PathOrStr,
-            help="Output directory."),
-        knob_definitions=dict(
-            action="store_true",
-            help="Set to extract knob definitions."),
-        source=dict(
-            type=str,
-            default="nxcals",
-            help="Source to extract data from."),
+        output_dir=dict(default=None, type=PathOrStr, help="Output directory."),
+        knob_definitions=dict(action="store_true", help="Set to extract knob definitions."),
+        source=dict(type=str, default="nxcals", help="Source to extract data from."),
         log=dict(
             action="store_true",
-            help="Write summary into log (automatically done if no output path is given)."),
+            help="Write summary into log (automatically done if no output path is given).",
+        ),
     )
 
 
 @entrypoint(_get_params(), strict=True)
 def get_info(opt) -> Dict[str, object]:
     """
-        Get info about **Beamprocess**, **Optics** and **Knobs** at given time.
+    Get info about **Beamprocess**, **Optics** and **Knobs** at given time.
 
-        Keyword Args:
+    Keyword Args:
 
-            *--Optional--*
+        *--Optional--*
 
-            - **accel** *(str)*:
+        - **accel** *(str)*:
 
-                Accelerator name.
+            Accelerator name.
-                default: ``lhc``
+            default: ``lhc``
 
-            - **beamprocess** *(str)*:
+        - **beamprocess** *(str)*:
 
-                Manual override for the Beamprocess
-                (otherwise taken at the given ``time``)
+            Manual override for the Beamprocess
+            (otherwise taken at the given ``time``)
 
-                default: ``None``
+            default: ``None``
 
-            - **knob_definitions**:
+        - **knob_definitions**:
 
-                Set to extract knob definitions.
+            Set to extract knob definitions.
 
-                action: ``store_true``
+            action: ``store_true``
 
-            - **knobs** *(str)*:
+        - **knobs** *(str)*:
 
-                List of knobnames.
-                If `None` (or omitted) no knobs will be extracted.
-                If it is just the string ``'all'``,
-                all knobs will be extracted (can be slow).
-                Use the string ``'default'`` for pre-defined knobs of interest.
+            List of knobnames.
+            If `None` (or omitted) no knobs will be extracted.
+            If it is just the string ``'all'``,
+            all knobs will be extracted (can be slow).
+            Use the string ``'default'`` for pre-defined knobs of interest.
+            If this is called from python, the strings need
+            to be put as single items into a list.
 
-                default: ``None``
+            default: ``None``
 
-            - **log**:
+        - **log**:
 
-                Write summary into log (automatically done if no output path is
-                given).
+            Write summary into log (automatically done if no output path is
+            given).
 
-                action: ``store_true``
+            action: ``store_true``
 
-            - **output_dir** *(PathOrStr)*:
+        - **output_dir** *(PathOrStr)*:
 
-                Output directory.
+            Output directory.
 
-                default: ``None``
+            default: ``None``
 
-            - **source** *(str)*:
+        - **source** *(str)*:
 
-                Source to extract data from.
+            Source to extract data from.
 
-                default: ``nxcals``
+            default: ``nxcals``
 
-            - **start_time** *(AccDatetime, str)*:
+        - **start_time** *(AccDatetime, str)*:
 
-                UTC Time as 'Y-m-d H:M:S.f' format or AccDatetime object.
-                Defines the beginning of the time-range.
+            UTC Time as 'Y-m-d H:M:S.f' format or AccDatetime object.
+            Defines the beginning of the time-range.
 
-                default: ``None``
+            default: ``None``
 
-            - **time** *(AccDatetime, str)*:
+        - **time** *(AccDatetime, str)*:
 
-                UTC Time as 'Y-m-d H:M:S.f' format or AccDatetime object.
-                Acts as point in time or end time (if ``start_time`` is given).
+            UTC Time as 'Y-m-d H:M:S.f' format or AccDatetime object.
+            Acts as point in time or end time (if ``start_time`` is given).
 
-                default: ``None``
+            default: ``None``
 
-        Returns:
-            dict: Dictionary containing the given ``time`` and ``start_time``,
-            the extracted ``beamprocess``-info and ``optics``-info, the
-            ``trim_histories`` and current (i.e. at given ``time``) ``trims``
-            and the ``knob_definitions``, if extracted.
+    Returns:
+        dict: Dictionary containing the given ``time`` and ``start_time``,
+        the extracted ``beamprocess``-info and ``optics``-info, the
+        ``trim_histories`` and current (i.e. at given ``time``) ``trims``
+        and the ``knob_definitions``, if extracted.
""" if opt.output_dir is None: @@ -228,17 +225,19 @@ def get_info(opt) -> Dict[str, object]: LOG.error(str(e)) else: if opt.knobs is not None: - if len(opt.knobs) == 1 and opt.knobs[0].lower() == 'all': + if len(opt.knobs) == 1 and opt.knobs[0].lower() == "all": opt.knobs = [] # will extract all knobs in get_trim_history - if len(opt.knobs) == 1 and opt.knobs[0].lower() == 'default': - opt.knobs = [name2lsa(knob) for category in KNOB_CATEGORIES.values() - for knob in category] + if len(opt.knobs) == 1 and opt.knobs[0].lower() == "default": + opt.knobs = [ + name2lsa(knob) for category in KNOB_CATEGORIES.values() for knob in category + ] trim_histories = LSA.get_trim_history( - beamprocess_info.Object, opt.knobs, + beamprocess_info.Object, + opt.knobs, start_time=acc_start_time, end_time=acc_time, - accelerator=opt.accel + accelerator=opt.accel, ) trims = _get_last_trim(trim_histories) @@ -255,9 +254,17 @@ def get_info(opt) -> Dict[str, object]: out_path = Path(opt.output_dir) out_path.mkdir(parents=True, exist_ok=True) write_summary(out_path, opt.accel, acc_time, beamprocess_info, optics_info, trims) - + if trim_histories and acc_start_time: - write_trim_histories(out_path, trim_histories, opt.accel, acc_time, acc_start_time, beamprocess_info, optics_info) + write_trim_histories( + out_path, + trim_histories, + opt.accel, + acc_time, + acc_start_time, + beamprocess_info, + optics_info, + ) if knob_definitions: write_knob_defitions(out_path, knob_definitions) @@ -269,15 +276,19 @@ def get_info(opt) -> Dict[str, object]: "optics": optics_info, "trim_histories": trim_histories, "trims": trims, - "knob_definitions": knob_definitions + "knob_definitions": knob_definitions, } # Output ####################################################################### -def log_summary(acc_time: AccDatetime, bp_info: DotDict, - optics_info: DotDict = None, trims: Dict[str, float] = None): +def log_summary( + acc_time: AccDatetime, + bp_info: DotDict, + optics_info: DotDict = None, + trims: Dict[str, float] = None, +): """Log the summary. Args: @@ -311,8 +322,12 @@ def log_summary(acc_time: AccDatetime, bp_info: DotDict, def write_summary( - output_path: Path, accel: str, acc_time: AccDatetime, bp_info: DotDict, - optics_info: DotDict = None, trims: Dict[str, float] = None + output_path: Path, + accel: str, + acc_time: AccDatetime, + bp_info: DotDict, + optics_info: DotDict = None, + trims: Dict[str, float] = None, ): """Write summary into a ``tfs`` file. 
@@ -328,21 +343,27 @@ def write_summary(
         trims = trims.items()
 
     info_tfs = tfs.TfsDataFrame(trims, columns=[const.column_knob, const.column_value])
-    info_tfs.headers = OrderedDict([
-        ("Hint:", "All times given in UTC."),
-        (const.head_accel, accel),
-        (const.head_time, acc_time.cern_utc_string()),
-        (const.head_beamprocess, bp_info.Name),
-        (const.head_fill, bp_info.Fill),
-        (const.head_beamprocess_start, bp_info.StartTime.cern_utc_string()),
-        (const.head_context_category, bp_info.ContextCategory),
-        (const.head_beamprcess_description, bp_info.Description),
-    ])
+    info_tfs.headers = OrderedDict(
+        [
+            ("Hint:", "All times given in UTC."),
+            (const.head_accel, accel),
+            (const.head_time, acc_time.cern_utc_string()),
+            (const.head_beamprocess, bp_info.Name),
+            (const.head_fill, bp_info.Fill),
+            (const.head_beamprocess_start, bp_info.StartTime.cern_utc_string()),
+            (const.head_context_category, bp_info.ContextCategory),
+            (const.head_beamprcess_description, bp_info.Description),
+        ]
+    )
     if optics_info is not None:
-        info_tfs.headers.update(OrderedDict([
-            (const.head_optics, optics_info.Name),
-            (const.head_optics_start, optics_info.StartTime.cern_utc_string()),
-        ]))
+        info_tfs.headers.update(
+            OrderedDict(
+                [
+                    (const.head_optics, optics_info.Name),
+                    (const.head_optics_start, optics_info.StartTime.cern_utc_string()),
+                ]
+            )
+        )
     tfs.write(output_path / const.info_name, info_tfs)
 
 
@@ -354,11 +375,15 @@ def write_knob_defitions(output_path: Path, definitions: dict):
 
 
 def write_trim_histories(
-    output_path: Path, trim_histories: Dict[str, namedtuple], accel: str,
-    acc_time: AccDatetime = None, acc_start_time: AccDatetime = None,
-    bp_info: DotDict = None, optics_info: DotDict = None
+    output_path: Path,
+    trim_histories: Dict[str, namedtuple],
+    accel: str,
+    acc_time: AccDatetime = None,
+    acc_start_time: AccDatetime = None,
+    bp_info: DotDict = None,
+    optics_info: DotDict = None,
 ):
-    """ Write the trim histories into tfs files.
+    """Write the trim histories into tfs files.
     There are two time columns, one with timestamps as they are usually easier to handle
     and one with the UTC-string, as they are more human-readable.
@@ -374,28 +399,30 @@ def write_trim_histories(
     AccDT = AcceleratorDatetime[accel]
 
     # Create headers with basic info ---
-    headers = OrderedDict([("Hint:", "All times are given in UTC."),
-                           (const.head_accel, accel)
-                           ])
-    
+    headers = OrderedDict([("Hint:", "All times are given in UTC."), (const.head_accel, accel)])
+
     if acc_start_time:
         headers.update({const.head_start_time: acc_start_time.cern_utc_string()})
 
     if acc_time:
         headers.update({const.head_end_time: acc_time.cern_utc_string()})
-    
+
     if bp_info:
-        headers.update({
-            const.head_beamprocess: bp_info.Name,
-            const.head_fill: bp_info.Fill,
-        })
+        headers.update(
+            {
+                const.head_beamprocess: bp_info.Name,
+                const.head_fill: bp_info.Fill,
+            }
+        )
 
     if optics_info:
         headers.update({const.head_optics: optics_info.Name})
 
     # Write trim history per knob ----
     for knob, trim_history in trim_histories.items():
-        trims_tfs = tfs.TfsDataFrame(headers=headers, columns=[const.column_time, const.column_timestamp, const.column_value])
+        trims_tfs = tfs.TfsDataFrame(
+            headers=headers, columns=[const.column_time, const.column_timestamp, const.column_value]
+        )
         for timestamp, value in zip(trim_history.time, trim_history.data):
             time = AccDT.from_timestamp(timestamp).cern_utc_string()
             try:
@@ -416,7 +443,9 @@
 # Beamprocess ##################################################################
 
 
-def _get_beamprocess(acc_time: AccDatetime, accel: str, source: str, beamprocess: str = None) -> DotDict:
+def _get_beamprocess(
+    acc_time: AccDatetime, accel: str, source: str, beamprocess: str = None
+) -> DotDict:
     """Get the info about the active beamprocess at ``acc_time`` or the given one."""
     if beamprocess is None:
         beamprocess = LSA.find_active_beamprocess_at_time(acc_time, accelerator=accel)
@@ -438,7 +467,9 @@ def _get_beamprocess(
     return DotDict(bp_info)
 
 
-def _get_beamprocess_start(beamprocesses: Iterable[Tuple[float, str]], acc_time: AccDatetime, bp_name: str) -> AccDatetime:
+def _get_beamprocess_start(
+    beamprocesses: Iterable[Tuple[float, str]], acc_time: AccDatetime, bp_name: str
+) -> AccDatetime:
     """
     Get the last beamprocess in the list of beamprocesses before dt_utc.
     Returns the start time of the beam-process in utc.
@@ -449,9 +480,7 @@ def _get_beamprocess_start(
         if time <= ts and name == bp_name:
             LOG.debug(f"Found start for beamprocess '{bp_name}' at timestamp {time}.")
             return acc_time.__class__.from_timestamp(time)
-    raise ValueError(
-        f"Beamprocess '{bp_name}' was not found."
-    )
+    raise ValueError(f"Beamprocess '{bp_name}' was not found.")
 
 
 # Optics #######################################################################
@@ -520,7 +549,7 @@ def _get_last_trim(trims: dict) -> dict:
 
 
 def _get_times(time: Union[str, AccDatetime], start_time: Union[str, AccDatetime], accel: str):
-    """ Returns acc_time and acc_start_time parameters depending on the user input. """
+    """Returns acc_time and acc_start_time parameters depending on the user input."""
     acc_dt = AcceleratorDatetime[accel]
 
     def get_time(t, default=None):
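A hedged call of the `get_info` entrypoint documented above (needs LSA access on the CERN network; the time and knob name are illustrative):

    from pylhc.machine_settings_info import get_info

    info = get_info(
        time="2022-06-26 03:00:00.000",      # UTC, 'Y-m-d H:M:S.f'
        knobs=["LHCBEAM/IP5-XING-H-MURAD"],  # or ["all"] / ["default"]
        accel="lhc",
        output_dir="machine_settings_info",  # tfs summaries written here
    )
    # dict with beamprocess/optics info, trim histories and current trims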
""" + """Returns acc_time and acc_start_time parameters depending on the user input.""" acc_dt = AcceleratorDatetime[accel] def get_time(t, default=None): diff --git a/tests/unit/test_bpm_calibration.py b/tests/unit/test_bpm_calibration.py index 673df7f..ec0e23b 100644 --- a/tests/unit/test_bpm_calibration.py +++ b/tests/unit/test_bpm_calibration.py @@ -10,28 +10,28 @@ from pylhc import bpm_calibration as calibration from pylhc.constants.calibration import BPMS -INPUTS_DIR = Path(__file__).parent.parent / 'inputs' / 'calibration' -MEASUREMENTS = INPUTS_DIR / 'measurements' -EXPECTED_OUTPUT = INPUTS_DIR / 'output' +INPUTS_DIR = Path(__file__).parent.parent / "inputs" / "calibration" +MEASUREMENTS = INPUTS_DIR / "measurements" +EXPECTED_OUTPUT = INPUTS_DIR / "output" def test_calibration_same_betabeat(tmp_path): - factors = calibration.main(inputdir=MEASUREMENTS / 'for_beta', - outputdir=tmp_path, - ips=[1, 4, 5]) + factors = calibration.main( + inputdir=MEASUREMENTS / "for_beta", outputdir=tmp_path, ips=[1, 4, 5] + ) # Let's open the tfs files we just created - x_tfs = tfs.read(tmp_path / 'calibration_beta_x.tfs', index='NAME') - y_tfs = tfs.read(tmp_path / 'calibration_beta_y.tfs', index='NAME') + x_tfs = tfs.read(tmp_path / "calibration_beta_x.tfs", index="NAME") + y_tfs = tfs.read(tmp_path / "calibration_beta_y.tfs", index="NAME") # Those tfs need to be filtered because GetLLM only gives us the BPMs # used in ballistic optics x_tfs = x_tfs.reindex(BPMS[1][1] + BPMS[4][1] + BPMS[5][1]) y_tfs = y_tfs.reindex(BPMS[1][1] + BPMS[4][1] + BPMS[5][1]) - + # And the ones created by BetaBeat.src for the same measurements - expected_x_tfs = tfs.read(EXPECTED_OUTPUT / 'calibration_beta_x.tfs', index='NAME') - expected_y_tfs = tfs.read(EXPECTED_OUTPUT / 'calibration_beta_y.tfs', index='NAME') + expected_x_tfs = tfs.read(EXPECTED_OUTPUT / "calibration_beta_x.tfs", index="NAME") + expected_y_tfs = tfs.read(EXPECTED_OUTPUT / "calibration_beta_y.tfs", index="NAME") # BetaBeat's tfs implementation is a bit different, we don't have the # same integer precision @@ -41,7 +41,7 @@ def test_calibration_same_betabeat(tmp_path): tfs_ = [x_tfs, y_tfs, expected_x_tfs, expected_y_tfs] for i in range(len(tfs_)): tfs_[i] = tfs_[i].drop("ERROR_CALIBRATION_FIT", axis=1) - + # Compare the two dataframes assert_frame_equal(tfs_[0], tfs_[2], rtol=precision) assert_frame_equal(tfs_[1], tfs_[3], rtol=precision) @@ -49,18 +49,14 @@ def test_calibration_same_betabeat(tmp_path): def test_bad_args(): with pytest.raises(ArgumentError) as e: - calibration.main(inputdir='wat', - outputdir='', - ips=[1,5]) + calibration.main(inputdir="wat", outputdir="", ips=[1, 5]) assert "inputdir' is not of type Path" in str(e.value) def test_no_beta_tfs(tmp_path): with pytest.raises(FileNotFoundError) as e: - calibration.main(inputdir=pathlib.Path('wat'), - outputdir=tmp_path, - ips=[1,5]) + calibration.main(inputdir=pathlib.Path("wat"), outputdir=tmp_path, ips=[1, 5]) assert "No such file or directory:" in str(e.value) assert "beta_phase_x.tfs" in str(e.value) @@ -68,101 +64,100 @@ def test_no_beta_tfs(tmp_path): def test_wrong_ip(tmp_path): with pytest.raises(ArgumentError) as e: - calibration.main(inputdir=MEASUREMENTS / 'for_beta', - outputdir=tmp_path, - ips=[15, 22]) + calibration.main(inputdir=MEASUREMENTS / "for_beta", outputdir=tmp_path, ips=[15, 22]) err = "All elements of 'ips' need to be one of '[1, 4, 5]', instead the list was [15, 22]." 
assert err in str(e.value) def test_calibration_same_dispersion(tmp_path): - factors = calibration.main(inputdir=MEASUREMENTS / 'for_dispersion', - outputdir=tmp_path, - method='dispersion', - ips=[1,5]) + factors = calibration.main( + inputdir=MEASUREMENTS / "for_dispersion", + outputdir=tmp_path, + method="dispersion", + ips=[1, 5], + ) # Let's open the tfs files we just created - x_tfs = tfs.read(tmp_path / 'calibration_dispersion_x.tfs') - + x_tfs = tfs.read(tmp_path / "calibration_dispersion_x.tfs") + # And the ones created by BetaBeat.src for the same measurements - expected_x_tfs = tfs.read(EXPECTED_OUTPUT / 'calibration_dispersion_x.tfs') + expected_x_tfs = tfs.read(EXPECTED_OUTPUT / "calibration_dispersion_x.tfs") - # Check all the BPMs are indeed the same - assert x_tfs['NAME'].equals(expected_x_tfs['NAME']) + # Check all the BPMs are indeed the same + assert x_tfs["NAME"].equals(expected_x_tfs["NAME"]) precision = 1e-4 # BBsrc was wrong for the calibration error fit and the calibration fits # So we can only check the first column: CALIBRATION - assert_series_equal(x_tfs['CALIBRATION'], expected_x_tfs['CALIBRATION'], atol=precision) + assert_series_equal(x_tfs["CALIBRATION"], expected_x_tfs["CALIBRATION"], atol=precision) def test_beta_equal(tmp_path): - factors = calibration.main(inputdir=MEASUREMENTS / 'same_beta', - outputdir=tmp_path, - method='beta') + factors = calibration.main( + inputdir=MEASUREMENTS / "same_beta", outputdir=tmp_path, method="beta" + ) # beta from phase and beta amp are the same. Calibrations factors should # equal to 1 - expected = np.array([1.0] * len(factors['X']['CALIBRATION'])) - assert (factors['X']['CALIBRATION'].to_numpy() == expected).all() - - expected = np.array([1.0] * len(factors['Y']['CALIBRATION'])) - assert (factors['Y']['CALIBRATION'].to_numpy() == expected).all() + expected = np.array([1.0] * len(factors["X"]["CALIBRATION"])) + assert (factors["X"]["CALIBRATION"].to_numpy() == expected).all() + + expected = np.array([1.0] * len(factors["Y"]["CALIBRATION"])) + assert (factors["Y"]["CALIBRATION"].to_numpy() == expected).all() def test_missing_bpms(tmp_path): - calibration.main(inputdir=MEASUREMENTS / 'missing_bpms', - outputdir=tmp_path, - method='beta', - ips=[1,5]) + calibration.main( + inputdir=MEASUREMENTS / "missing_bpms", outputdir=tmp_path, method="beta", ips=[1, 5] + ) factors = tfs.read(tmp_path / "calibration_beta_x.tfs", index="NAME") assert factors.loc["BPMWB.4R1.B1"]["CALIBRATION"] == 1 assert factors.loc["BPMWB.4L1.B1"]["CALIBRATION"] == 1 assert factors.loc["BPMS.2L1.B1"]["CALIBRATION"] != 1 - + def test_number_in_out(tmp_path): - tfs_in = tfs.read(MEASUREMENTS / 'for_beta' / 'beta_phase_x.tfs') - factors = calibration.main(inputdir=MEASUREMENTS / 'for_beta', - outputdir=tmp_path, - method='beta') + tfs_in = tfs.read(MEASUREMENTS / "for_beta" / "beta_phase_x.tfs") + factors = calibration.main( + inputdir=MEASUREMENTS / "for_beta", outputdir=tmp_path, method="beta" + ) assert len(factors["X"]) == len(tfs_in) def test_no_error_tracking(tmp_path): # Test with tracking data on ballistic optics at IR4 without noise - factors = calibration.main(inputdir=MEASUREMENTS / 'tracking', - outputdir=tmp_path, - ips=[4]) - - x_df = factors['X'].reset_index(drop=True) - y_df = factors['Y'].reset_index(drop=True) - ir4_x_df = factors['X'].reindex(BPMS[4][1]).reset_index(drop=True) - ir4_y_df = factors['X'].reindex(BPMS[4][1]).reset_index(drop=True) + factors = calibration.main(inputdir=MEASUREMENTS / "tracking", outputdir=tmp_path, ips=[4]) 
+ + x_df = factors["X"].reset_index(drop=True) + y_df = factors["Y"].reset_index(drop=True) + ir4_x_df = factors["X"].reindex(BPMS[4][1]).reset_index(drop=True) + ir4_y_df = factors["X"].reindex(BPMS[4][1]).reset_index(drop=True) precision = 1e-3 - + # All factors ≃ 1 - expected = pd.Series([1.0] * len(factors['X']['CALIBRATION'])) - assert_series_equal(x_df['CALIBRATION'], expected, atol=precision, check_names=False) - assert_series_equal(y_df['CALIBRATION'], expected, atol=precision, check_names=False) - + expected = pd.Series([1.0] * len(factors["X"]["CALIBRATION"])) + assert_series_equal(x_df["CALIBRATION"], expected, atol=precision, check_names=False) + assert_series_equal(y_df["CALIBRATION"], expected, atol=precision, check_names=False) + # And their error ≃ - expected = pd.Series([0.0] * len(factors['X']['CALIBRATION'])) - assert_series_equal(x_df['ERROR_CALIBRATION'], expected, atol=precision, check_names=False) - assert_series_equal(y_df['ERROR_CALIBRATION'], expected, atol=precision, check_names=False) + expected = pd.Series([0.0] * len(factors["X"]["CALIBRATION"])) + assert_series_equal(x_df["ERROR_CALIBRATION"], expected, atol=precision, check_names=False) + assert_series_equal(y_df["ERROR_CALIBRATION"], expected, atol=precision, check_names=False) # Same with fit - expected = pd.Series([1.0] * len(ir4_x_df['CALIBRATION_FIT'])) - assert_series_equal(ir4_x_df['CALIBRATION_FIT'], expected, atol=precision, check_names=False) - assert_series_equal(ir4_y_df['CALIBRATION_FIT'], expected, atol=precision, check_names=False) - - # and its errors - expected = pd.Series([0.0] * len(ir4_x_df['ERROR_CALIBRATION_FIT'])) - assert_series_equal(ir4_x_df['ERROR_CALIBRATION_FIT'], expected, atol=precision, check_names=False) - assert_series_equal(ir4_y_df['ERROR_CALIBRATION_FIT'], expected, atol=precision, check_names=False) - + expected = pd.Series([1.0] * len(ir4_x_df["CALIBRATION_FIT"])) + assert_series_equal(ir4_x_df["CALIBRATION_FIT"], expected, atol=precision, check_names=False) + assert_series_equal(ir4_y_df["CALIBRATION_FIT"], expected, atol=precision, check_names=False) + # and its errors + expected = pd.Series([0.0] * len(ir4_x_df["ERROR_CALIBRATION_FIT"])) + assert_series_equal( + ir4_x_df["ERROR_CALIBRATION_FIT"], expected, atol=precision, check_names=False + ) + assert_series_equal( + ir4_y_df["ERROR_CALIBRATION_FIT"], expected, atol=precision, check_names=False + ) diff --git a/tests/unit/test_bsrt_analysis.py b/tests/unit/test_bsrt_analysis.py index 0d93f21..58315bb 100644 --- a/tests/unit/test_bsrt_analysis.py +++ b/tests/unit/test_bsrt_analysis.py @@ -17,12 +17,14 @@ BSRT_INPUTS = INPUTS_DIR / "bsrt_analysis" BASELINE_DIR = str(INPUTS_DIR / "mpl_bsrt_baseline") + def test_bsrt_df(_bsrt_df): results = bsrt_analysis.main(directory=str(BSRT_INPUTS), beam="B1") assert_frame_equal( results["bsrt_df"].sort_index(axis=1), _bsrt_df.copy().sort_index(axis=1), - check_dtype=False, check_index_type=False + check_dtype=False, + check_index_type=False, ) diff --git a/tests/unit/test_forced_da_analysis.py b/tests/unit/test_forced_da_analysis.py index 7d221f2..68936f7 100644 --- a/tests/unit/test_forced_da_analysis.py +++ b/tests/unit/test_forced_da_analysis.py @@ -53,7 +53,6 @@ def test_md3312_no_data_given(self, tmp_path): output_directory=tmp_path, ) - def test_md2162_timberdb(tmp_path): data_dir = INPUT / "kicks_horizontal_md2162" fda_analysis( @@ -73,6 +72,7 @@ def test_md2162_timberdb(tmp_path): # Helper ----------------------------------------------------------------------- + def 
check_output(output_dir: Path) -> None: assert len(list(output_dir.glob("*.pdf"))) == 5 assert len(list(output_dir.glob("*.tfs"))) == 4 diff --git a/tests/unit/test_lsa_to_madx.py b/tests/unit/test_lsa_to_madx.py index 0900320..967ad0c 100644 --- a/tests/unit/test_lsa_to_madx.py +++ b/tests/unit/test_lsa_to_madx.py @@ -28,32 +28,39 @@ def test_parse_knob_definition_file(self, knobs_file, parsed_definitions): class TestMADXWriting: def test_madx_script_writing_from_definition_df(self, knob_definition_df, correct_madx_script): - script = get_madx_script_from_definition_dataframe(knob_definition_df, lsa_knob="LHCBEAM/ATS_Test_Knob") + script = get_madx_script_from_definition_dataframe( + knob_definition_df, lsa_knob="LHCBEAM/ATS_Test_Knob" + ) assert script == correct_madx_script def test_invalid_madx_characters(self): assert _get_trim_variable("hel&lo!-you2") == "trim_hello_you2" - @pytest.mark.parametrize("lsa_knob", ["LHCBEAM/Super_Duper_Long_Name_For_A_Knob_Will_Be_Truncated_For_Sure", "ATS_Test_Knob"]) + @pytest.mark.parametrize( + "lsa_knob", + ["LHCBEAM/Super_Duper_Long_Name_For_A_Knob_Will_Be_Truncated_For_Sure", "ATS_Test_Knob"], + ) def test_trim_variable_from_long_knob_name(self, lsa_knob): """Testing that the generated trim variable is correctly truncated if too long.""" assert ( _get_trim_variable("ATS_2022_05_08_B1_arc_by_arc_coupling_133cm_30cm") == "trim_22_05_08_B1_arc_by_arc_coupling_133cm_30cm" ) - assert _get_trim_variable("___knob") == "trim_knob" # make sure we handle several underscores + assert ( + _get_trim_variable("___knob") == "trim_knob" + ) # make sure we handle several underscores assert len(_get_trim_variable(lsa_knob)) < 48 def test_get_deltas_fails(self, knob_definition_df): - """ Tests that get_delta fails when both columns are present and populated""" + """Tests that get_delta fails when both columns are present and populated""" knob_definition_df["DELTA_KL"] = knob_definition_df.DELTA_K with pytest.raises(ValueError) as e: _get_delta(knob_definition_df) assert "DELTA_KL and DELTA_K" in str(e) def test_get_deltas_succeeds(self, knob_definition_df): - """ Tests that get_delta succeeds when both columns are present but only one has a number, - or when only one column is present""" + """Tests that get_delta succeeds when both columns are present but only one has a number, + or when only one column is present""" deltas = _get_delta(knob_definition_df) assert (deltas == knob_definition_df.DELTA_K).all() @@ -100,4 +107,6 @@ def parsed_definitions() -> Dict[str, float]: @pytest.fixture def correct_madx_script() -> str: """Script for LHCBEAM/MD_ATS_2022_05_04_B1_RigidWaitsShift_IP1pos_knob with trim at +1""" - return (LSA_TO_MADX_INPUTS / "LHCBEAM_MD_ATS_2022_05_04_B1_RigidWaitsShift_IP1pos_knob.madx").read_text() + return ( + LSA_TO_MADX_INPUTS / "LHCBEAM_MD_ATS_2022_05_04_B1_RigidWaitsShift_IP1pos_knob.madx" + ).read_text() From 3b1c36a0799f6f8cfb8f4f7b532ef2cd66b01b8a Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:07:30 +0200 Subject: [PATCH 03/25] simple automated fixes --- doc/conf.py | 1 - pylhc/bsrt_analysis.py | 2 +- pylhc/calibration/beta.py | 6 ++-- pylhc/calibration/dispersion.py | 26 +++++++-------- pylhc/data_extract/lsa.py | 26 +++++++-------- pylhc/forced_da_analysis.py | 52 ++++++++++++++---------------- pylhc/kickgroups.py | 12 +++---- pylhc/lsa_to_madx.py | 9 ++---- pylhc/machine_settings_info.py | 25 +++++++------- tests/unit/test_bpm_calibration.py | 7 ++-- tests/unit/test_bsrt_analysis.py | 1 - 
tests/unit/test_lsa_to_madx.py | 8 ++--- 12 files changed, 81 insertions(+), 94 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 2bb2e4f..a973ebc 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # pyLHC documentation build configuration file, created by # sphinx-quickstart on Tue Feb 6 12:10:18 2018. diff --git a/pylhc/bsrt_analysis.py b/pylhc/bsrt_analysis.py index 313f75b..a430d91 100644 --- a/pylhc/bsrt_analysis.py +++ b/pylhc/bsrt_analysis.py @@ -23,10 +23,10 @@ import pandas as pd import parse import pytz - import tfs from generic_parser import EntryPointParameters, entrypoint from omc3.utils import logging_tools, time_tools + from pylhc.constants.general import TFS_SUFFIX, TIME_COLUMN from pylhc.forced_da_analysis import get_approximate_index diff --git a/pylhc/calibration/beta.py b/pylhc/calibration/beta.py index 6897a4d..51ca3a8 100644 --- a/pylhc/calibration/beta.py +++ b/pylhc/calibration/beta.py @@ -8,8 +8,8 @@ """ +from collections.abc import Sequence from pathlib import Path -from typing import Dict, Sequence, Tuple import numpy as np import pandas as pd @@ -131,7 +131,7 @@ def _get_factors_from_phase_fit( beta_amp_tfs: pd.DataFrame, ips: Sequence[int], plane: str, -) -> Tuple[pd.Series, pd.Series]: +) -> tuple[pd.Series, pd.Series]: """ This function computes the calibration factors for the beta method with the beta from phase fit values. The associated error is also calculated. @@ -186,7 +186,7 @@ def _get_factors_from_phase_fit( def get_calibration_factors_from_beta( ips: Sequence[int], input_path: Path -) -> Dict[str, pd.DataFrame]: +) -> dict[str, pd.DataFrame]: """ This function is the main function to compute the calibration factors for the beta method. diff --git a/pylhc/calibration/dispersion.py b/pylhc/calibration/dispersion.py index 925b1c5..352be16 100644 --- a/pylhc/calibration/dispersion.py +++ b/pylhc/calibration/dispersion.py @@ -9,36 +9,34 @@ """ from pathlib import Path -from scipy.optimize import curve_fit + import numpy as np import pandas as pd -from typing import Tuple, List, Dict - -from omc3.utils import logging_tools +import tfs from omc3.optics_measurements.constants import ( + DISPERSION_NAME, ERR, EXT, NORM_DISP_NAME, - DISPERSION_NAME, S, ) +from omc3.utils import logging_tools +from scipy.optimize import curve_fit from pylhc.constants.calibration import ( BPMS, D_BPMS, - D, LABELS, TFS_INDEX, + D, ) -import tfs - LOG = logging_tools.get_logger(__name__) def _get_dispersion_fit( positions: pd.Series, dispersion_values: pd.Series, dispersion_err: pd.Series -) -> Tuple[pd.Series, pd.Series]: +) -> tuple[pd.Series, pd.Series]: """ This function returns a fit of the given dispersion values along with the associated error. @@ -77,10 +75,10 @@ def dispersion_function(x, a, b): def _get_factors_from_dispersion( - dispersion: Dict[str, pd.Series], + dispersion: dict[str, pd.Series], phase: str, phase_err: str, -) -> Tuple[pd.Series, pd.Series]: +) -> tuple[pd.Series, pd.Series]: """ This function computes the calibration factors for the dispersion method with the non fitted dispersion values. The associated error is also @@ -114,8 +112,8 @@ def _get_factors_from_dispersion( def get_calibration_factors_from_dispersion( - ips: List[int], input_path: Path -) -> Dict[str, pd.DataFrame]: + ips: list[int], input_path: Path +) -> dict[str, pd.DataFrame]: """ This function is the main function to compute the calibration factors for the dispersion method. 
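
The calibration hunks above show the two rule families doing most of the work in this patch: the import-sorting rules (I) regroup imports into stdlib / third-party / first-party blocks, and the pyupgrade rules (UP) replace the deprecated `typing` aliases with PEP 585 built-in generics and PEP 604 unions. A minimal before/after sketch, assuming a Python 3.10+ target where this syntax is also valid at runtime:

.. code-block:: python

    # before: typing aliases, deprecated since Python 3.9
    # from typing import Dict, List, Optional, Tuple, Union
    # def factors(ips: List[int]) -> Tuple[Dict[str, float], Optional[Union[int, str]]]: ...

    # after: built-in generics and | unions, no typing imports needed
    def factors(ips: list[int]) -> tuple[dict[str, float], int | str | None]: ...
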
@@ -216,7 +214,7 @@ def get_calibration_factors_from_dispersion( factors_for_ip.columns = LABELS factors_for_ip.index.name = TFS_INDEX - if "X" not in calibration_factors.keys(): + if "X" not in calibration_factors: calibration_factors = {"X": factors_for_ip} else: calibration_factors["X"] = pd.concat( diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py index c810669..342bc32 100644 --- a/pylhc/data_extract/lsa.py +++ b/pylhc/data_extract/lsa.py @@ -5,14 +5,15 @@ This module provides useful functions to conveniently wrap the functionality of ``pjlsa``. """ -import jpype import logging import re +from collections.abc import Callable + +import jpype import tfs -from jpype import java, JException +from jpype import JException, java from omc3.utils.mock import cern_network_import from omc3.utils.time_tools import AccDatetime -from typing import Callable, Union, Dict, Tuple, List LOG = logging.getLogger(__name__) pytimber = cern_network_import("pytimber") @@ -72,7 +73,7 @@ def find_knob_names(self, accelerator: str = "lhc", regexp: str = "") -> list: return sorted(filter(reg.search, [pp.getName() for pp in lst])) return sorted(pp.getName() for pp in lst) - def find_existing_knobs(self, knobs: List[str]) -> List[str]: + def find_existing_knobs(self, knobs: list[str]) -> list[str]: """ Return only the knobs that exist from the given list. This function was created out of the need to filter these first, @@ -95,7 +96,7 @@ def find_existing_knobs(self, knobs: List[str]) -> List[str]: def find_last_fill( self, acc_time: AccDatetime, accelerator: str = "lhc", source: str = "nxcals" - ) -> Tuple[str, list]: + ) -> tuple[str, list]: """ Return last fill name and BeamProcesses. @@ -128,7 +129,7 @@ def find_beamprocess_history( t_end: AccDatetime, accelerator: str = "lhc", source: str = "nxcals", - ) -> Dict: + ) -> dict: """ Finds the BeamProcesses between t_start and t_end and sorts then by fills. Adapted from pjlsa's FindBeamProcessHistory but with source pass-through @@ -226,7 +227,7 @@ def get_trim_history( ) return trims - def get_beamprocess_info(self, beamprocess: Union[str, object]) -> Dict: + def get_beamprocess_info(self, beamprocess: str | object) -> dict: """ Get context info of the given beamprocess. 
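
The dispersion.py hunk at the start of this group (`"X" not in calibration_factors`) applies ruff's SIM118: a membership test on a dict already consults its keys, so the `.keys()` call is redundant. A short illustration, not taken from the diff:

.. code-block:: python

    calibration_factors: dict[str, object] = {}

    if "X" not in calibration_factors:  # dict membership already checks the keys
        calibration_factors["X"] = object()
    # flagged by SIM118, equivalent: if "X" not in calibration_factors.keys():
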
@@ -296,11 +297,11 @@ def get_knob_circuits(self, knob_name: str, optics: str) -> tfs.TfsDataFrame: df.headers[HEAD_INFO] = "In MAD-X it should be 'name = name + DELTA * knobValue'" knob = self._knobService.findKnob(knob_name) if knob is None: - raise IOError(f"Knob '{knob_name}' does not exist") + raise OSError(f"Knob '{knob_name}' does not exist") try: knob_settings = knob.getKnobFactors().getFactorsForOptic(optics) except jpype.java.lang.IllegalArgumentException: - raise IOError(f"Knob '{knob_name}' not available for optics '{optics}'") + raise OSError(f"Knob '{knob_name}' not available for optics '{optics}'") for knob_factor in knob_settings: factor = knob_factor.getFactor() @@ -363,8 +364,7 @@ def hooked(*args, **kwargs): return result return hooked - else: - return client_attr + return client_attr class LSA(metaclass=LSAMeta): @@ -408,9 +408,9 @@ def try_to_acquire_data(function: Callable, *args, **kwargs): try: return function(*args, **kwargs) except java.lang.IllegalStateException as e: - raise IOError("Could not acquire data, user probably has no access to NXCALS") from e + raise OSError("Could not acquire data, user probably has no access to NXCALS") from e except JException as e: # Might be a case for retries if "RetryableException" in str(e) and (tries + 1) < retries: LOG.warning(f"Could not acquire data! Trial no {tries + 1} / {retries}") continue # will go to the next iteratoin of the loop, so retry - raise IOError("Could not acquire data!") from e + raise OSError("Could not acquire data!") from e diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 788f3d2..1408f24 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -92,7 +92,6 @@ from collections import defaultdict from contextlib import suppress from pathlib import Path -from typing import Tuple import matplotlib as mpl import matplotlib.colors as mcolors @@ -106,9 +105,9 @@ import tfs from generic_parser import EntryPointParameters, entrypoint from generic_parser.entry_datatypes import ( - DictAsString, FALSE_ITEMS, TRUE_ITEMS, + DictAsString, get_instance_faker_meta, get_multi_class, ) @@ -201,14 +200,13 @@ def __new__(cls, value): if value in TRUE_ITEMS: return True - elif value in FALSE_ITEMS: + if value in FALSE_ITEMS: return False - else: - try: - return Path(value) - except TypeError: - return value + try: + return Path(value) + except TypeError: + return value def _get_pathclass(*other_classes): @@ -459,7 +457,7 @@ def _write_tfs( tfs.write(out_dir / outfile_emittance(plane), emittance_df) if emittance_bws_df is not None: tfs.write(out_dir / outfile_emittance_bws(plane), emittance_bws_df) - except (FileNotFoundError, IOError): + except (OSError, FileNotFoundError): LOG.error(f"Cannot write into directory: {str(out_dir)} ") @@ -511,7 +509,7 @@ def _drop_duplicate_indices(df): def _get_dataframes( kick_times: pd.Index, opt: DotDict -) -> Tuple[TfsDataFrame, TfsDataFrame, TfsDataFrame]: +) -> tuple[TfsDataFrame, TfsDataFrame, TfsDataFrame]: """Gets the intensity and emittance dataframes from either input, files or (timber) database.""" db = _get_db(opt) @@ -552,7 +550,7 @@ def _read_tfs(tfs_file_or_path, timespan): """Read previously gathered data (see :meth:`pylhc.forced_da_analysis._write_tfs`).""" try: tfs_df = tfs.read_tfs(tfs_file_or_path, index=TIME_COLUMN) - except IOError: + except OSError: tfs_df = tfs_file_or_path # hopefully tfs_df.index = _convert_time_index(tfs_df.index) @@ -610,7 +608,7 @@ def _get_db(opt): LOG.debug(f"Loading database from file 
{str(db_path)}") db = PageStore(f"file:{str(db_path)}", str(db_path.with_suffix(""))) if opt.fill is not None: - raise EnvironmentError("'fill' can't be used with pagestore database.") + raise OSError("'fill' can't be used with pagestore database.") else: LOG.debug(" Trying to load database from timber.") try: @@ -632,7 +630,7 @@ def _get_db(opt): error_msg += ( "but there is no database given and no access to timber databases. Aborting." ) - raise EnvironmentError(error_msg) + raise OSError(error_msg) return db @@ -658,7 +656,7 @@ def _get_bctrf_beam_intensity_from_timber(beam, db, timespan): def _get_bsrt_bunch_emittances_from_timber(beam, planes, db, timespan, key_type, nominal_emittance): - dfs = {p: None for p in planes} + dfs = dict.fromkeys(planes) for plane in planes: LOG.debug(f"Getting emittance from BSRT for beam {beam} and plane {plane}.") bunch_emittance_key = bsrt_emittance_key(beam, plane, key_type) @@ -703,7 +701,7 @@ def _get_bsrt_bunch_emittances_from_timber(beam, planes, db, timespan, key_type, def _get_bws_emittances_from_timber(beam, planes, db, timespan): - dfs = {p: None for p in planes} + dfs = dict.fromkeys(planes) for plane in planes: LOG.debug(f"Getting emittance from BWS for beam {beam} and plane {plane}.") all_columns = [column_bws_norm_emittance(plane, d) for d in BWS_DIRECTIONS] @@ -781,7 +779,7 @@ def _get_old_kick_file(kick_dir, plane): def _get_new_kick_file(kick_dir, planes): """Kick files from ``omc3``.""" - dfs = {p: None for p in planes} + dfs = dict.fromkeys(planes) for plane in planes: path = kick_dir / f"{KICKFILE}_{plane.lower()}{TFS_SUFFIX}" LOG.debug(f"Reading kickfile '{str(path)}'.'") @@ -1107,8 +1105,8 @@ def _plot_intensity(directory, beam, plane, kick_df, intensity_df): ax.text( _date2num(kick), 0.5 * sum(normalized_intensity.loc[kick, :]), - " -{:.1f}$\pm${:.1f} %\n".format(*normalized_losses.loc[kick, :]) - + " (-{:.1f}$\pm${:.1f} %)".format(*normalized_losses_kick.loc[kick, :]), + " -{:.1f}$\\pm${:.1f} %\n".format(*normalized_losses.loc[kick, :]) + + r" (-{:.1f}$\pm${:.1f} %)".format(*normalized_losses_kick.loc[kick, :]), va="bottom", color=colors.get_mpl_color(1), fontdict=dict(fontsize=mpl.rcParams["font.size"] * 0.8), @@ -1123,7 +1121,7 @@ def _plot_intensity(directory, beam, plane, kick_df, intensity_df): plt.tight_layout() annotations.set_name(f"Intensity Beam {beam}, Plane {plane}", fig) annotations.set_annotation( - f"Intensity at 100%: {intensity_start * 1e-10:.3f}$\;\cdot\;10^{{{{10}}}}$ charges", + rf"Intensity at 100%: {intensity_start * 1e-10:.3f}$\;\cdot\;10^{{{{10}}}}$ charges", ax=ax, position="left", ) @@ -1240,7 +1238,7 @@ def _plot_da_fit(directory, beam, plane, k_df, fit_type): emittance = kick_df[col_emittance] da, da_err = kick_df.headers[header_da(plane)], kick_df.headers[header_da_error(plane)] da_mu, da_err_mu = significant_digits(da * 1e6, da_err * 1e6) - da_label = f"Fit: DA$_J$= ${da_mu} \pm {da_err_mu} \mu m$" + da_label = rf"Fit: DA$_J$= ${da_mu} \pm {da_err_mu} \mu m$" if fit_type == "linear": fit_fun = fun_linear @@ -1256,7 +1254,7 @@ def _plot_da_fit(directory, beam, plane, k_df, fit_type): kick_df.headers[header_da_error(plane, unit="sigma")], ) da_round, da_err_round = significant_digits(da, da_err) - da_label = f"Fit: DA= ${da_round} \pm {da_err_round} N_{{\sigma}}$" + da_label = rf"Fit: DA= ${da_round} \pm {da_err_round} N_{{\sigma}}$" fit_fun = fun_exp_sigma fit_data = action multiplier = 100 # for percentages @@ -1280,7 +1278,7 @@ def _plot_da_fit(directory, beam, plane, k_df, fit_type): da_string = 
"2DA$_J$" elif fit_type == "norm": da_x = da - da_string = "DA$_\sigma$" + da_string = r"DA$_\sigma$" if action_max < da: if fit_type in ["linear", "exponential"]: @@ -1329,18 +1327,18 @@ def _plot_da_fit(directory, beam, plane, k_df, fit_type): x=0, y=1.00, s=( - f"$\epsilon_{{mean}}$ = {emittance_sign} $\pm$ {emittance_sign_std} pm " - f"($\epsilon_{{nominal}}$ = {nominal_emittance * 1e12: .2f} pm)" + rf"$\epsilon_{{mean}}$ = {emittance_sign} $\pm$ {emittance_sign_std} pm " + rf"($\epsilon_{{nominal}}$ = {nominal_emittance * 1e12: .2f} pm)" ), transform=ax.transAxes, va="bottom", ha="left", ) ax.set_xlabel( - f"$N_{{\sigma}} = \sqrt{{2J_{{{plane if len(plane) == 1 else ''}}}/\epsilon}}$" + rf"$N_{{\sigma}} = \sqrt{{2J_{{{plane if len(plane) == 1 else ''}}}/\epsilon}}$" ) else: - ax.set_xlabel(f"$2J_{{{plane if len(plane) == 1 else ''}}} \; [\mu m]$") + ax.set_xlabel(rf"$2J_{{{plane if len(plane) == 1 else ''}}} \; [\mu m]$") if fit_type == "linear": ax.set_ylabel(r"ln($I/I_0$)") @@ -1446,7 +1444,7 @@ def _save_fig(directory, plane, fig, ptype): path = os.path.join(directory, outfile_plot(ptype, plane, ftype)) LOG.debug(f"Saving Figure to {path}") fig.savefig(path) - except IOError: + except OSError: LOG.error(f"Couldn't create output files for {ptype} plots.") diff --git a/pylhc/kickgroups.py b/pylhc/kickgroups.py index 27e2de3..77b88c8 100644 --- a/pylhc/kickgroups.py +++ b/pylhc/kickgroups.py @@ -59,13 +59,11 @@ import argparse import json - from datetime import datetime from pathlib import Path import numpy as np import pandas as pd - from dateutil import tz from omc3.utils import logging_tools from pandas import DataFrame @@ -253,9 +251,9 @@ def load_kickfile(kickfile: Path | str) -> pd.Series: for plane in ["X", "Y"]: tune, driven_tune, amp = entry_map[plane] - data[tune] = np.NaN - data[driven_tune] = np.NaN - data[amp] = np.NaN + data[tune] = np.nan + data[driven_tune] = np.nan + data[amp] = np.nan try: idx = _get_plane_index(kick["excitationSettings"], plane) @@ -273,8 +271,8 @@ def load_kickfile(kickfile: Path | str) -> pd.Series: data[driven_tune] = data[tune] + _get_delta_tune(kick, idx) data[amp] = kick["excitationSettings"][idx]["amplitude"] - data[DRIVEN_TUNEZ] = np.NaN - data[AMPZ] = np.NaN + data[DRIVEN_TUNEZ] = np.nan + data[AMPZ] = np.nan return data diff --git a/pylhc/lsa_to_madx.py b/pylhc/lsa_to_madx.py index 78a7665..0aab7a5 100644 --- a/pylhc/lsa_to_madx.py +++ b/pylhc/lsa_to_madx.py @@ -96,14 +96,11 @@ import argparse import re import string - from pathlib import Path -from typing import Dict import numpy as np import pandas as pd import tfs - from omc3.utils import logging_tools from omc3.utils.contexts import timeit @@ -115,7 +112,7 @@ # ----- Helper functions ----- # -def parse_knobs_and_trim_values_from_file(knobs_file: Path) -> Dict[str, float]: +def parse_knobs_and_trim_values_from_file(knobs_file: Path) -> dict[str, float]: """ Parses a file for LSA knobs and their trim values. Each line should be a knob name following by a number of the trim value. 
If no value is written, it defaults @@ -349,7 +346,7 @@ def main(): LOG.info(f"Loading knob names from file '{options.file}'") knobs_dict = parse_knobs_and_trim_values_from_file(Path(options.file)) else: # given at the command line with --knobs, we initialise trim values to 1 - knobs_dict = {knob: 1.0 for knob in options.knobs} + knobs_dict = dict.fromkeys(options.knobs, 1.0) LOG.info("Instantiating LSA client") lsa_client = LSAClient() @@ -366,7 +363,7 @@ def main(): deltas_df=knob_definition, lsa_knob=lsa_knob, trim=trim_value ) - except (OSError, IOError): # raised by pjlsa if knob not found + except OSError: # raised by pjlsa if knob not found LOG.warning( f"Could not find knob '{lsa_knob}' in the given optics '{lsa_optics}' - skipping" ) diff --git a/pylhc/machine_settings_info.py b/pylhc/machine_settings_info.py index f4a249a..58126a2 100644 --- a/pylhc/machine_settings_info.py +++ b/pylhc/machine_settings_info.py @@ -49,19 +49,20 @@ """ from collections import OrderedDict, namedtuple +from collections.abc import Iterable +from pathlib import Path import tfs from generic_parser import DotDict, EntryPointParameters, entrypoint from generic_parser.entry_datatypes import get_instance_faker_meta +from omc3.knob_extractor import KNOB_CATEGORIES, name2lsa from omc3.utils import logging_tools from omc3.utils.iotools import PathOrStr from omc3.utils.time_tools import AccDatetime, AcceleratorDatetime -from pathlib import Path -from typing import Tuple, Iterable, Dict, Union -from omc3.knob_extractor import name2lsa, KNOB_CATEGORIES from pylhc.constants import machine_settings_info as const -from pylhc.data_extract.lsa import COL_NAME as LSA_COLUMN_NAME, LSA +from pylhc.data_extract.lsa import COL_NAME as LSA_COLUMN_NAME +from pylhc.data_extract.lsa import LSA LOG = logging_tools.get_logger(__name__) @@ -125,7 +126,7 @@ def _get_params() -> dict: @entrypoint(_get_params(), strict=True) -def get_info(opt) -> Dict[str, object]: +def get_info(opt) -> dict[str, object]: """ Get info about **Beamprocess**, **Optics** and **Knobs** at given time. @@ -287,7 +288,7 @@ def log_summary( acc_time: AccDatetime, bp_info: DotDict, optics_info: DotDict = None, - trims: Dict[str, float] = None, + trims: dict[str, float] = None, ): """Log the summary. @@ -327,7 +328,7 @@ def write_summary( acc_time: AccDatetime, bp_info: DotDict, optics_info: DotDict = None, - trims: Dict[str, float] = None, + trims: dict[str, float] = None, ): """Write summary into a ``tfs`` file. @@ -376,7 +377,7 @@ def write_knob_defitions(output_path: Path, definitions: dict): def write_trim_histories( output_path: Path, - trim_histories: Dict[str, namedtuple], + trim_histories: dict[str, namedtuple], accel: str, acc_time: AccDatetime = None, acc_start_time: AccDatetime = None, @@ -468,7 +469,7 @@ def _get_beamprocess( def _get_beamprocess_start( - beamprocesses: Iterable[Tuple[float, str]], acc_time: AccDatetime, bp_name: str + beamprocesses: Iterable[tuple[float, str]], acc_time: AccDatetime, bp_name: str ) -> AccDatetime: """ Get the last beamprocess in the list of beamprocesses before dt_utc. @@ -519,7 +520,7 @@ def _get_knob_definitions(knobs: list, optics: str): for knob in knobs: try: defs[knob] = LSA.get_knob_circuits(knob, optics) - except IOError as e: + except OSError as e: LOG.warning(e.args[0]) return defs @@ -534,7 +535,7 @@ def _get_last_trim(trims: dict) -> dict: Dictionary of knob names and their values. 
""" LOG.debug("Extracting last trim from found trim histories.") - trim_dict = {trim: trims[trim].data[-1] for trim in trims.keys()} # return last set value + trim_dict = {trim: trims[trim].data[-1] for trim in trims} # return last set value for trim, value in trim_dict.items(): try: trim_dict[trim] = value.flatten()[-1] # the very last entry ... @@ -548,7 +549,7 @@ def _get_last_trim(trims: dict) -> dict: # Other ######################################################################## -def _get_times(time: Union[str, AccDatetime], start_time: Union[str, AccDatetime], accel: str): +def _get_times(time: str | AccDatetime, start_time: str | AccDatetime, accel: str): """Returns acc_time and acc_start_time parameters depending on the user input.""" acc_dt = AcceleratorDatetime[accel] diff --git a/tests/unit/test_bpm_calibration.py b/tests/unit/test_bpm_calibration.py index ec0e23b..35d95d4 100644 --- a/tests/unit/test_bpm_calibration.py +++ b/tests/unit/test_bpm_calibration.py @@ -1,12 +1,13 @@ +import pathlib from pathlib import Path + import numpy as np import pandas as pd -import pathlib import pytest -from pandas.testing import assert_series_equal, assert_frame_equal - import tfs from generic_parser.dict_parser import ArgumentError +from pandas.testing import assert_frame_equal, assert_series_equal + from pylhc import bpm_calibration as calibration from pylhc.constants.calibration import BPMS diff --git a/tests/unit/test_bsrt_analysis.py b/tests/unit/test_bsrt_analysis.py index 58315bb..b4fd3b9 100644 --- a/tests/unit/test_bsrt_analysis.py +++ b/tests/unit/test_bsrt_analysis.py @@ -1,6 +1,5 @@ from ast import literal_eval from pathlib import Path -import sys import matplotlib import numpy as np diff --git a/tests/unit/test_lsa_to_madx.py b/tests/unit/test_lsa_to_madx.py index 967ad0c..cd40661 100644 --- a/tests/unit/test_lsa_to_madx.py +++ b/tests/unit/test_lsa_to_madx.py @@ -1,19 +1,15 @@ import json - from pathlib import Path -from typing import Dict -import numpy as np import pytest import tfs - from pandas._testing import assert_dict_equal from pylhc.lsa_to_madx import ( + _get_delta, _get_trim_variable, get_madx_script_from_definition_dataframe, parse_knobs_and_trim_values_from_file, - _get_delta, ) INPUTS_DIR = Path(__file__).parent.parent / "inputs" @@ -98,7 +94,7 @@ def knob_definition_df() -> tfs.TfsDataFrame: @pytest.fixture() -def parsed_definitions() -> Dict[str, float]: +def parsed_definitions() -> dict[str, float]: with (LSA_TO_MADX_INPUTS / "parsed_definitions.json").open("r") as f: defs = json.load(f) return defs From cbcfb1deb026a5a4d115a5c74dec5793c7f2fe73 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:11:23 +0200 Subject: [PATCH 04/25] need self as first arg --- tests/unit/test_forced_da_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_forced_da_analysis.py b/tests/unit/test_forced_da_analysis.py index 68936f7..ab81c27 100644 --- a/tests/unit/test_forced_da_analysis.py +++ b/tests/unit/test_forced_da_analysis.py @@ -53,7 +53,7 @@ def test_md3312_no_data_given(self, tmp_path): output_directory=tmp_path, ) - def test_md2162_timberdb(tmp_path): + def test_md2162_timberdb(self, tmp_path): data_dir = INPUT / "kicks_horizontal_md2162" fda_analysis( fit="linear", From 58447c7b846832cdb54bcbe2fdf7e4820adf450a Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:11:53 +0200 Subject: [PATCH 05/25] matplotlib as mpl --- tests/unit/test_bsrt_analysis.py | 4 ++-- 
tests/unit/test_forced_da_analysis.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_bsrt_analysis.py b/tests/unit/test_bsrt_analysis.py index b4fd3b9..1d3d62c 100644 --- a/tests/unit/test_bsrt_analysis.py +++ b/tests/unit/test_bsrt_analysis.py @@ -1,7 +1,7 @@ from ast import literal_eval from pathlib import Path -import matplotlib +import matplotlib as mpl import numpy as np import pandas as pd import pytest @@ -10,7 +10,7 @@ from pylhc import bsrt_analysis # Forcing non-interactive Agg backend so rendering is done similarly across platforms during tests -matplotlib.use("Agg") +mpl.use("Agg") INPUTS_DIR = Path(__file__).parent.parent / "inputs" BSRT_INPUTS = INPUTS_DIR / "bsrt_analysis" diff --git a/tests/unit/test_forced_da_analysis.py b/tests/unit/test_forced_da_analysis.py index ab81c27..034bc53 100644 --- a/tests/unit/test_forced_da_analysis.py +++ b/tests/unit/test_forced_da_analysis.py @@ -1,12 +1,12 @@ from pathlib import Path -import matplotlib +import matplotlib as mpl import pytest from pylhc.forced_da_analysis import main as fda_analysis # Forcing non-interactive Agg backend so rendering is done similarly across platforms during tests -matplotlib.use("Agg") +mpl.use("Agg") INPUT = Path(__file__).parent.parent / "inputs" From aca5deeb4f9b9459490496962e874284ded465b5 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:13:12 +0200 Subject: [PATCH 06/25] no assignment before returning --- pylhc/bsrt_analysis.py | 3 +-- pylhc/bsrt_logger.py | 6 ++---- pylhc/forced_da_analysis.py | 6 ++---- tests/unit/test_lsa_to_madx.py | 3 +-- 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/pylhc/bsrt_analysis.py b/pylhc/bsrt_analysis.py index a430d91..4c074b4 100644 --- a/pylhc/bsrt_analysis.py +++ b/pylhc/bsrt_analysis.py @@ -176,8 +176,7 @@ def _load_files_in_df(opt): ) files_df = files_df.assign(TIME=[f.timestamp() for f in files_df["TIMESTAMP"]]) - files_df = files_df.sort_values(by=["TIME"]).reset_index(drop=True).set_index("TIME") - return files_df + return files_df.sort_values(by=["TIME"]).reset_index(drop=True).set_index("TIME") def _get_timestamp_from_name(name, formatstring): diff --git a/pylhc/bsrt_logger.py b/pylhc/bsrt_logger.py index 8884470..f2eb102 100644 --- a/pylhc/bsrt_logger.py +++ b/pylhc/bsrt_logger.py @@ -35,8 +35,7 @@ def parse_timestamp(thistime): ] for fmat in accepted_time_input_format: try: - dtobject = dt.datetime.strptime(thistime, fmat) - return dtobject + return dt.datetime.strptime(thistime, fmat) except ValueError: pass timefmatstring = "" @@ -54,8 +53,7 @@ def parse_timestamp(thistime): # function to help write output from datetime objects in standard format throughout code def convert_to_data_output_format(dtobject): - output_timestamp = dtobject.strftime(formats.TIME) - return output_timestamp + return dtobject.strftime(formats.TIME) ########################################## diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 1408f24..172dadb 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -582,13 +582,12 @@ def _filter_emittance_data(df, planes, window_length, limit): df.headers[HEADER_BSRT_ROLLING_WINDOW] = window_length df.headers[HEADER_BSRT_OUTLIER_LIMIT] = limit df = _maybe_add_sum_for_planes(df, planes, column_norm_emittance) - df = _maybe_add_sum_for_planes( + return _maybe_add_sum_for_planes( df, planes, lambda p: mean_col(column_norm_emittance(p)), lambda p: err_col(mean_col(column_norm_emittance(p))), ) - return df # 
Timber Data ------------------------------------------------------------------ @@ -815,8 +814,7 @@ def _add_intensity_and_losses_to_kicks(kick_df, intensity_df, time_before, time_ new_columns = [col for col in col_list + [err_col(c) for c in col_list]] kick_df = kick_df.reindex(columns=kick_df.columns.tolist() + new_columns) kick_df = _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after) - kick_df = _calculate_intensity_losses_at_kicks(kick_df) - return kick_df + return _calculate_intensity_losses_at_kicks(kick_df) def _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after): diff --git a/tests/unit/test_lsa_to_madx.py b/tests/unit/test_lsa_to_madx.py index cd40661..1de4c6b 100644 --- a/tests/unit/test_lsa_to_madx.py +++ b/tests/unit/test_lsa_to_madx.py @@ -96,8 +96,7 @@ def knob_definition_df() -> tfs.TfsDataFrame: @pytest.fixture() def parsed_definitions() -> dict[str, float]: with (LSA_TO_MADX_INPUTS / "parsed_definitions.json").open("r") as f: - defs = json.load(f) - return defs + return json.load(f) @pytest.fixture From 9085e1d8eeac548b28ad75693c10cf2e8c1b83e2 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:16:07 +0200 Subject: [PATCH 07/25] do not assign to variables that will be unused --- pylhc/calibration/beta.py | 4 ++-- pylhc/calibration/dispersion.py | 2 +- pylhc/data_extract/lsa.py | 1 - tests/unit/test_bpm_calibration.py | 6 ++---- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/pylhc/calibration/beta.py b/pylhc/calibration/beta.py index 51ca3a8..bf63c28 100644 --- a/pylhc/calibration/beta.py +++ b/pylhc/calibration/beta.py @@ -82,7 +82,7 @@ def err_function(x, popt, pcov): ) # Get the error from the covariance matrix - perr = np.sqrt(np.diag(pcov)) + _perr = np.sqrt(np.diag(pcov)) # not used? 
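
The underscore prefix given to `_perr` above relies on the common linter convention that names starting with `_` are intentional dummies, which unused-variable checks then skip while the computation stays visible to readers. A minimal, self-contained sketch of the pattern (illustrative values only):

.. code-block:: python

    import numpy as np

    pcov = np.eye(2)  # stand-in for a covariance matrix, e.g. from curve_fit

    _perr = np.sqrt(np.diag(pcov))    # 1-sigma errors; computed but deliberately unused
    value, _metadata = 42, "ignored"  # the same convention when unpacking
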
# Get the fitted beta and add the errors to get min/max values beta_fit = beta_function(positions[valid], *popt) @@ -151,7 +151,7 @@ def _get_factors_from_phase_fit( # Get the beam concerned by those tfs files beam = int(beta_phase_tfs.iloc[0].name[-1]) - calibration_phase_fit, calibration_phase_fit_err = None, None + calibration_phase_fit, _calibration_phase_fit_err = None, None for ip in ips: LOG.info(f" Computing the calibration factors from phase fit for IP {ip}") diff --git a/pylhc/calibration/dispersion.py b/pylhc/calibration/dispersion.py index 352be16..6ae43a0 100644 --- a/pylhc/calibration/dispersion.py +++ b/pylhc/calibration/dispersion.py @@ -177,7 +177,7 @@ def get_calibration_factors_from_dispersion( # Get the dispersion and dispersion from phase from the tfs files dispersion = dict() - normalised_dispersion = dict() + # normalised_dispersion = dict() dispersion["amp"] = dispersion_tfs.reindex(bpms)["DX"] dispersion["amp_err"] = dispersion_tfs.reindex(bpms)[f"{ERR}{D}X"] diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py index 342bc32..02ca559 100644 --- a/pylhc/data_extract/lsa.py +++ b/pylhc/data_extract/lsa.py @@ -352,7 +352,6 @@ def __getattr__(cls, attr): def hooked(*args, **kwargs): result = client_attr(*args, **kwargs) - result_is_self = False try: if result == cls._client: # prevent client from becoming unwrapped diff --git a/tests/unit/test_bpm_calibration.py b/tests/unit/test_bpm_calibration.py index 35d95d4..533ce09 100644 --- a/tests/unit/test_bpm_calibration.py +++ b/tests/unit/test_bpm_calibration.py @@ -17,9 +17,7 @@ def test_calibration_same_betabeat(tmp_path): - factors = calibration.main( - inputdir=MEASUREMENTS / "for_beta", outputdir=tmp_path, ips=[1, 4, 5] - ) + calibration.main(inputdir=MEASUREMENTS / "for_beta", outputdir=tmp_path, ips=[1, 4, 5]) # Let's open the tfs files we just created x_tfs = tfs.read(tmp_path / "calibration_beta_x.tfs", index="NAME") @@ -72,7 +70,7 @@ def test_wrong_ip(tmp_path): def test_calibration_same_dispersion(tmp_path): - factors = calibration.main( + calibration.main( inputdir=MEASUREMENTS / "for_dispersion", outputdir=tmp_path, method="dispersion", From f3f83ae293d1b23ab9e4bd0a2b780dde8846ec02 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:17:19 +0200 Subject: [PATCH 08/25] ok noqa --- pylhc/machine_settings_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylhc/machine_settings_info.py b/pylhc/machine_settings_info.py index 58126a2..b0cf1a4 100644 --- a/pylhc/machine_settings_info.py +++ b/pylhc/machine_settings_info.py @@ -397,7 +397,7 @@ def write_trim_histories( bp_info (DotDict): BeamProcess Info Dictionary optics_info (DotDict): Optics Info Dictionary """ - AccDT = AcceleratorDatetime[accel] + AccDT = AcceleratorDatetime[accel] # noqa: N806 # Create headers with basic info --- headers = OrderedDict([("Hint:", "All times are given in UTC."), (const.head_accel, accel)]) From 7c1ea703564dd70bfe7e0ea109796a02bdcb2930 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:18:06 +0200 Subject: [PATCH 09/25] use dict literals --- pylhc/bpm_calibration.py | 40 +++--- pylhc/bsrt_analysis.py | 75 ++++++----- pylhc/calibration/beta.py | 2 +- pylhc/calibration/dispersion.py | 4 +- pylhc/constants/general.py | 15 ++- pylhc/forced_da_analysis.py | 230 ++++++++++++++++---------------- pylhc/machine_settings_info.py | 56 ++++---- 7 files changed, 220 insertions(+), 202 deletions(-) diff --git a/pylhc/bpm_calibration.py b/pylhc/bpm_calibration.py index 
cafbd85..745a848 100644 --- a/pylhc/bpm_calibration.py +++ b/pylhc/bpm_calibration.py @@ -75,29 +75,29 @@ def _get_params() -> dict: """ return EntryPointParameters( - inputdir=dict(type=Path, required=True, help="Measurements path."), - outputdir=dict( - type=Path, - required=True, - help="Output directory where to write the calibration factors.", - ), - ips=dict( - type=int, - nargs="+", - choices=IPS, - required=False, - help="IPs to compute calibration factors for.", - ), - method=dict( - type=str, - required=False, - choices=METHODS, - default=METHODS[0], - help=( + inputdir={"type": Path, "required": True, "help": "Measurements path."}, + outputdir={ + "type": Path, + "required": True, + "help": "Output directory where to write the calibration factors.", + }, + ips={ + "type": int, + "nargs": "+", + "choices": IPS, + "required": False, + "help": "IPs to compute calibration factors for.", + }, + method={ + "type": str, + "required": False, + "choices": METHODS, + "default": METHODS[0], + "help": ( "Method to be used to compute the calibration factors. " "The Beta function is used by default." ), - ), + }, ) diff --git a/pylhc/bsrt_analysis.py b/pylhc/bsrt_analysis.py index 4c074b4..3edec64 100644 --- a/pylhc/bsrt_analysis.py +++ b/pylhc/bsrt_analysis.py @@ -39,47 +39,52 @@ def get_params(): return EntryPointParameters( - directory=dict( - flags=["-d", "--directory"], - required=True, - type=str, - help="Directory containing the logged BSRT files.", - ), - beam=dict( - flags=["-b", "--beam"], - required=True, - choices=["B1", "B2"], - type=str, - help="Beam for which analysis is performed.", - ), - outputdir=dict( - flags=["-o", "--outputdir"], - type=str, - default=None, - help=( + directory={ + "flags": ["-d", "--directory"], + "required": True, + "type": str, + "help": "Directory containing the logged BSRT files.", + }, + beam={ + "flags": ["-b", "--beam"], + "required": True, + "choices": ["B1", "B2"], + "type": str, + "help": "Beam for which analysis is performed.", + }, + outputdir={ + "flags": ["-o", "--outputdir"], + "type": str, + "default": None, + "help": ( "Directory in which plots and dataframe will be saved in. If omitted, " "no data will be saved." ), - ), - starttime=dict( - flags=["--starttime"], - type=int, - help="Start of time window for analysis in milliseconds UTC.", - ), - endtime=dict( - flags=["--endtime"], - type=int, - help="End of time window for analysis in milliseconds UTC.", - ), - kick_df=dict( - flags=["--kick_df"], - default=None, - help=( + }, + starttime={ + "flags": ["--starttime"], + "type": int, + "help": "Start of time window for analysis in milliseconds UTC.", + }, + endtime={ + "flags": ["--endtime"], + "type": int, + "help": "End of time window for analysis in milliseconds UTC.", + }, + kick_df={ + "flags": ["--kick_df"], + "default": None, + "help": ( f"TFS with column {TIME_COLUMN} with time stamps to be added in the plots. 
" f"Additionally, cross section at these timestamps will be plotted.", ), - ), - show_plots=dict(flags=["--show_plots"], type=bool, default=False, help="Show BSRT plots."), + }, + show_plots={ + "flags": ["--show_plots"], + "type": bool, + "default": False, + "help": "Show BSRT plots.", + }, ) diff --git a/pylhc/calibration/beta.py b/pylhc/calibration/beta.py index bf63c28..2ef0c14 100644 --- a/pylhc/calibration/beta.py +++ b/pylhc/calibration/beta.py @@ -217,7 +217,7 @@ def get_calibration_factors_from_beta( """ LOG.info("Computing the calibration factors via beta") # Loop over each plane and compute the calibration factors - calibration_factors = dict() + calibration_factors = {} for plane in PLANES: LOG.info(f" Computing the calibration factors for plane {plane}") diff --git a/pylhc/calibration/dispersion.py b/pylhc/calibration/dispersion.py index 6ae43a0..0cc52b1 100644 --- a/pylhc/calibration/dispersion.py +++ b/pylhc/calibration/dispersion.py @@ -152,7 +152,7 @@ def get_calibration_factors_from_dispersion( beam = int(dispersion_tfs.iloc[0].name[-1]) # Loop over the IPs and compute the calibration factors - calibration_factors = dict() + calibration_factors = {} for ip in ips: LOG.info(f" Computing the calibration factors for IP {ip}, plane X") # Filter our TFS files to only keep the BPMs for the selected IR @@ -176,7 +176,7 @@ def get_calibration_factors_from_dispersion( positions_fit = dispersion_tfs.reindex(d_bpms)[S] # Get the dispersion and dispersion from phase from the tfs files - dispersion = dict() + dispersion = {} # normalised_dispersion = dict() dispersion["amp"] = dispersion_tfs.reindex(bpms)["DX"] diff --git a/pylhc/constants/general.py b/pylhc/constants/general.py index 83425b6..181e8dd 100644 --- a/pylhc/constants/general.py +++ b/pylhc/constants/general.py @@ -10,9 +10,18 @@ BEAMS = (1, 2) PLANES = ("X", "Y") -PLANE_TO_HV = dict(X="H", Y="V") - -UNIT_TO_M = dict(km=1e3, m=1e0, mm=1e-3, um=1e-6, nm=1e-9, pm=1e-12, fm=1e-15, am=1e-18) +PLANE_TO_HV = {"X": "H", "Y": "V"} + +UNIT_TO_M = { + "km": 1e3, + "m": 1e0, + "mm": 1e-3, + "um": 1e-6, + "nm": 1e-9, + "pm": 1e-12, + "fm": 1e-15, + "am": 1e-18, +} PROTON_MASS = 0.938272 # GeV/c^2 LHC_NOMINAL_EMITTANCE = 3.75 * 1e-6 # Design LHC diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 172dadb..abbd6fd 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -232,132 +232,136 @@ def __new__(cls, value): def get_params(): return EntryPointParameters( - kick_directory=dict( - flags=["-k", "--kickdir"], - required=True, - type=PathOrString, - help="Analysis kick_directory containing kick files.", - ), - output_directory=dict( - flags=["-o", "--outdir"], - type=PathOrString, - help="Output kick_directory, if not given subfolder in kick kick_directory", - ), - energy=dict( - flags=["-e", "--energy"], - required=True, - type=get_multi_class(float, int), - help="Beam energy in GeV.", - ), - fill=dict( - flags=["-f", "--fill"], - type=get_multi_class(int, type(None)), - help="Fill that was used. If not given, check out time_around_kicks.", - ), - beam=dict( - flags=["-b", "--beam"], required=True, choices=[1, 2], type=int, help="Beam to use." 
- ), - plane=dict( - flags=["-p", "--plane"], - choices=["X", "Y"], - required=True, - type=str, - help=( + kick_directory={ + "flags": ["-k", "--kickdir"], + "required": True, + "type": PathOrString, + "help": "Analysis kick_directory containing kick files.", + }, + output_directory={ + "flags": ["-o", "--outdir"], + "type": PathOrString, + "help": "Output kick_directory, if not given subfolder in kick kick_directory", + }, + energy={ + "flags": ["-e", "--energy"], + "required": True, + "type": get_multi_class(float, int), + "help": "Beam energy in GeV.", + }, + fill={ + "flags": ["-f", "--fill"], + "type": get_multi_class(int, type(None)), + "help": "Fill that was used. If not given, check out time_around_kicks.", + }, + beam={ + "flags": ["-b", "--beam"], + "required": True, + "choices": [1, 2], + "type": int, + "help": "Beam to use.", + }, + plane={ + "flags": ["-p", "--plane"], + "choices": ["X", "Y"], + "required": True, + "type": str, + "help": ( "Plane of the kicks." # " Give 'XY' for using both planes (e.g. diagonal kicks)." # Future release ), - ), - time_around_kicks=dict( - type=int, - default=TIME_AROUND_KICKS_MIN, - help=( + }, + time_around_kicks={ + "type": int, + "default": TIME_AROUND_KICKS_MIN, + "help": ( "If no fill is given, this defines the time (in minutes) " "when data before the first and after the last kick is extracted." ), - ), - intensity_time_before_kick=dict( - type=int, - nargs=2, - default=TIME_BEFORE_KICK_S, - help=( + }, + intensity_time_before_kick={ + "type": int, + "nargs": 2, + "default": TIME_BEFORE_KICK_S, + "help": ( "Defines the times before the kicks (in seconds) " "which is used for intensity averaging to calculate the losses." ), - ), - intensity_time_after_kick=dict( - type=int, - nargs=2, - default=TIME_AFTER_KICK_S, - help=( + }, + intensity_time_after_kick={ + "type": int, + "nargs": 2, + "default": TIME_AFTER_KICK_S, + "help": ( "Defines the times after the kicks (in seconds) " "which is used for intensity averaging to calculate the losses." ), - ), - normalized_emittance=dict( - type=float, - default=LHC_NOMINAL_EMITTANCE, - help="Assumed NORMALIZED nominal emittance for the machine.", - ), - emittance_tfs=dict( - type=PathOrDataframe, - help="Dataframe or Path of pre-saved emittance tfs.", - ), - intensity_tfs=dict( - type=PathOrDataframe, - help="Dataframe or Path of pre-saved intensity tfs.", - ), - show_wirescan_emittance=dict( - default=False, - type=BoolOrPathOrDataFrame, - help=( + }, + normalized_emittance={ + "type": float, + "default": LHC_NOMINAL_EMITTANCE, + "help": "Assumed NORMALIZED nominal emittance for the machine.", + }, + emittance_tfs={ + "type": PathOrDataframe, + "help": "Dataframe or Path of pre-saved emittance tfs.", + }, + intensity_tfs={ + "type": PathOrDataframe, + "help": "Dataframe or Path of pre-saved intensity tfs.", + }, + show_wirescan_emittance={ + "default": False, + "type": BoolOrPathOrDataFrame, + "help": ( "Flag if the emittance from wirescan should also be shown, " "can also be a Dataframe or Path of pre-saved emittance bws tfs." ), - ), - timber_db=dict( - type=str, - default="all", - choices=["all", "mdb", "ldb", "nxcals"], - help="Which timber database to use.", - ), - pagestore_db=dict(type=PathOrPagestore, help="(Path to-) presaved timber database"), - fit=dict( - type=str, - default="exponential", - choices=["exponential", "linear"], - help="Fitting function to use (rearranges parameters to make sense).", - ), - emittance_window_length=dict( - help="Length of the moving average window. 
(# data points)", - type=int, - default=ROLLING_AVERAGE_WINDOW, - ), - emittance_outlier_limit=dict( - help="Limit, i.e. cut from mean, on emittance outliers in meter.", - type=float, - default=OUTLIER_LIMIT, - ), - emittance_type=dict( - type=str, - default="average", - choices=["fit_sigma", "average"], - help="Which BSRT data to use (from database).", - ), - show=dict( - action="store_true", - help="Show plots.", - ), - plot_styles=dict( - type=str, - nargs="+", - default=["standard"], - help="Which plotting styles to use, either from omc3 styles or default mpl.", - ), - manual_style=dict( - type=DictAsString, - default={}, - help="Additional style rcParameters which update the set of predefined ones.", - ), + }, + timber_db={ + "type": str, + "default": "all", + "choices": ["all", "mdb", "ldb", "nxcals"], + "help": "Which timber database to use.", + }, + pagestore_db={"type": PathOrPagestore, "help": "(Path to-) presaved timber database"}, + fit={ + "type": str, + "default": "exponential", + "choices": ["exponential", "linear"], + "help": "Fitting function to use (rearranges parameters to make sense).", + }, + emittance_window_length={ + "help": "Length of the moving average window. (# data points)", + "type": int, + "default": ROLLING_AVERAGE_WINDOW, + }, + emittance_outlier_limit={ + "help": "Limit, i.e. cut from mean, on emittance outliers in meter.", + "type": float, + "default": OUTLIER_LIMIT, + }, + emittance_type={ + "type": str, + "default": "average", + "choices": ["fit_sigma", "average"], + "help": "Which BSRT data to use (from database).", + }, + show={ + "action": "store_true", + "help": "Show plots.", + }, + plot_styles={ + "type": str, + "nargs": "+", + "default": ["standard"], + "help": "Which plotting styles to use, either from omc3 styles or default mpl.", + }, + manual_style={ + "type": DictAsString, + "default": {}, + "help": "Additional style rcParameters which update the set of predefined ones.", + }, ) @@ -408,7 +412,7 @@ def main(opt): _write_tfs(out_dir, opt.plane, kick_df, intensity_df, emittance_df, emittance_bws_df) # plotting - figs = dict() + figs = {} register_matplotlib_converters() # for datetime plotting style.set_style(opt.plot_styles, opt.manual_style) figs["emittance"] = _plot_emittances( @@ -1107,7 +1111,7 @@ def _plot_intensity(directory, beam, plane, kick_df, intensity_df): + r" (-{:.1f}$\pm${:.1f} %)".format(*normalized_losses_kick.loc[kick, :]), va="bottom", color=colors.get_mpl_color(1), - fontdict=dict(fontsize=mpl.rcParams["font.size"] * 0.8), + fontdict={"fontsize": mpl.rcParams["font.size"] * 0.8}, ) _plot_kicks_and_scale_x(ax, kick_df.index, pad=x_span) diff --git a/pylhc/machine_settings_info.py b/pylhc/machine_settings_info.py index b0cf1a4..5711021 100644 --- a/pylhc/machine_settings_info.py +++ b/pylhc/machine_settings_info.py @@ -84,44 +84,44 @@ def __new__(cls, value): def _get_params() -> dict: """Parse Commandline Arguments and return them as options.""" return EntryPointParameters( - time=dict( - default=None, - type=AccDatetimeOrStr, - help=( + time={ + "default": None, + "type": AccDatetimeOrStr, + "help": ( "UTC Time as 'Y-m-d H:M:S.f' or ISO format or AccDatetime object." " Acts as point in time or end time (if ``start_time`` is given)." ), - ), - start_time=dict( - default=None, - type=AccDatetimeOrStr, - help=( + }, + start_time={ + "default": None, + "type": AccDatetimeOrStr, + "help": ( "UTC Time as 'Y-m-d H:M:S.f' or ISO format or AccDatetime object." " Defines the beginning of the time-range." 
), - ), - knobs=dict( - default=None, - nargs="+", - type=str, - help="List of knobnames. " + }, + knobs={ + "default": None, + "nargs": "+", + "type": str, + "help": "List of knobnames. " "If `None` (or omitted) no knobs will be extracted. " "If it is just the string ``'all'``, " "all knobs will be extracted (can be slow). " "Use the string ``'default'`` for pre-defined knobs of interest.", - ), - accel=dict(default="lhc", type=str, help="Accelerator name."), - beamprocess=dict( - type=str, - help=("Manual override for the Beamprocess (otherwise taken at the given ``time``)"), - ), - output_dir=dict(default=None, type=PathOrStr, help="Output directory."), - knob_definitions=dict(action="store_true", help="Set to extract knob definitions."), - source=dict(type=str, default="nxcals", help="Source to extract data from."), - log=dict( - action="store_true", - help="Write summary into log (automatically done if no output path is given).", - ), + }, + accel={"default": "lhc", "type": str, "help": "Accelerator name."}, + beamprocess={ + "type": str, + "help": ("Manual override for the Beamprocess (otherwise taken at the given ``time``)"), + }, + output_dir={"default": None, "type": PathOrStr, "help": "Output directory."}, + knob_definitions={"action": "store_true", "help": "Set to extract knob definitions."}, + source={"type": str, "default": "nxcals", "help": "Source to extract data from."}, + log={ + "action": "store_true", + "help": "Write summary into log (automatically done if no output path is given).", + }, ) From 373e21a385da818d35ea88dcff8504bc34c70f8f Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:22:52 +0200 Subject: [PATCH 10/25] no use of .keys --- pylhc/bpm_calibration.py | 6 +++--- pylhc/data_extract/lsa.py | 2 +- pylhc/forced_da_analysis.py | 2 +- pylhc/lsa_to_madx.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pylhc/bpm_calibration.py b/pylhc/bpm_calibration.py index 745a848..ee3ec4c 100644 --- a/pylhc/bpm_calibration.py +++ b/pylhc/bpm_calibration.py @@ -110,13 +110,13 @@ def main(opt): factors = get_calibration_factors_from_dispersion(opt.ips, opt.inputdir) # Fill NaN with 1 because of missing BPMs and that fit cannot be done everywhere - for plane in factors.keys(): + for plane in factors: factors[plane] = factors[plane].fillna(1) - LOG.debug("".join([f"\nPlane {plane}:\n{factors[plane]}" for plane in factors.keys()])) + LOG.debug("".join([f"\nPlane {plane}:\n{factors[plane]}" for plane in factors])) # Write the TFS file to the desired output directory opt.outputdir.mkdir(parents=True, exist_ok=True) - for plane in factors.keys(): + for plane in factors: tfs.write( opt.outputdir / f"{CALIBRATION_NAME[opt.method]}{plane.lower()}{EXT}", factors[plane].reset_index(), diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py index 02ca559..c071018 100644 --- a/pylhc/data_extract/lsa.py +++ b/pylhc/data_extract/lsa.py @@ -219,7 +219,7 @@ def get_trim_history( ) from e LOG.debug(f"{len(trims)} trims extracted.") - trims_not_found = [k for k in knobs if k not in trims.keys()] + trims_not_found = [k for k in knobs if k not in trims] if len(trims_not_found): LOG.warning( f"The following knobs were not found in '{beamprocess}' " diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index abbd6fd..c46c1f5 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -678,7 +678,7 @@ def _get_bsrt_bunch_emittances_from_timber(beam, planes, db, timespan, key_type, y_new[f"{x_elem:.3f}"] += 
y_elem.tolist()

         # get average and std per timestamp
-        x = np.array([float(elem) for elem in y_new.keys()])
+        x = np.array([float(elem) for elem in y_new])
         y = np.array([np.average(elem) for elem in y_new.values()]) * nominal_emittance
         y_std = np.array([np.std(elem) for elem in y_new.values()]) * nominal_emittance
     elif key_type == "average":
diff --git a/pylhc/lsa_to_madx.py b/pylhc/lsa_to_madx.py
index 0aab7a5..460f9a6 100644
--- a/pylhc/lsa_to_madx.py
+++ b/pylhc/lsa_to_madx.py
@@ -39,7 +39,7 @@
 One should not be surprised if long ``LSA`` knob names appear slightly differently in the created
 ``MAD-X`` files, though the functionality stays intact. For instance, the knob
 ``LHCBEAM/MD_ATS_2022_05_04_B1_RigidWaitsShift_IP1pos`` will lead to the following trim variable
 definition:
-
+
 .. code-block:: fortran

     trim_D_ATS_2022_05_04_B1_RigidWaitsShift_IP1pos = 1.0;
@@ -90,7 +90,7 @@
     --optics R2017aT_A30C30A10mL300_CTPPS2 \\
     --file knobs.txt

-Hint: the knobs active at a given time can be retrieved with the `~pylhc.machine_settings_info` script.
+Hint: the knobs active at a given time can be retrieved with the `~pylhc.machine_settings_info` script.
 """

 import argparse
@@ -211,7 +211,7 @@ def get_madx_script_from_definition_dataframe(

     # write all inits first (looks nicer in madx)
     if by_reference:
-        for variable in deltas.keys():
+        for variable in deltas.keys():  # noqa: SIM118 (this is not a dict)
             variable_init = f"{variable}_init"
             change_commands.append(f"{variable_init:<17} = {variable};")

From 936e343b0508baee14e39f0e9e42466b9c3d4bd1 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:24:18 +0200
Subject: [PATCH 11/25] unnecessary comprehension

---
 pylhc/forced_da_analysis.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py
index c46c1f5..9bc4416 100644
--- a/pylhc/forced_da_analysis.py
+++ b/pylhc/forced_da_analysis.py
@@ -815,7 +815,7 @@ def _get_output_dir(kick_directory, output_directory):
 def _add_intensity_and_losses_to_kicks(kick_df, intensity_df, time_before, time_after):
     LOG.debug("Calculating intensity and losses for the kicks.")
     col_list = [INTENSITY_BEFORE, INTENSITY_AFTER, INTENSITY_LOSSES]
-    new_columns = [col for col in col_list + [err_col(c) for c in col_list]]
+    new_columns = list(col_list + [err_col(c) for c in col_list])
     kick_df = kick_df.reindex(columns=kick_df.columns.tolist() + new_columns)
     kick_df = _get_intensities_around_kicks(kick_df, intensity_df, time_before, time_after)
     return _calculate_intensity_losses_at_kicks(kick_df)

From db14f53576f60a4146da22c08af2be4cfddac4c7 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:25:34 +0200
Subject: [PATCH 12/25] order of imports

---
 pylhc/forced_da_analysis.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py
index 9bc4416..301297c 100644
--- a/pylhc/forced_da_analysis.py
+++ b/pylhc/forced_da_analysis.py
@@ -124,10 +124,6 @@
 from tfs import TfsDataFrame
 from tfs.tools import significant_digits

-pytimber = cern_network_import("pytimber")
-PageStore = cern_network_import("pytimber.pagestore.PageStore")
-
-
 from pylhc.constants.forced_da_analysis import (
     BSRT_EMITTANCE_TO_METER,
     BWS_DIRECTIONS,
@@ -181,6 +177,9 @@
     get_proton_gamma,
 )

+pytimber = cern_network_import("pytimber")
+PageStore = cern_network_import("pytimber.pagestore.PageStore")
+
 LOG = logging_tools.get_logger(__name__)

From 66f485e6bbaf0408484cee17faf92b71f87b3be9 Mon Sep 17
00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:26:27 +0200 Subject: [PATCH 13/25] noqa --- pylhc/data_extract/lsa.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py index c071018..7c71a93 100644 --- a/pylhc/data_extract/lsa.py +++ b/pylhc/data_extract/lsa.py @@ -390,7 +390,7 @@ def _beamprocess_to_dict(bp): return bp_dict -def try_to_acquire_data(function: Callable, *args, **kwargs): +def try_to_acquire_data(function: Callable, *args, **kwargs): # noqa: RET503 """Tries to get data from function multiple times. TODO: Move to omc3 as is also used there in BBQ extraction. From d685aaada5b867ae943e2568c9eda97bb11b93b1 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:28:14 +0200 Subject: [PATCH 14/25] proper casing --- pylhc/bsrt_logger.py | 16 ++++++++-------- pylhc/data_extract/lsa.py | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pylhc/bsrt_logger.py b/pylhc/bsrt_logger.py index f2eb102..49374ec 100644 --- a/pylhc/bsrt_logger.py +++ b/pylhc/bsrt_logger.py @@ -65,9 +65,9 @@ def convert_to_data_output_format(dtobject): CycleName = "LHC.USER.ALL" INCAacc = "LHC" - noSetFlag = True + no_set_flag = True - japc = pyjapc.PyJapc(selector=CycleName, incaAcceleratorName=INCAacc, noSet=noSetFlag) + japc = pyjapc.PyJapc(selector=CycleName, incaAcceleratorName=INCAacc, noSet=no_set_flag) japc.rbacLogin() acquesitions_per_file = 100 j = 0 @@ -78,8 +78,8 @@ def convert_to_data_output_format(dtobject): B1_image = japc.getParam("LHC.BSRTS.5R4.B1/Image") B2_image = japc.getParam("LHC.BSRTS.5L4.B2/Image") if t == 0: - allB1data = [] - allB2data = [] + all_b1_data = [] + all_b2_data = [] B1_IMGtime = B1_image["acqTime"] B2_IMGtime = B2_image["acqTime"] B1_IMGtime_dt = parse_timestamp(B1_IMGtime) @@ -87,8 +87,8 @@ def convert_to_data_output_format(dtobject): B1_IMGtime_st = convert_to_data_output_format(B1_IMGtime_dt) B2_IMGtime_st = convert_to_data_output_format(B2_IMGtime_dt) - allB1data.append(B1_image) - allB2data.append(B2_image) + all_b1_data.append(B1_image) + all_b2_data.append(B2_image) t += 1 if t == acquesitions_per_file: j += 1 @@ -96,8 +96,8 @@ def convert_to_data_output_format(dtobject): f2name = "data_BSRT_B2_" + B2_IMGtime_st + ".dat" f1 = open(f1name, "wb") f2 = open(f2name, "wb") - pickle.dump(allB1data, f1) - pickle.dump(allB2data, f2) + pickle.dump(all_b1_data, f1) + pickle.dump(all_b2_data, f2) f1.close() f2.close() os.system("gzip " + f1name) diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py index 7c71a93..31b7c17 100644 --- a/pylhc/data_extract/lsa.py +++ b/pylhc/data_extract/lsa.py @@ -20,9 +20,9 @@ pjlsa = cern_network_import("pjlsa") try: - pjLSAClient = pjlsa.LSAClient + pjLSAClient = pjlsa.LSAClient # noqa: N816 (it's the real name) except ImportError: - pjLSAClient = object + pjLSAClient = object # noqa: N816 (it's the real name) RELEVANT_BP_CONTEXTS = ("OPERATIONAL", "MD") RELEVANT_BP_CATEGORIES = ("DISCRETE",) From b76baab759c57073216d31e90df75742c7ec5539 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:29:51 +0200 Subject: [PATCH 15/25] use contextlib.suppress instead of try-except-pass --- pylhc/forced_da_analysis.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 301297c..69c8457 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -88,6 +88,7 @@ .. 
_CarlierForcedDA2019: https://journals.aps.org/prab/pdf/10.1103/PhysRevAccelBeams.22.031002 """ +import contextlib import os from collections import defaultdict from contextlib import suppress @@ -1432,10 +1433,8 @@ def _date2num(times): except AttributeError: pass # probably datetime already except TypeError: - try: # not iterable + with contextlib.suppress(AttributeError): times = times.datetime - except AttributeError: - pass # probably datetime already return mdates.date2num(times) From 5a2f0bbc0473a5778a997c9676c28ec50456d05e Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:32:07 +0200 Subject: [PATCH 16/25] open files with a context manager --- pylhc/bsrt_analysis.py | 3 ++- pylhc/bsrt_logger.py | 9 +++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/pylhc/bsrt_analysis.py b/pylhc/bsrt_analysis.py index 3edec64..445e3a2 100644 --- a/pylhc/bsrt_analysis.py +++ b/pylhc/bsrt_analysis.py @@ -204,7 +204,8 @@ def _check_and_fix_entries(entry): def _load_pickled_data(opt, files_df): merged_df = pd.DataFrame() for bsrtfile in files_df["FILES"]: - data = pickle.load(gzip.open(bsrtfile, "rb")) + with gzip.open(bsrtfile, "rb") as f: + data = pickle.load(f) new_df = pd.DataFrame.from_records([_check_and_fix_entries(entry) for entry in data]) merged_df = pd.concat([merged_df, new_df], axis="index", ignore_index=True) diff --git a/pylhc/bsrt_logger.py b/pylhc/bsrt_logger.py index 49374ec..db9dbd1 100644 --- a/pylhc/bsrt_logger.py +++ b/pylhc/bsrt_logger.py @@ -94,12 +94,9 @@ def convert_to_data_output_format(dtobject): j += 1 f1name = "data_BSRT_B1_" + B1_IMGtime_st + ".dat" f2name = "data_BSRT_B2_" + B2_IMGtime_st + ".dat" - f1 = open(f1name, "wb") - f2 = open(f2name, "wb") - pickle.dump(all_b1_data, f1) - pickle.dump(all_b2_data, f2) - f1.close() - f2.close() + with open(f1name, "wb") as f1, open(f2name, "wb") as f2: + pickle.dump(all_b1_data, f1) + pickle.dump(all_b2_data, f2) os.system("gzip " + f1name) os.system("gzip " + f2name) t = 0 From 70f0e4e55138601e42a56e98a29dd0dcb22bfc90 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:37:19 +0200 Subject: [PATCH 17/25] use pathlib operations where possible --- pylhc/bsrt_analysis.py | 2 +- pylhc/bsrt_logger.py | 3 ++- pylhc/calibration/dispersion.py | 16 ++-------------- pylhc/forced_da_analysis.py | 3 +-- 4 files changed, 6 insertions(+), 18 deletions(-) diff --git a/pylhc/bsrt_analysis.py b/pylhc/bsrt_analysis.py index 445e3a2..8875e6c 100644 --- a/pylhc/bsrt_analysis.py +++ b/pylhc/bsrt_analysis.py @@ -165,7 +165,7 @@ def _select_files(opt, files_df): def _load_files_in_df(opt): files_df = pd.DataFrame( - data={"FILES": glob.glob(str(Path(opt.directory) / _get_bsrt_logger_fname(opt.beam, "*")))} + data={"FILES": glob.glob(str(Path(opt.directory) / _get_bsrt_logger_fname(opt.beam, "*")))} # noqa: PTH207 ) files_df = files_df.assign( diff --git a/pylhc/bsrt_logger.py b/pylhc/bsrt_logger.py index db9dbd1..02eb04b 100644 --- a/pylhc/bsrt_logger.py +++ b/pylhc/bsrt_logger.py @@ -15,6 +15,7 @@ import pickle import sys import time +from pathlib import Path from omc3.definitions import formats from omc3.utils.mock import cern_network_import @@ -94,7 +95,7 @@ def convert_to_data_output_format(dtobject): j += 1 f1name = "data_BSRT_B1_" + B1_IMGtime_st + ".dat" f2name = "data_BSRT_B2_" + B2_IMGtime_st + ".dat" - with open(f1name, "wb") as f1, open(f2name, "wb") as f2: + with Path(f1name).open("wb") as f1, Path(f2name).open("wb") as f2: pickle.dump(all_b1_data, f1) pickle.dump(all_b2_data, 
f2) os.system("gzip " + f1name) diff --git a/pylhc/calibration/dispersion.py b/pylhc/calibration/dispersion.py index 0cc52b1..28350aa 100644 --- a/pylhc/calibration/dispersion.py +++ b/pylhc/calibration/dispersion.py @@ -13,23 +13,11 @@ import numpy as np import pandas as pd import tfs -from omc3.optics_measurements.constants import ( - DISPERSION_NAME, - ERR, - EXT, - NORM_DISP_NAME, - S, -) +from omc3.optics_measurements.constants import DISPERSION_NAME, ERR, EXT, NORM_DISP_NAME, S from omc3.utils import logging_tools from scipy.optimize import curve_fit -from pylhc.constants.calibration import ( - BPMS, - D_BPMS, - LABELS, - TFS_INDEX, - D, -) +from pylhc.constants.calibration import BPMS, D_BPMS, LABELS, TFS_INDEX, D LOG = logging_tools.get_logger(__name__) diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 69c8457..41dc96d 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -89,7 +89,6 @@ """ import contextlib -import os from collections import defaultdict from contextlib import suppress from pathlib import Path @@ -1441,7 +1440,7 @@ def _date2num(times): def _save_fig(directory, plane, fig, ptype): try: for ftype in PLOT_FILETYPES: - path = os.path.join(directory, outfile_plot(ptype, plane, ftype)) + path = Path(directory) / outfile_plot(ptype, plane, ftype) LOG.debug(f"Saving Figure to {path}") fig.savefig(path) except OSError: From a49a0a148f40c1ec4f2511912599094ccc8be607 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:38:15 +0200 Subject: [PATCH 18/25] prefer ternary operator to if-else checks --- pylhc/forced_da_analysis.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 41dc96d..2981b79 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -793,10 +793,7 @@ def _get_new_kick_file(kick_dir, planes): def _get_output_dir(kick_directory, output_directory): kick_path = Path(kick_directory) - if output_directory: - output_path = Path(output_directory) - else: - output_path = kick_path / RESULTS_DIR + output_path = Path(output_directory) if output_directory else kick_path / RESULTS_DIR try: output_path.mkdir(exist_ok=True) except PermissionError: From f4f6c69a73193fdb7b76c078c309c4c91659f639 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:38:34 +0200 Subject: [PATCH 19/25] use | --- pylhc/bsrt_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pylhc/bsrt_analysis.py b/pylhc/bsrt_analysis.py index 8875e6c..9fc88ba 100644 --- a/pylhc/bsrt_analysis.py +++ b/pylhc/bsrt_analysis.py @@ -194,7 +194,7 @@ def _get_timestamp_from_name(name, formatstring): def _check_and_fix_entries(entry): # pd.to_csv does not handle np.array as entries nicely, converting to list circumvents this for key, val in entry.items(): - if isinstance(val, (np.ndarray, tuple)): + if isinstance(val, (np.ndarray | tuple)): entry[key] = list(val) if np.array(val).size == 0: entry[key] = np.nan From 332cf98fc0fd5ccc7dd8a22c551e45c3232f7011 Mon Sep 17 00:00:00 2001 From: Felix Soubelet Date: Thu, 10 Jul 2025 17:39:38 +0200 Subject: [PATCH 20/25] use def and not lambda --- pylhc/forced_da_analysis.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py index 2981b79..c1dec1c 100644 --- a/pylhc/forced_da_analysis.py +++ b/pylhc/forced_da_analysis.py @@ -1403,7 +1403,8 @@ def _maybe_add_sum_for_planes(df, planes, col_fun, 
col_err_fun=None):
     """In case planes == 'XY' add the two plane columns and their errors."""
     if len(planes) > 1:
         if col_err_fun is not None:
-            cols = lambda p: [col_fun(p), col_err_fun(p)]
+            def cols(p):
+                return [col_fun(p), col_err_fun(p)]
             x_cols, y_cols = [cols(p) for p in planes]
             df = df.reindex(columns=df.columns.to_list() + cols(planes))
             df[cols(planes)] = np.array(

From 432b27ddf7ac48081ec15e6147a0ae1f2c37cd94 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:39:44 +0200
Subject: [PATCH 21/25] formatting

---
 pylhc/forced_da_analysis.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pylhc/forced_da_analysis.py b/pylhc/forced_da_analysis.py
index c1dec1c..1c3fd90 100644
--- a/pylhc/forced_da_analysis.py
+++ b/pylhc/forced_da_analysis.py
@@ -1403,8 +1403,10 @@ def _maybe_add_sum_for_planes(df, planes, col_fun, col_err_fun=None):
     """In case planes == 'XY' add the two plane columns and their errors."""
     if len(planes) > 1:
         if col_err_fun is not None:
+
             def cols(p):
                 return [col_fun(p), col_err_fun(p)]
+
             x_cols, y_cols = [cols(p) for p in planes]
             df = df.reindex(columns=df.columns.to_list() + cols(planes))
             df[cols(planes)] = np.array(

From b98f952918c669e703a117bb5c72654713a63ccf Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:42:18 +0200
Subject: [PATCH 22/25] fix deprecated integer access of Series

---
 pylhc/calibration/beta.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pylhc/calibration/beta.py b/pylhc/calibration/beta.py
index 2ef0c14..58653c3 100644
--- a/pylhc/calibration/beta.py
+++ b/pylhc/calibration/beta.py
@@ -61,7 +61,7 @@ def err_function(x, popt, pcov):
     beta_phase_err = beta_phase_tfs.reindex(bpms)[f"{ERR}{BETA}{plane}"]

     # Get the rough IP position and beta star for the initial values
-    ip_position = (positions[-1] - positions[0]) / 2
+    ip_position = (positions.iloc[-1] - positions.iloc[0]) / 2
     initial_values = (BETA_STAR_ESTIMATION, ip_position)

     # Get the curve fit for the expected parabola

From b64db79581325539dd25ac0793455fc0b034d087 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 10 Jul 2025 17:45:48 +0200
Subject: [PATCH 23/25] do not fill before making sure the object dtype is correct, to avoid failing future pandas behavior

---
 pylhc/bpm_calibration.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pylhc/bpm_calibration.py b/pylhc/bpm_calibration.py
index ee3ec4c..611ed91 100644
--- a/pylhc/bpm_calibration.py
+++ b/pylhc/bpm_calibration.py
@@ -111,7 +111,7 @@ def main(opt):

     # Fill NaN with 1 because of missing BPMs and that fit cannot be done everywhere
     for plane in factors:
-        factors[plane] = factors[plane].fillna(1)
+        factors[plane] = factors[plane].infer_objects().fillna(1)
     LOG.debug("".join([f"\nPlane {plane}:\n{factors[plane]}" for plane in factors]))

     # Write the TFS file to the desired output directory

From 015aa145182c07865652c8b3a2d5e4d28fcfa279 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 24 Jul 2025 10:07:45 +0200
Subject: [PATCH 24/25] add a RuntimeError and remove noqa: RET503

---
 pylhc/data_extract/lsa.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pylhc/data_extract/lsa.py b/pylhc/data_extract/lsa.py
index 31b7c17..27177b2 100644
--- a/pylhc/data_extract/lsa.py
+++ b/pylhc/data_extract/lsa.py
@@ -390,7 +390,7 @@ def _beamprocess_to_dict(bp):
     return bp_dict


-def try_to_acquire_data(function: Callable, *args, **kwargs):  # noqa: RET503
+def try_to_acquire_data(function: Callable, *args, **kwargs):
     """Tries to get data from function multiple times.

     TODO: Move to omc3 as is also used there in BBQ extraction.
@@ -413,3 +413,4 @@ def try_to_acquire_data(function: Callable, *args, **kwargs):  # noqa: RET503
                 LOG.warning(f"Could not acquire data! Trial no {tries + 1} / {retries}")
                 continue  # will go to the next iteration of the loop, so retry
             raise OSError("Could not acquire data!") from e
+    raise RuntimeError(f"Could not acquire data after {retries:d} retries.")

From 16be777f9fb248781b68c208b0027856c3caa6a3 Mon Sep 17 00:00:00 2001
From: Felix Soubelet
Date: Thu, 24 Jul 2025 10:08:30 +0200
Subject: [PATCH 25/25] and fix scuffed return type hint

---
 pylhc/machine_settings_info.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pylhc/machine_settings_info.py b/pylhc/machine_settings_info.py
index 5711021..891a043 100644
--- a/pylhc/machine_settings_info.py
+++ b/pylhc/machine_settings_info.py
@@ -498,7 +498,7 @@ def _get_optics(acc_time: AccDatetime, beamprocess: str, bp_start: AccDatetime)

 def _get_last_optics(
     optics_table, bp: str, bp_start: AccDatetime, acc_time: AccDatetime
-) -> (str, AccDatetime):
+) -> tuple[str, AccDatetime]:
     """Get the name of the optics at the right time for current beam process."""
     ts = acc_time.timestamp() - bp_start.timestamp()
     item = None
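
Editorial note on the last two fixes, since their one-line diffs hide the reasoning.

Patch 24 satisfies ruff's RET503 rule (implicit return) by making every path out of
``try_to_acquire_data`` either return or raise. A minimal sketch of the resulting retry
pattern follows; the ``retries`` keyword argument, the caught exception type and the lack
of any back-off are simplifying assumptions here, not the exact pylhc implementation:

.. code-block:: python

    import logging
    from collections.abc import Callable

    LOG = logging.getLogger(__name__)


    def try_to_acquire_data(function: Callable, *args, retries: int = 5, **kwargs):
        """Call ``function`` up to ``retries`` times, raising on final failure."""
        for tries in range(retries):
            try:
                return function(*args, **kwargs)
            except OSError as e:  # assumed failure mode, e.g. a transient network error
                if tries + 1 == retries:
                    raise OSError("Could not acquire data!") from e
                LOG.warning(f"Could not acquire data! Trial no {tries + 1} / {retries}")
        # Unreachable when retries > 0, but it makes the fall-through path raise
        # explicitly instead of implicitly returning None, which is what RET503 flags.
        raise RuntimeError(f"Could not acquire data after {retries:d} retries.")

Patch 23 sidesteps pandas' deprecation of silent downcasting on ``fillna`` for
object-dtype data: inferring proper dtypes first keeps the result stable across pandas
versions. A short illustration of the behavior it avoids (column name and values here
are made up):

.. code-block:: python

    import pandas as pd

    df = pd.DataFrame({"CALIBRATION": [1.1, None]}, dtype="object")
    # A bare df.fillna(1) would emit a FutureWarning about downcasting the
    # object-dtype column; inferring the (float) dtype first avoids it.
    df = df.infer_objects().fillna(1)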