From 95d13c56833b7fb42be054ef1c7e37a75e322fc7 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Mon, 17 Mar 2025 22:19:28 +0100 Subject: [PATCH 01/27] Merged main with the location changes --- shell.nix | 8 -------- 1 file changed, 8 deletions(-) delete mode 100644 shell.nix diff --git a/shell.nix b/shell.nix deleted file mode 100644 index d54e32421..000000000 --- a/shell.nix +++ /dev/null @@ -1,8 +0,0 @@ -{ pkgs ? import {} }: - -pkgs.mkShell { - packages = with pkgs; [ - (python39.withPackages (ps: [ ps.pip ])) - pre-commit - ]; -} From c872394fea9ff1b9aec03fa386954cb757aff557 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Wed, 19 Mar 2025 18:35:18 +0100 Subject: [PATCH 02/27] no ubx yet --- python/PiFinder/main.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/python/PiFinder/main.py b/python/PiFinder/main.py index 079aa480a..93bfd3735 100644 --- a/python/PiFinder/main.py +++ b/python/PiFinder/main.py @@ -889,10 +889,7 @@ def rotate_logs() -> Path: imu = importlib.import_module("PiFinder.imu_pi") cfg = config.Config() gps_type = cfg.get_option("gps_type") - if gps_type == "ublox": - gps_monitor = importlib.import_module("PiFinder.gps_ubx") - else: - gps_monitor = importlib.import_module("PiFinder.gps_gpsd") + gps_monitor = importlib.import_module("PiFinder.gps_gpsd") if args.display is not None: display_hardware = args.display.lower() From 0ec4de8b165aa8d601baa9b66ce7ae0a83c0ad2d Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Tue, 25 Mar 2025 08:13:25 +0100 Subject: [PATCH 03/27] Revert "no ubx yet" This reverts commit c872394fea9ff1b9aec03fa386954cb757aff557. --- python/PiFinder/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/python/PiFinder/main.py b/python/PiFinder/main.py index bd7dc548f..2911a6bdf 100644 --- a/python/PiFinder/main.py +++ b/python/PiFinder/main.py @@ -893,7 +893,10 @@ def rotate_logs() -> Path: imu = importlib.import_module("PiFinder.imu_pi") cfg = config.Config() gps_type = cfg.get_option("gps_type") - gps_monitor = importlib.import_module("PiFinder.gps_gpsd") + if gps_type == "ublox": + gps_monitor = importlib.import_module("PiFinder.gps_ubx") + else: + gps_monitor = importlib.import_module("PiFinder.gps_gpsd") if args.display is not None: display_hardware = args.display.lower() From b8d65d45c31f9c5c3da2f29916c22de18498b188 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Fri, 21 Nov 2025 17:01:03 +0100 Subject: [PATCH 04/27] First version of the working deep chart feature --- default_config.json | 4 + python/PiFinder/camera_debug.py | 14 +- python/PiFinder/cat_images.py | 151 +++-- python/PiFinder/catalogs.py | 15 +- python/PiFinder/deep_chart.py | 696 ++++++++++++++++++++ python/PiFinder/image_utils.py | 188 ++++++ python/PiFinder/keyboard_local.py | 82 ++- python/PiFinder/keyboard_none.py | 2 +- python/PiFinder/main.py | 59 +- python/PiFinder/solver.py | 28 +- python/PiFinder/sqm/noise_floor.py | 2 +- python/PiFinder/sqm/sqm.py | 4 +- python/PiFinder/star_catalog.py | 808 ++++++++++++++++++++++++ python/PiFinder/ui/lm_entry.py | 222 +++++++ python/PiFinder/ui/log.py | 10 +- python/PiFinder/ui/menu_structure.py | 83 +++ python/PiFinder/ui/object_details.py | 363 ++++++++++- python/tests/test_limiting_magnitude.py | 127 ++++ 18 files changed, 2761 insertions(+), 97 deletions(-) create mode 100644 python/PiFinder/deep_chart.py create mode 100644 python/PiFinder/image_utils.py create mode 100644 python/PiFinder/star_catalog.py create mode 100644 python/PiFinder/ui/lm_entry.py create mode 100644 
python/tests/test_limiting_magnitude.py diff --git a/default_config.json b/default_config.json index aa10ca9f8..0b14bd1a4 100644 --- a/default_config.json +++ b/default_config.json @@ -15,6 +15,10 @@ "chart_dso": 128, "chart_reticle": 128, "chart_constellations": 64, + "obj_chart_crosshair": "pulse", + "obj_chart_crosshair_style": "simple", + "obj_chart_lm_mode": "auto", + "obj_chart_lm_fixed": 14.0, "solve_pixel": [256, 256], "gps_type": "ublox", "gps_baud_rate": 9600, diff --git a/python/PiFinder/camera_debug.py b/python/PiFinder/camera_debug.py index 5ae1c6ed1..3870570a4 100644 --- a/python/PiFinder/camera_debug.py +++ b/python/PiFinder/camera_debug.py @@ -63,13 +63,15 @@ def stop_camera(self) -> None: def capture(self) -> Image.Image: sleep_time = self.exposure_time / 1000000 time.sleep(sleep_time) + # FOR TESTING: Keep using the same image (first image - solves, brighter sky) + # Comment out image cycling to maintain consistent roll/orientation # Change images every 10 seconds - elapsed = time.time() - self.last_image_time - if elapsed > 10: - self.current_image_num, self.last_image = next(self.image_cycle) - logger.debug( - f"Debug camera switched to test image #{self.current_image_num}" - ) + # elapsed = time.time() - self.last_image_time + # if elapsed > 10: + # self.current_image_num, self.last_image = next(self.image_cycle) + # logger.debug( + # f"Debug camera switched to test image #{self.current_image_num}" + # ) return self.last_image def capture_bias(self): diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py index ae4a9b967..958d96958 100644 --- a/python/PiFinder/cat_images.py +++ b/python/PiFinder/cat_images.py @@ -27,6 +27,10 @@ def get_display_image( display_class, burn_in=True, magnification=None, + config_object=None, + shared_state=None, + chart_generator=None, # Pass in from UI layer instead of creating here + force_deep_chart=False, # Toggle: force deep chart even if POSS image exists ): """ Returns a 128x128 image buffer for @@ -36,20 +40,89 @@ def get_display_image( fov: 1-.125 roll: degrees + config_object: + Required for deep chart generation + shared_state: + Required for deep chart generation """ object_image_path = resolve_image_name(catalog_object, source="POSS") logger.debug("object_image_path = %s", object_image_path) - if not os.path.exists(object_image_path): - return_image = Image.new("RGB", display_class.resolution) - ri_draw = ImageDraw.Draw(return_image) - if burn_in: - ri_draw.text( - (30, 50), - _("No Image"), - font=display_class.fonts.large.font, - fill=display_class.colors.get(128), - ) + + # If force_deep_chart is True, skip POSS image even if it exists + if force_deep_chart or not os.path.exists(object_image_path): + # Try to generate deep chart if catalog available + return_image = None + + if config_object and shared_state: + from pathlib import Path + from PiFinder import utils + + deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") + + logger.info(f"Deep chart request: chart_generator={chart_generator is not None}, catalog_exists={deep_catalog_path.exists()}, path={deep_catalog_path}") + + # Try to generate deep chart if chart_generator was passed in + if chart_generator is not None and deep_catalog_path.exists(): + try: + from PiFinder.image_utils import create_loading_image + + # Ensure catalog loading started + chart_generator.ensure_catalog_loading() + + # Try to generate chart + chart_image = chart_generator.generate_chart( + catalog_object, + (display_class.fov_res, 
display_class.fov_res), + burn_in=burn_in, + display_class=display_class, + roll=roll + ) + + if chart_image is None: + # Catalog not ready yet, show "Loading..." with progress + if chart_generator.catalog: + progress_text = chart_generator.catalog.load_progress + progress_percent = chart_generator.catalog.load_percent + else: + progress_text = "Initializing..." + progress_percent = 0 + + return_image = create_loading_image( + display_class, + message="Loading Chart...", + progress_text=progress_text, + progress_percent=progress_percent + ) + # Mark image as "loading" so UI knows to refresh + return_image.is_loading_placeholder = True + else: + # Chart ready, convert to red + return_image = ImageChops.multiply( + chart_image.convert("RGB"), + display_class.colors.red_image + ) + return_image.is_loading_placeholder = False + except Exception as e: + logger.error(f"Chart generation failed: {e}", exc_info=True) + return_image = None + else: + if chart_generator is None: + logger.warning("Deep chart requested but chart_generator is None") + if not deep_catalog_path.exists(): + logger.warning(f"Deep star catalog not found at {deep_catalog_path}") + + # Fallback: "No Image" placeholder + if return_image is None: + return_image = Image.new("RGB", display_class.resolution) + ri_draw = ImageDraw.Draw(return_image) + if burn_in: + ri_draw.text( + (30, 50), + _("No Image"), + font=display_class.fonts.large.font, + fill=display_class.colors.get(128), + ) else: return_image = Image.open(object_image_path) @@ -123,45 +196,29 @@ def get_display_image( ri_draw = ImageDraw.Draw(return_image) if burn_in: - # Top text - FOV on left, magnification on right - ui_utils.shadow_outline_text( - ri_draw, - (1, display_class.titlebar_height - 1), - f"{fov:0.2f}°", - font=display_class.fonts.base, - align="left", - fill=display_class.colors.get(254), - shadow_color=display_class.colors.get(0), - outline=2, - ) + # Use shared overlay utility for consistency with generated charts + # Create fake eyepiece object from text if needed + from PiFinder.image_utils import add_image_overlays - magnification_text = ( - f"{magnification:.0f}x" if magnification and magnification > 0 else "?x" - ) - ui_utils.shadow_outline_text( - ri_draw, - ( - display_class.resX - (display_class.fonts.base.width * 4), - display_class.titlebar_height - 1, - ), - magnification_text, - font=display_class.fonts.base, - align="right", - fill=display_class.colors.get(254), - shadow_color=display_class.colors.get(0), - outline=2, - ) + # Parse eyepiece text to get eyepiece object + # If we have config_object, use actual eyepiece + if config_object and hasattr(config_object, 'equipment'): + eyepiece_obj = config_object.equipment.active_eyepiece + else: + # Create minimal eyepiece object from text for overlay + class FakeEyepiece: + def __init__(self, text): + self.focal_length_mm = 0 + self.name = text + eyepiece_obj = FakeEyepiece(eyepiece_text) - # Bottom text - only eyepiece information - ui_utils.shadow_outline_text( - ri_draw, - (1, display_class.resY - (display_class.fonts.base.height * 1.1)), - eyepiece_text, - font=display_class.fonts.base, - align="left", - fill=display_class.colors.get(128), - shadow_color=display_class.colors.get(0), - outline=2, + return_image = add_image_overlays( + return_image, + display_class, + fov, + magnification, + eyepiece_obj, + burn_in=True ) return return_image diff --git a/python/PiFinder/catalogs.py b/python/PiFinder/catalogs.py index e8a527e35..4839e21b8 100644 --- a/python/PiFinder/catalogs.py +++ 
b/python/PiFinder/catalogs.py @@ -912,8 +912,18 @@ def _build_composite( # Start background loader for deferred objects if deferred_objects: + # Sort deferred objects: load WDS last (it has 131K objects) + # This ensures smaller catalogs (C, Col, etc.) are available sooner + def sort_key(obj): + if obj["catalog_code"] == "WDS": + return 1 # Load last + else: + return 0 # Load first + + deferred_objects_sorted = sorted(deferred_objects, key=sort_key) + loader = CatalogBackgroundLoader( - deferred_catalog_objects=deferred_objects, + deferred_catalog_objects=deferred_objects_sorted, objects=objects, common_names=common_names, obs_db=obs_db, @@ -929,8 +939,7 @@ def _build_composite( def _on_loader_progress(self, loaded: int, total: int, catalog: str) -> None: """Progress callback - log every 10K objects""" - if loaded % 10000 == 0 or loaded == total: - logger.info(f"Background loading: {loaded}/{total} ({catalog})") + pass # Muted to reduce log noise def _on_loader_complete( self, loaded_objects: List[CompositeObject], ui_queue diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py new file mode 100644 index 000000000..2735e32d6 --- /dev/null +++ b/python/PiFinder/deep_chart.py @@ -0,0 +1,696 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +Deep star chart generator for objects without DSS/POSS images + +Generates on-demand star charts using HEALPix-indexed deep star catalog. +Features: +- Equipment-aware FOV and magnitude limits +- Stereographic projection (matching chart.py) +- Center marker for target object +- Info overlays (FOV, magnification, eyepiece) +- Caching for performance +""" + +import logging +from pathlib import Path +from typing import Optional, Tuple + +import numpy as np +from PIL import Image, ImageDraw, ImageFont + +from PiFinder import utils +from PiFinder.star_catalog import CatalogState, DeepStarCatalog + +logger = logging.getLogger("PiFinder.DeepChart") + +# Global singleton instance to ensure same catalog across all uses +_chart_generator_instance = None + + +def get_chart_generator(config, shared_state): + """Get or create the global chart generator singleton""" + global _chart_generator_instance + if _chart_generator_instance is None: + _chart_generator_instance = DeepChartGenerator(config, shared_state) + return _chart_generator_instance + + +class DeepChartGenerator: + """ + Generate on-demand star charts with equipment-aware settings + + Usage: + gen = DeepChartGenerator(config, shared_state) + image = gen.generate_chart(catalog_object, (128, 128), burn_in=True) + """ + + def __init__(self, config, shared_state): + """ + Initialize chart generator + + Args: + config: PiFinder config object + shared_state: Shared state object + """ + logger.info(">>> DeepChartGenerator.__init__() called") + self.config = config + self.shared_state = shared_state + self.catalog = None + self.chart_cache = {} + + # Initialize font for text overlays + font_path = Path(Path.cwd(), "../fonts/RobotoMonoNerdFontMono-Bold.ttf") + try: + self.small_font = ImageFont.truetype(str(font_path), 8) + except Exception as e: + logger.warning(f"Failed to load font {font_path}: {e}, using default") + self.small_font = ImageFont.load_default() + + def get_catalog_state(self) -> CatalogState: + """Get current catalog loading state""" + if self.catalog is None: + return CatalogState.NOT_LOADED + return self.catalog.state + + def ensure_catalog_loading(self): + """ + Ensure catalog is loading or loaded + Triggers background load if needed + """ + if self.catalog is None: + 
self.initialize_catalog() + + if self.catalog.state == CatalogState.NOT_LOADED: + # Trigger background load + location = self.shared_state.location() + sqm = self.shared_state.sqm() + + observer_lat = location.lat if location and location.lock else None + limiting_mag = self.get_limiting_magnitude(sqm) + + logger.info( + f"Starting catalog load: lat={observer_lat}, mag_limit={limiting_mag:.1f}" + ) + self.catalog.start_background_load(observer_lat, limiting_mag) + + def initialize_catalog(self): + """Create catalog instance (doesn't load data yet)""" + catalog_path = Path(utils.astro_data_dir, "deep_stars") + + # Check if catalog exists before initializing + metadata_file = catalog_path / "metadata.json" + if not metadata_file.exists(): + logger.warning(f"Deep star catalog not found at {catalog_path}") + logger.warning("To build catalog, run: python -m PiFinder.catalog_tools.gaia_downloader --mag-limit 12 --output /tmp/gaia.csv") + logger.warning("Then: python -m PiFinder.catalog_tools.healpix_builder --input /tmp/gaia.csv --output {}/astro_data/deep_stars".format(Path.home() / "PiFinder")) + + self.catalog = DeepStarCatalog(str(catalog_path)) + logger.info(f"Catalog initialized: {catalog_path}") + + def generate_chart( + self, catalog_object, resolution: Tuple[int, int], burn_in: bool = True, display_class=None, roll=None + ) -> Optional[Image.Image]: + """ + Generate chart for object at current equipment settings + + Args: + catalog_object: CompositeObject with RA/Dec + resolution: (width, height) tuple + burn_in: Add FOV/mag/eyepiece overlays + + Returns: + PIL Image in RGB (red colorspace), or None if catalog not ready + """ + # Ensure catalog is loading + self.ensure_catalog_loading() + + # Check state + if self.catalog.state != CatalogState.READY: + logger.info(f"Chart generation skipped: catalog state = {self.catalog.state}") + return None + + # Check cache + cache_key = self.get_cache_key(catalog_object) + if cache_key in self.chart_cache: + # Return cached base image (without crosshair) + # Crosshair will be added by add_pulsating_crosshair() each frame + return self.chart_cache[cache_key] + + # Get equipment settings + equipment = self.config.equipment + fov = equipment.calc_tfov() + if fov <= 0: + fov = 10.0 # Default fallback + + mag = equipment.calc_magnification() + if mag <= 0: + mag = 50.0 # Default fallback + + sqm = self.shared_state.sqm() + mag_limit_calculated = self.get_limiting_magnitude(sqm) + # For display, keep the calculated value (may be >17) + # For query, cap at catalog max + mag_limit_query = min(mag_limit_calculated, 17.0) + + # Query stars + import time + t0 = time.time() + stars = self.catalog.get_stars_for_fov( + ra_deg=catalog_object.ra, + dec_deg=catalog_object.dec, + fov_deg=fov, + mag_limit=mag_limit_query, + ) + t1 = time.time() + + logger.info( + f"Chart for {catalog_object.catalog_code}{catalog_object.sequence}: " + f"Center RA={catalog_object.ra:.4f}° Dec={catalog_object.dec:.4f}°, " + f"FOV={fov:.4f}°, Roll={roll if roll is not None else 0:.1f}°, " + f"{len(stars)} stars (query: {(t1-t0)*1000:.1f}ms)" + ) + + # Calculate rotation angle for roll / Newtonian orientation + # TODO: Should use telescope type from config (Newtonian vs Refractor) + # For now, hardcoded 180° to match existing cat_images.py behavior + # Add 90° clockwise rotation to match POSS image orientation + image_rotate = 180 + 90 # Newtonian inverts image + 90° CW alignment + if roll is not None: + image_rotate += roll + + # Render chart with rotation applied to star coordinates + 
t2 = time.time() + image = self.render_chart( + stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query + ) + t3 = time.time() + logger.info(f"Chart rendering: {(t3-t2)*1000:.1f}ms") + + # Add FOV circle BEFORE text overlays so it appears behind them + if burn_in and display_class is not None: + draw = ImageDraw.Draw(image) + width, height = display_class.resolution + cx, cy = width / 2.0, height / 2.0 + radius = min(width, height) / 2.0 - 2 # Leave 2 pixel margin + marker_color = display_class.colors.get(64) # Subtle but visible + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + draw.ellipse(bbox, outline=marker_color, width=1) + + # Add overlays (using shared utility) + if burn_in and display_class is not None: + from PiFinder.image_utils import add_image_overlays + + logger.info(f"Adding overlays: burn_in={burn_in}, LM={mag_limit_calculated:.1f}") + image = add_image_overlays( + image, + display_class, + fov, + mag, + equipment.active_eyepiece, + burn_in=True, + limiting_magnitude=mag_limit_calculated, # Pass uncapped value for display + ) + + # Cache result (limit cache size to 10 charts) + self.chart_cache[cache_key] = image + if len(self.chart_cache) > 10: + # Remove oldest + oldest = next(iter(self.chart_cache)) + del self.chart_cache[oldest] + + return image + + def render_chart( + self, + stars, + center_ra: float, + center_dec: float, + fov: float, + resolution: Tuple[int, int], + magnification: float = 50.0, + rotation: float = 0.0, + mag_limit: float = 17.0, + ) -> Image.Image: + """ + Render stars to PIL Image with center crosshair + Uses fast vectorized stereographic projection + + Args: + stars: List of (ra, dec, mag) tuples + center_ra: Center RA in degrees + center_dec: Center Dec in degrees + fov: Field of view in degrees + resolution: (width, height) tuple + magnification: Magnification factor + rotation: Rotation angle in degrees (applied to coordinates) + + Returns: + PIL Image in RGB (black background, red stars) + """ + import time + t_start = time.time() + + width, height = resolution + # Use NumPy array for fast pixel operations + image_array = np.zeros((height, width, 3), dtype=np.uint8) + image = Image.new("RGB", (width, height), (0, 0, 0)) + draw = ImageDraw.Draw(image) + + if len(stars) == 0: + # Still draw crosshair even if no stars + cx, cy = width // 2, height // 2 + marker_color = (128, 0, 0) + size = 5 + draw.line([cx - size, cy, cx + size, cy], fill=marker_color, width=1) + draw.line([cx, cy - size, cx, cy + size], fill=marker_color, width=1) + return image + + # Convert to numpy arrays for vectorized operations + t1 = time.time() + stars_array = np.array(stars) + ra_arr = stars_array[:, 0] + dec_arr = stars_array[:, 1] + mag_arr = stars_array[:, 2] + t2 = time.time() + logger.debug(f" Array conversion: {(t2-t1)*1000:.1f}ms") + + # Fast stereographic projection (vectorized) + # Convert degrees to radians + center_ra_rad = np.radians(center_ra) + center_dec_rad = np.radians(center_dec) + ra_rad = np.radians(ra_arr) + dec_rad = np.radians(dec_arr) + + # Use simple tangent plane projection (like POSS images) + # This gives linear scaling: pixels_per_degree is constant + # x = tan(ra - ra0) * cos(dec0) + # y = (tan(dec) - tan(dec0)) / cos(ra - ra0) + # Simplified for small angles: x ≈ (ra - ra0), y ≈ (dec - dec0) + + # Tangent plane projection (matches POSS images) + # For small FOV (< 10°), linear approximation works well + # IMPORTANT: Scale RA by CENTER declination, not individual star declinations + 
cos_center_dec = np.cos(center_dec_rad) + + dra = ra_rad - center_ra_rad + # Handle RA wrapping at 0°/360° + dra = np.where(dra > np.pi, dra - 2*np.pi, dra) + dra = np.where(dra < -np.pi, dra + 2*np.pi, dra) + ddec = dec_rad - center_dec_rad + + # Project onto tangent plane + # X: RA offset scaled by CENTER declination (matches POSS projection) + # Y: Dec offset (linear) + x_proj = dra * cos_center_dec + y_proj = ddec + + # Simple linear pixel scale (matches POSS behavior) + # fov degrees should map to width pixels + pixel_scale = width / np.radians(fov) + + # Convert to screen coordinates FIRST + # Center of field should always be at width/2, height/2 + # IMPORTANT: Flip X-axis to match POSS image orientation + # RA increases EASTWARD, which is to the LEFT when facing south + # So positive RA offset should go to the LEFT (subtract from center) + x_screen = width / 2.0 - x_proj * pixel_scale # FLIPPED: RA increases to LEFT + y_screen = height / 2.0 - y_proj * pixel_scale + + # Apply rotation to SCREEN coordinates (after scaling) + # This avoids magnifying small numerical errors + if rotation != 0: + rot_rad = np.radians(rotation) + cos_rot = np.cos(rot_rad) + sin_rot = np.sin(rot_rad) + + # Rotate around center + center_x = width / 2.0 + center_y = height / 2.0 + x_rel = x_screen - center_x + y_rel = y_screen - center_y + + x_rotated = x_rel * cos_rot - y_rel * sin_rot + y_rotated = x_rel * sin_rot + y_rel * cos_rot + + x_screen = x_rotated + center_x + y_screen = y_rotated + center_y + + # Filter stars within screen bounds only (no circular mask) + mask = ( + (x_screen >= 0) + & (x_screen < width) + & (y_screen >= 0) + & (y_screen < height) + ) + + x_visible = x_screen[mask] + y_visible = y_screen[mask] + mag_visible = mag_arr[mask] + ra_visible = ra_arr[mask] + dec_visible = dec_arr[mask] + + # Scale brightness based on magnitude range in current field + # Brightest star in field → 255, faintest → 50 + # This auto-adjusts contrast for any FOV + + if len(mag_visible) == 0: + intensities = np.array([]) + else: + brightest_mag = np.min(mag_visible) + faintest_mag = np.max(mag_visible) + + if faintest_mag - brightest_mag < 0.1: + # All stars same magnitude - use full brightness + intensities = np.full_like(mag_visible, 255, dtype=int) + else: + # Linear scaling from brightest (255) to faintest (50) + # Note: Lower magnitude = brighter star + intensities = 255 - ((mag_visible - brightest_mag) / (faintest_mag - brightest_mag) * 205) + intensities = intensities.astype(int) + + # Render stars: crosses for bright ones, single pixels for faint + t3 = time.time() + ix = np.round(x_visible).astype(int) + iy = np.round(y_visible).astype(int) + t4 = time.time() + logger.debug(f" Star projection: {(t3-t2)*1000:.1f}ms") + + for i in range(len(ix)): + px = ix[i] + py = iy[i] + intensity = intensities[i] + + # Draw all stars as single pixels (no crosses) + if 0 <= px < width and 0 <= py < height: + image_array[py, px, 0] = min(255, image_array[py, px, 0] + intensity) + + np.clip(image_array[:, :, 0], 0, 255, out=image_array[:, :, 0]) + t5 = time.time() + logger.debug(f" Star drawing loop: {(t5-t4)*1000:.1f}ms ({len(ix)} stars)") + + # Convert NumPy array back to PIL Image + image = Image.fromarray(image_array, mode="RGB") + t6 = time.time() + logger.debug(f" Image conversion: {(t6-t5)*1000:.1f}ms") + + # NOTE: No vertical flip needed - rotation already handles telescope orientation + # POSS images use rotation only (180° + roll), so we match that behavior + # The 180° rotation in generate_chart() inverts 
the image for Newtonian telescopes + + # Note: Limiting magnitude display added by add_image_overlays() in generate_chart() + # Note: Pulsating crosshair added separately via add_pulsating_crosshair() + # so base chart can be cached + + t_end = time.time() + logger.debug(f" Total render time: {(t_end-t_start)*1000:.1f}ms") + + return image + + def add_pulsating_crosshair(self, image: Image.Image) -> Image.Image: + """ + Add pulsating crosshair to center of image + Called each frame to animate - does not modify original image + + Args: + image: Base chart image (will be copied) + + Returns: + New image with crosshair overlay + """ + import time + + # Copy image so we don't modify the cached version + result = image.copy() + width, height = result.size + draw = ImageDraw.Draw(result) + + # Center position + cx, cy = width / 2.0, height / 2.0 + + # Pulsate crosshair: full cycle every 2 seconds + pulse_period = 2.0 # seconds + t = time.time() % pulse_period + # Sine wave for smooth pulsation (0.5 to 1.0 range) + pulse_factor = 0.5 + 0.5 * np.sin(2 * np.pi * t / pulse_period) + + # Size pulsates between 3 and 7 pixels + outer = int(3 + 4 * pulse_factor) + inner = 2 # Fixed gap + + # Color pulsates in brightness (32 to 96) + color_intensity = int(32 + 64 * pulse_factor) + marker_color = (color_intensity, 0, 0) + + # Crosshair outline (4 short lines with gap in middle) + draw.line([cx - outer, cy, cx - inner, cy], fill=marker_color, width=1) # Left + draw.line([cx + inner, cy, cx + outer, cy], fill=marker_color, width=1) # Right + draw.line([cx, cy - outer, cx, cy - inner], fill=marker_color, width=1) # Top + draw.line([cx, cy + inner, cx, cy + outer], fill=marker_color, width=1) # Bottom + + return result + + def _draw_star_antialiased_fast(self, image_array, ix, iy, fx, fy, intensity): + """ + Draw star with bilinear anti-aliasing using fast NumPy operations + + Args: + image_array: NumPy array (height, width, 3) + ix, iy: Integer pixel coordinates (top-left) + fx, fy: Fractional offsets (0-1) + intensity: Peak intensity (0-255) + """ + # Bilinear interpolation weights + w00 = (1 - fx) * (1 - fy) # Top-left + w10 = fx * (1 - fy) # Top-right + w01 = (1 - fx) * fy # Bottom-left + w11 = fx * fy # Bottom-right + + # Apply to 2x2 region using NumPy (much faster than getpixel/putpixel) + # Red channel only (index 0) + if w00 > 0.01: + image_array[iy, ix, 0] = min(255, image_array[iy, ix, 0] + int(intensity * w00)) + if w10 > 0.01: + image_array[iy, ix + 1, 0] = min(255, image_array[iy, ix + 1, 0] + int(intensity * w10)) + if w01 > 0.01: + image_array[iy + 1, ix, 0] = min(255, image_array[iy + 1, ix, 0] + int(intensity * w01)) + if w11 > 0.01: + image_array[iy + 1, ix + 1, 0] = min(255, image_array[iy + 1, ix + 1, 0] + int(intensity * w11)) + + def mag_to_intensity(self, mag: float) -> int: + """ + Convert magnitude to red pixel intensity (0-255) + + Args: + mag: Stellar magnitude + + Returns: + Red pixel value (0-255) + """ + if mag < 3: + return 255 + elif mag < 6: + return 200 + elif mag < 9: + return 150 + elif mag < 12: + return 100 + elif mag < 14: + return 75 + else: + return 50 + + @staticmethod + def sqm_to_nelm(sqm: float) -> float: + """ + Convert SQM reading (sky brightness) to NELM (naked eye limiting magnitude) + + Formula: NELM ≈ (SQM - 8.89) / 2 + 0.5 + + Reference: https://www.unihedron.com/projects/darksky/faq.php + Unihedron manufacturer formula for SQM-L devices + + Args: + sqm: Sky Quality Meter reading in mag/arcsec² + + Returns: + Naked Eye Limiting Magnitude + + Examples: + SQM 
22.0 (pristine dark sky) → NELM 7.1 + SQM 21.0 (good dark sky) → NELM 6.6 + SQM 20.0 (rural sky) → NELM 6.1 + SQM 19.0 (suburban) → NELM 5.6 + SQM 18.0 (suburban/urban) → NELM 5.1 + SQM 17.0 (urban) → NELM 4.6 + """ + nelm = (sqm - 8.89) / 2.0 + 0.5 + return nelm + + @staticmethod + def feijth_comello_limiting_magnitude(mv: float, D: float, d: float, M: float, t: float) -> float: + """ + Calculate limiting magnitude using Feijth & Comello formula + + Formula: mg = mv - 2 + 2.5 × log₁₀(√(D² - d²) × M × t) + + Where: + - mv = naked eye limiting magnitude + - D = telescope aperture [cm] + - d = central obstruction diameter [cm] (0 for unobstructed) + - M = magnification + - t = transmission (100% = 1.0, typically 0.5-0.9) + + This practical formula is based on over 100,000 observations by Henk Feijth + and Georg Comello (mid-1990s). Unlike simple aperture formulas, it accounts + for obstruction, magnification, and transmission. + + References: + - https://astrobasics.de/en/basics/physical-quantities/limiting-magnitude/ + - https://www.y-auriga.de/astro/formeln.html (section 14) + - https://fr.wikipedia.org/wiki/Magnitude_limite_visuelle + + Args: + mv: Naked eye limiting magnitude + D: Aperture in cm + d: Central obstruction diameter in cm + M: Magnification + t: Transmission (0-1) + + Returns: + Telescopic limiting magnitude + + Example: + With mv=6.04, D=25cm, d=4cm, M=400, t=0.54 → mg=13.36 + """ + from math import log10, sqrt + + # Effective aperture accounting for central obstruction + # Only the (D² - d²) term is under the square root + effective_aperture = sqrt(D**2 - d**2) + + # Complete formula: mg = mv - 2 + 2.5 × log₁₀(√(D² - d²) × M × t) + mg = mv - 2.0 + 2.5 * log10(effective_aperture * M * t) + return mg + + def get_limiting_magnitude(self, sqm) -> float: + """ + Get limiting magnitude based on config mode (auto or fixed) + + Args: + sqm: SQM state object for sky brightness + + Returns: + Limiting magnitude value + """ + lm_mode = self.config.get_option("obj_chart_lm_mode") + + if lm_mode == "fixed": + # Use fixed limiting magnitude from config + lm = self.config.get_option("obj_chart_lm_fixed") + try: + lm = float(lm) + logger.info(f"Using fixed LM from config: {lm:.1f}") + return lm + except (ValueError, TypeError): + # Invalid fixed value, fall back to auto + logger.warning(f"Invalid fixed LM value: {lm}, falling back to auto") + return self.calculate_limiting_magnitude(sqm) + else: + # Auto mode: calculate based on equipment and sky brightness + return self.calculate_limiting_magnitude(sqm) + + def calculate_limiting_magnitude(self, sqm) -> float: + """ + Calculate limiting magnitude using Feijth & Comello formula + + Converts SQM to NELM, then applies Feijth & Comello formula accounting + for telescope aperture, obstruction, magnification, and transmission. 
+ + Args: + sqm: SQM state object for sky brightness + + Returns: + Limiting magnitude (uncapped - caller caps for catalog queries) + """ + import math + + equipment = self.config.equipment + telescope = equipment.active_telescope + eyepiece = equipment.active_eyepiece + + # Get naked eye limiting magnitude from SQM + if sqm and hasattr(sqm, 'value') and sqm.value: + sqm_value = sqm.value + mv = self.sqm_to_nelm(sqm_value) + else: + sqm_value = 19.5 # Default suburban sky + mv = self.sqm_to_nelm(sqm_value) # ≈ 5.8 + + # Calculate telescopic limiting magnitude + if telescope and telescope.aperture_mm > 0 and eyepiece: + # Convert aperture from mm to cm for formula + D_cm = telescope.aperture_mm / 10.0 + + # Calculate magnification + magnification = telescope.focal_length_mm / eyepiece.focal_length_mm + exit_pupil_mm = telescope.aperture_mm / magnification + + # No obstruction assumed (we don't know the secondary mirror size) + d_cm = 0.0 + + # Transmission (typical value for good optics) + transmission = 0.85 + + # Apply Feijth & Comello formula directly + # The formula already accounts for magnification effects + lm = self.feijth_comello_limiting_magnitude( + mv, D_cm, d_cm, magnification, transmission + ) + + logger.info( + f"LM calculation: mv={mv:.1f} (SQM={sqm_value:.1f}), " + f"aperture={telescope.aperture_mm:.0f}mm, mag={magnification:.1f}x, " + f"exit_pupil={exit_pupil_mm:.1f}mm → LM={lm:.1f}" + ) + elif telescope and telescope.aperture_mm > 0: + # No eyepiece: assume minimum useful magnification (exit pupil = 7mm) + D_cm = telescope.aperture_mm / 10.0 + min_magnification = telescope.aperture_mm / 7.0 + transmission = 0.85 + + lm = self.feijth_comello_limiting_magnitude(mv, D_cm, 0.0, min_magnification, transmission) + logger.info(f"LM calculation: aperture={telescope.aperture_mm}mm (no eyepiece, min mag={min_magnification:.1f}x) → LM={lm:.1f}") + else: + # No telescope: use naked eye + lm = mv + logger.info(f"LM calculation: no telescope, NELM={lm:.1f}") + + # Return uncapped value (caller will cap for queries if needed) + return lm + + def get_cache_key(self, catalog_object) -> str: + """ + Generate cache key for object + eyepiece + limiting magnitude combination + + Args: + catalog_object: CompositeObject + + Returns: + Cache key string + """ + obj_key = f"{catalog_object.catalog_code}{catalog_object.sequence}" + eyepiece = self.config.equipment.active_eyepiece + eyepiece_key = str(eyepiece) if eyepiece else "none" + + # Include limiting magnitude in cache key + sqm = self.shared_state.sqm() + lm = self.get_limiting_magnitude(sqm) + lm_key = f"{lm:.1f}" + + return f"{obj_key}_{eyepiece_key}_lm{lm_key}" + + def invalidate_cache(self): + """Clear chart cache (call when equipment changes)""" + self.chart_cache.clear() + logger.debug("Chart cache invalidated") diff --git a/python/PiFinder/image_utils.py b/python/PiFinder/image_utils.py new file mode 100644 index 000000000..2eb941374 --- /dev/null +++ b/python/PiFinder/image_utils.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +Shared image utility functions for POSS/SDSS images and generated charts +""" + +from PIL import Image, ImageDraw + + +def add_image_overlays( + image, display_class, fov, magnification, eyepiece, burn_in=True, limiting_magnitude=None +): + """ + Add FOV/magnification/eyepiece overlays to image + + This function is shared by: + - POSS/SDSS image display (cat_images.py) + - Generated deep star charts (deep_chart.py) + + Args: + image: PIL Image to modify + display_class: Display 
configuration object + fov: Field of view in degrees + magnification: Telescope magnification + eyepiece: Active eyepiece object + burn_in: Whether to add overlays (default True) + limiting_magnitude: Optional limiting magnitude to display (for generated charts) + + Returns: + Modified PIL Image with overlays added + """ + if not burn_in: + return image + + from PiFinder.ui import ui_utils + + draw = ImageDraw.Draw(image) + + # Top-left: FOV in degrees + ui_utils.shadow_outline_text( + draw, + (1, display_class.titlebar_height - 1), + f"{fov:0.2f}°", + font=display_class.fonts.base, + align="left", + fill=display_class.colors.get(254), + shadow_color=display_class.colors.get(0), + outline=2, + ) + + # Top-right: Magnification + mag_text = f"{magnification:.0f}x" if magnification and magnification > 0 else "?x" + ui_utils.shadow_outline_text( + draw, + ( + display_class.resX - (display_class.fonts.base.width * 4), + display_class.titlebar_height - 1, + ), + mag_text, + font=display_class.fonts.base, + align="right", + fill=display_class.colors.get(254), + shadow_color=display_class.colors.get(0), + outline=2, + ) + + # Top-center: Limiting magnitude (for generated charts) + if limiting_magnitude is not None: + # Show ">17" if exceeds catalog limit, otherwise show actual value + if limiting_magnitude > 17.0: + lm_text = "LM:>17" + else: + lm_text = f"LM:{limiting_magnitude:.1f}" + lm_bbox = draw.textbbox((0, 0), lm_text, font=display_class.fonts.base.font) + lm_width = lm_bbox[2] - lm_bbox[0] + lm_x = (display_class.resX - lm_width) // 2 + + ui_utils.shadow_outline_text( + draw, + (lm_x, display_class.titlebar_height - 1), + lm_text, + font=display_class.fonts.base, + align="left", + fill=display_class.colors.get(254), + shadow_color=display_class.colors.get(0), + outline=2, + ) + + # Bottom-left: Eyepiece name + if eyepiece: + eyepiece_text = f"{eyepiece.focal_length_mm:.0f}mm {eyepiece.name}" + ui_utils.shadow_outline_text( + draw, + (1, display_class.resY - (display_class.fonts.base.height * 1.1)), + eyepiece_text, + font=display_class.fonts.base, + align="left", + fill=display_class.colors.get(128), # Dimmer than FOV/mag + shadow_color=display_class.colors.get(0), + outline=2, + ) + + return image + + +def create_loading_image(display_class, message="Loading...", progress_text=None, progress_percent=0): + """ + Create a placeholder image with loading message and optional progress + + Args: + display_class: Display configuration object + message: Main text to display (default "Loading...") + progress_text: Optional progress status text + progress_percent: Progress percentage (0-100) + + Returns: + PIL Image with centered message and progress + """ + image = Image.new( + "RGB", (display_class.fov_res, display_class.fov_res), (0, 0, 0) + ) + draw = ImageDraw.Draw(image) + + # Draw main message + text_bbox = draw.textbbox((0, 0), message, font=display_class.fonts.large.font) + text_width = text_bbox[2] - text_bbox[0] + text_height = text_bbox[3] - text_bbox[1] + + x = (display_class.fov_res - text_width) // 2 + y = (display_class.fov_res - text_height) // 2 - 10 + + draw.text( + (x, y), + message, + font=display_class.fonts.large.font, + fill=(128, 0, 0), # Medium red for night vision + ) + + # Draw progress text if provided + if progress_text: + progress_bbox = draw.textbbox((0, 0), progress_text, font=display_class.fonts.base.font) + progress_width = progress_bbox[2] - progress_bbox[0] + + px = (display_class.fov_res - progress_width) // 2 + py = y + text_height + 8 + + draw.text( + (px, 
py), + progress_text, + font=display_class.fonts.base.font, + fill=(100, 0, 0), # Dimmer red + ) + + # Draw progress bar if percentage > 0 + if progress_percent > 0: + bar_width = int(display_class.fov_res * 0.6) + bar_height = 4 + bar_x = (display_class.fov_res - bar_width) // 2 + bar_y = display_class.fov_res - 20 + + # Background bar + draw.rectangle( + [bar_x, bar_y, bar_x + bar_width, bar_y + bar_height], + outline=(64, 0, 0), + fill=(32, 0, 0) + ) + + # Progress fill + fill_width = int(bar_width * (progress_percent / 100)) + if fill_width > 0: + draw.rectangle( + [bar_x, bar_y, bar_x + fill_width, bar_y + bar_height], + fill=(128, 0, 0) + ) + + # Percentage text + percent_text = f"{progress_percent}%" + percent_bbox = draw.textbbox((0, 0), percent_text, font=display_class.fonts.base.font) + percent_width = percent_bbox[2] - percent_bbox[0] + + draw.text( + ((display_class.fov_res - percent_width) // 2, bar_y + bar_height + 4), + percent_text, + font=display_class.fonts.base.font, + fill=(100, 0, 0) + ) + + return image diff --git a/python/PiFinder/keyboard_local.py b/python/PiFinder/keyboard_local.py index 3f6028367..3a7533a96 100644 --- a/python/PiFinder/keyboard_local.py +++ b/python/PiFinder/keyboard_local.py @@ -2,6 +2,7 @@ from PiFinder.keyboard_interface import KeyboardInterface import logging from PiFinder.multiproclogging import MultiprocLogging +import sys logger = logging.getLogger("Keyboard.Local") @@ -31,10 +32,15 @@ class KeyboardLocal(KeyboardInterface): def __init__(self, q): try: from PyHotKey import Key, keyboard + logger.info("PyHotKey imported successfully") except ModuleNotFoundError: logger.error("pyhotkey not supported on pi hardware") return + except Exception as e: + logger.error(f"Failed to import PyHotKey: {e}", exc_info=True) + return try: + logger.info("Setting up keyboard bindings...") self.q = q # Configure unmodified keys keyboard.set_magickey_on_release(Key.left, self.callback, self.LEFT) @@ -79,10 +85,11 @@ def __init__(self, q): keyboard.set_magickey_on_release("i", self.callback, self.LNG_UP) keyboard.set_magickey_on_release("k", self.callback, self.LNG_DOWN) keyboard.set_magickey_on_release("l", self.callback, self.LNG_RIGHT) + logger.info("Keyboard bindings set up successfully") except Exception as e: - logger.error("KeyboardLocal.__init__: {}".format(e)) + logger.error("KeyboardLocal.__init__ failed: {}".format(e), exc_info=True) # keyboard.logger = True - logger.debug("KeyboardLocal.__init__") + logger.info("KeyboardLocal.__init__ complete") def callback(self, key): self.q.put(key) @@ -90,9 +97,70 @@ def callback(self, key): def run_keyboard(q, shared_state, log_queue, bloom_remap=False): MultiprocLogging.configurer(log_queue) - KeyboardLocal(q) - while True: - # the KeyboardLocal class has callbacks to handle - # keypresses. 
We just need to not terminate here - time.sleep(1) + logger.info("Keyboard process starting...") + + # Try pynput directly first (more reliable on macOS) + try: + from pynput import keyboard as pynput_keyboard + logger.info("Using pynput for keyboard handling") + + # Key mapping + key_map = { + pynput_keyboard.Key.left: KeyboardInterface.LEFT, + pynput_keyboard.Key.up: KeyboardInterface.UP, + pynput_keyboard.Key.down: KeyboardInterface.DOWN, + pynput_keyboard.Key.right: KeyboardInterface.RIGHT, + 'q': KeyboardInterface.PLUS, + 'a': KeyboardInterface.MINUS, + 'z': KeyboardInterface.SQUARE, + 'm': KeyboardInterface.LNG_SQUARE, + '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, + '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, + 'w': KeyboardInterface.ALT_PLUS, + 's': KeyboardInterface.ALT_MINUS, + 'd': KeyboardInterface.ALT_LEFT, + 'r': KeyboardInterface.ALT_UP, + 'f': KeyboardInterface.ALT_DOWN, + 'g': KeyboardInterface.ALT_RIGHT, + 'e': KeyboardInterface.ALT_0, + 'j': KeyboardInterface.LNG_LEFT, + 'i': KeyboardInterface.LNG_UP, + 'k': KeyboardInterface.LNG_DOWN, + 'l': KeyboardInterface.LNG_RIGHT, + } + + def on_release(key): + try: + # Handle special keys + if key in key_map: + q.put(key_map[key]) + logger.debug(f"Key released: {key} -> {key_map[key]}") + # Handle character keys + elif hasattr(key, 'char') and key.char in key_map: + q.put(key_map[key.char]) + logger.debug(f"Key released: {key.char} -> {key_map[key.char]}") + except Exception as e: + logger.error(f"Error handling key: {e}") + + # Start listener + listener = pynput_keyboard.Listener(on_release=on_release) + listener.start() + logger.info("pynput keyboard listener started") + + while True: + time.sleep(1) + + except Exception as e: + logger.error(f"pynput failed, falling back to PyHotKey: {e}", exc_info=True) + + # Fallback to PyHotKey + try: + KeyboardLocal(q) + logger.info("KeyboardLocal initialized successfully") + except Exception as e2: + logger.error(f"Failed to initialize KeyboardLocal: {e2}", exc_info=True) + return + + while True: + time.sleep(1) diff --git a/python/PiFinder/keyboard_none.py b/python/PiFinder/keyboard_none.py index 96d433077..e65efa662 100644 --- a/python/PiFinder/keyboard_none.py +++ b/python/PiFinder/keyboard_none.py @@ -20,7 +20,7 @@ def callback(self, key): self.q.put(key) -def run_keyboard(q, shared_state, log_queue): +def run_keyboard(q, shared_state, log_queue, bloom_remap=False): MultiprocLogging.configurer(log_queue) KeyboardNone(q) diff --git a/python/PiFinder/main.py b/python/PiFinder/main.py index 28ba07de4..02414315a 100644 --- a/python/PiFinder/main.py +++ b/python/PiFinder/main.py @@ -545,9 +545,52 @@ def main( # stop_profiling(profiler, startup_profile_start) log_time = True + + # Set up Pygame event handling if using Pygame display + pygame_events_enabled = display_hardware in ["pg_128", "pg_320"] + if pygame_events_enabled: + import pygame + from PiFinder.keyboard_interface import KeyboardInterface + logger.info("Pygame event polling enabled for keyboard input") + + # Key mapping for Pygame + pygame_key_map = { + pygame.K_LEFT: KeyboardInterface.LEFT, + pygame.K_UP: KeyboardInterface.UP, + pygame.K_DOWN: KeyboardInterface.DOWN, + pygame.K_RIGHT: KeyboardInterface.RIGHT, + pygame.K_q: KeyboardInterface.PLUS, + pygame.K_a: KeyboardInterface.MINUS, + pygame.K_z: KeyboardInterface.SQUARE, + pygame.K_m: KeyboardInterface.LNG_SQUARE, + pygame.K_0: 0, pygame.K_1: 1, pygame.K_2: 2, pygame.K_3: 3, pygame.K_4: 4, + pygame.K_5: 5, pygame.K_6: 6, pygame.K_7: 7, pygame.K_8: 8, pygame.K_9: 9, + pygame.K_w: 
KeyboardInterface.ALT_PLUS, + pygame.K_s: KeyboardInterface.ALT_MINUS, + pygame.K_d: KeyboardInterface.ALT_LEFT, + pygame.K_r: KeyboardInterface.ALT_UP, + pygame.K_f: KeyboardInterface.ALT_DOWN, + pygame.K_g: KeyboardInterface.ALT_RIGHT, + pygame.K_e: KeyboardInterface.ALT_0, + pygame.K_j: KeyboardInterface.LNG_LEFT, + pygame.K_i: KeyboardInterface.LNG_UP, + pygame.K_k: KeyboardInterface.LNG_DOWN, + pygame.K_l: KeyboardInterface.LNG_RIGHT, + } + # Start of main except handler / loop try: while True: + # Poll Pygame events if using Pygame display + if pygame_events_enabled: + for event in pygame.event.get(): + if event.type == pygame.KEYDOWN: + if event.key in pygame_key_map: + keyboard_queue.put(pygame_key_map[event.key]) + elif event.type == pygame.QUIT: + logger.info("Pygame window closed, exiting...") + raise KeyboardInterrupt + # Console try: console_msg = console_queue.get(block=False) @@ -629,6 +672,9 @@ def main( except queue.Empty: pass + # Deep catalog loading removed - now lazy-loads on first chart view + # (cat_images.py triggers loading when needed) + # ui queue try: ui_command = ui_queue.get(block=False) @@ -1013,17 +1059,18 @@ def main( rlogger.warn("not using camera") from PiFinder import camera_none as camera # type: ignore[no-redef] - if args.keyboard.lower() == "pi": - from PiFinder import keyboard_pi as keyboard - + # When using Pygame display, use built-in event polling (no keyboard subprocess needed) + if display_hardware in ["pg_128", "pg_320"]: + from PiFinder import keyboard_none as keyboard + rlogger.info("using pygame built-in keyboard (no subprocess)") + elif args.keyboard.lower() == "pi": + from PiFinder import keyboard_pi as keyboard # type: ignore[no-redef] rlogger.info("using pi keyboard hat") elif args.keyboard.lower() == "local": from PiFinder import keyboard_local as keyboard # type: ignore[no-redef] - rlogger.info("using local keyboard") elif args.keyboard.lower() == "none": - from PiFinder import keyboard_none as keyboard # type: ignore[no-redef] - + from PiFinder import keyboard_none as keyboard rlogger.warning("using no keyboard") if args.lang: diff --git a/python/PiFinder/solver.py b/python/PiFinder/solver.py index 07a901528..b4b2b4e7f 100644 --- a/python/PiFinder/solver.py +++ b/python/PiFinder/solver.py @@ -178,13 +178,6 @@ def update_sqm_dual_pipeline( last_update=datetime.now().isoformat(), ) shared_state.set_sqm(new_sqm_state) - - raw_str = ( - f", raw={sqm_value_raw:.2f}" - if sqm_value_raw is not None - else ", raw=N/A" - ) - logger.info(f"SQM updated: processed={sqm_value_processed:.2f}{raw_str}") return True except Exception as e: @@ -344,9 +337,7 @@ def solver( ) if len(centroids) == 0: - if log_no_stars_found: - logger.info("No stars found, skipping (Logged only once)") - log_no_stars_found = False + log_no_stars_found = False # Clear solve results to mark solve as failed (otherwise old values persist) solved["RA"] = None solved["Dec"] = None @@ -390,11 +381,12 @@ def solver( ) # Don't clutter printed solution with these fields. 
- del solution["matched_catID"] - del solution["pattern_centroids"] - del solution["epoch_equinox"] - del solution["epoch_proper_motion"] - del solution["cache_hit_fraction"] + # Use pop() to safely remove keys that may not exist + solution.pop("matched_catID", None) + solution.pop("pattern_centroids", None) + solution.pop("epoch_equinox", None) + solution.pop("epoch_proper_motion", None) + solution.pop("cache_hit_fraction", None) solved |= solution @@ -427,12 +419,6 @@ def solver( # Mark successful solve - use same timestamp as last_solve_attempt for comparison solved["last_solve_success"] = solved["last_solve_attempt"] - logger.info( - f"Solve SUCCESS - {len(centroids)} centroids → " - f"{solved.get('Matches', 0)} matches, " - f"RMSE: {solved.get('RMSE', 0):.1f}px" - ) - # See if we are waiting for alignment if align_ra != 0 and align_dec != 0: if solved.get("x_target") is not None: diff --git a/python/PiFinder/sqm/noise_floor.py b/python/PiFinder/sqm/noise_floor.py index 90f6f2784..046dde6b5 100644 --- a/python/PiFinder/sqm/noise_floor.py +++ b/python/PiFinder/sqm/noise_floor.py @@ -173,7 +173,7 @@ def estimate_noise_floor( logger.debug("Requesting zero-second calibration sample") if not is_valid: - logger.warning( + logger.debug( f"Noise floor estimate may be invalid: {reason} " f"(floor={noise_floor:.1f}, median={np.median(image):.1f})" ) diff --git a/python/PiFinder/sqm/sqm.py b/python/PiFinder/sqm/sqm.py index 84a4e2d9e..be71c0af5 100644 --- a/python/PiFinder/sqm/sqm.py +++ b/python/PiFinder/sqm/sqm.py @@ -213,7 +213,7 @@ def _calculate_mzero( for flux, mag in zip(star_fluxes, star_mags): if flux <= 0: - logger.warning( + logger.debug( f"Skipping star with flux={flux:.1f} ADU (mag={mag:.2f})" ) mzeros.append(None) # Keep array aligned @@ -676,7 +676,7 @@ def update_sqm_if_needed( last_update=datetime.now().isoformat(), ) shared_state.set_sqm(new_sqm_state) - logger.debug(f"SQM: {sqm_value:.2f} mag/arcsec²") + # logger.debug(f"SQM: {sqm_value:.2f} mag/arcsec²") return True else: logger.warning("SQM calculation returned None") diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py new file mode 100644 index 000000000..12d49c18c --- /dev/null +++ b/python/PiFinder/star_catalog.py @@ -0,0 +1,808 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +HEALPix-indexed star catalog loader with background loading and CPU throttling + +This module provides efficient loading of deep star catalogs for chart generation. 
+Features: +- Background loading with thread safety +- CPU throttling to avoid blocking other processes +- LRU tile caching +- Hemisphere filtering for memory efficiency +- Proper motion corrections +""" + +import json +import logging +import struct +import threading +import time +from datetime import datetime +from enum import Enum +from pathlib import Path +from typing import Any, Dict, List, Optional, Set, Tuple + +import numpy as np + +# Import healpy at module level to avoid first-use delay +# This ensures the slow import happens during initialization, not during first chart render +try: + import healpy as hp # type: ignore[import-not-found] + _HEALPY_AVAILABLE = True +except ImportError: + hp = None + _HEALPY_AVAILABLE = False + +logger = logging.getLogger("PiFinder.StarCatalog") + +# Star record format (must match healpix_builder.py) +STAR_RECORD_FORMAT = " dict: + """ + Load binary spatial index + + Binary format: + - Header: [version: 4][num_tiles: 4][nside: 4] + - Per tile: [tile_id: 4][num_bands: 1][bands: (mag_min:1, mag_max:1)*num_bands] + + Returns: + Dict mapping tile_id -> [(mag_min, mag_max), ...] + """ + with open(bin_path, "rb") as f: + # Read header + header = f.read(12) + version, num_tiles, nside = struct.unpack(" Optional[Set[int]]: + """ + Calculate HEALPix tiles visible from observer latitude + + DISABLED: Too slow (iterates 3M+ pixels) + TODO: Pre-compute hemisphere mask during catalog build + + Args: + observer_lat: Observer latitude in degrees + + Returns: + None (full sky always loaded for now) + """ + return None + + def _preload_mag_band(self, mag_min: float, mag_max: float): + """ + Preload all tiles for a magnitude band + + Args: + mag_min: Minimum magnitude + mag_max: Maximum magnitude + """ + band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" + if not band_dir.exists(): + return + + # Get all tile files in this band + tile_files = sorted(band_dir.glob("tile_*.bin")) + + for tile_file in tile_files: + # Extract tile ID from filename + tile_id = int(tile_file.stem.split("_")[1]) + + # Filter by hemisphere if applicable + if self.visible_tiles and tile_id not in self.visible_tiles: + continue + + # Load tile + self._load_tile_from_file(tile_file, mag_min, mag_max) + + # CPU throttle: 10ms pause between tiles + # (50ms was too conservative, slowing down loading significantly) + time.sleep(0.01) + + def get_stars_for_fov( + self, + ra_deg: float, + dec_deg: float, + fov_deg: float, + mag_limit: Optional[float] = None, + ) -> List[Tuple[float, float, float]]: + """ + Query stars in field of view + + Blocks if state == LOADING (waits for load to complete) + Returns empty list if state == NOT_LOADED + + Args: + ra_deg: Center RA in degrees + dec_deg: Center Dec in degrees + fov_deg: Field of view in degrees + mag_limit: Limiting magnitude (uses catalog default if None) + + Returns: + List of (ra, dec, mag) tuples with proper motion corrected + """ + if self.state == CatalogState.NOT_LOADED: + logger.warning("Catalog not loaded") + return [] + + if self.state == CatalogState.LOADING: + # Wait for loading to complete (with timeout) + logger.info("Waiting for catalog to finish loading...") + timeout = 30 # seconds + start = time.time() + while self.state == CatalogState.LOADING: + time.sleep(0.1) + if time.time() - start > timeout: + logger.error("Catalog loading timeout") + return [] + + # State is READY - metadata must be loaded by now + assert self.metadata is not None, "metadata should be loaded when state is READY" + assert self.nside is not 
None, "nside should be set when state is READY" + + mag_limit = mag_limit or self.limiting_magnitude + + if not _HEALPY_AVAILABLE: + logger.error("healpy not installed") + return [] + + # Calculate HEALPix tiles covering FOV + # Query larger area to account for rectangular screen and rotation + # Diagonal of square is sqrt(2) * side, with rotation could be any angle + vec = hp.ang2vec(ra_deg, dec_deg, lonlat=True) + # Use full diagonal + margin to ensure corners are covered even when rotated + radius_rad = np.radians(fov_deg * 0.85) # sqrt(2)/2 ≈ 0.707, add extra for rotation + tiles = hp.query_disc(self.nside, vec, radius_rad) + logger.info(f"HEALPix: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") + + # Filter by visible hemisphere + if self.visible_tiles: + tiles = [t for t in tiles if t in self.visible_tiles] + + # Load stars from tiles (batch load for better performance) + stars: List[Tuple[float, float, float]] = [] + tile_star_counts = {} + + # Try batch loading if catalog is compact format + is_compact = self.metadata.get("format") == "compact" + if is_compact and len(tiles) > 10: + # Batch load is much faster for many tiles + # Note: batch loading returns PM-corrected (ra, dec, mag) tuples + logger.info(f"Using BATCH loading for {len(tiles)} tiles") + import time + t_batch_start = time.time() + stars = self._load_tiles_batch(tiles, mag_limit) + t_batch_end = time.time() + logger.info(f"Batch load complete: {len(stars)} stars in {(t_batch_end-t_batch_start)*1000:.1f}ms") + tile_star_counts = {t: 0 for t in tiles} # Don't track individual counts for batch + else: + # Load one by one (better for small queries or legacy format) + logger.info(f"Using SINGLE-TILE loading for {len(tiles)} tiles (compact={is_compact})") + stars_raw: List[Tuple[float, float, float, float, float]] = [] + for tile_id in tiles: + tile_stars = self._load_tile_data(tile_id, mag_limit) + tile_star_counts[tile_id] = len(tile_stars) + stars_raw.extend(tile_stars) + + # Log tile loading stats + if tile_star_counts: + logger.debug(f"Loaded from {len(tile_star_counts)} tiles: " + + f"min={min(tile_star_counts.values())} max={max(tile_star_counts.values())} " + + f"total={sum(tile_star_counts.values())}") + + # Apply proper motion correction (for non-batch path only) + stars = self._apply_proper_motion(stars_raw) + + return stars + + def _load_tile_data( + self, tile_id: int, mag_limit: float + ) -> List[Tuple[float, float, float, float, float]]: + """ + Load star data for a HEALPix tile + + Args: + tile_id: HEALPix tile ID + mag_limit: Maximum magnitude to load + + Returns: + List of (ra, dec, mag, pmra, pmdec) tuples + """ + assert self.metadata is not None, "metadata must be loaded before calling _load_tile_data" + + cache_key = (tile_id, mag_limit) + + # Check cache + with self.cache_lock: + if cache_key in self.tile_cache: + return self.tile_cache[cache_key] + + # Load from disk + stars = [] + + # Check catalog format + is_compact = self.metadata.get("format") == "compact" + + # Determine which magnitude bands to load + for mag_band_info in self.metadata.get("mag_bands", []): + mag_min = mag_band_info["min"] + mag_max = mag_band_info["max"] + + if mag_min >= mag_limit: + continue # Band too faint + + band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" + + if is_compact: + # Compact format: read from consolidated file using index + tile_stars = self._load_tile_compact(band_dir, tile_id, mag_min, mag_max) + else: + # Legacy format: one file per tile + tile_file = 
band_dir / f"tile_{tile_id:06d}.bin" + if tile_file.exists(): + tile_stars = self._load_tile_from_file(tile_file, mag_min, mag_max) + else: + tile_stars = [] + + # Filter by magnitude + tile_stars = [s for s in tile_stars if s[2] <= mag_limit] + stars.extend(tile_stars) + + # Cache result + with self.cache_lock: + self.tile_cache[cache_key] = stars + # Simple cache size management (keep last 100 tiles) + if len(self.tile_cache) > 100: + # Remove oldest (first) entry + oldest_key = next(iter(self.tile_cache)) + del self.tile_cache[oldest_key] + + return stars + + def _load_tile_from_file( + self, tile_file: Path, mag_min: float, mag_max: float + ) -> List[Tuple[float, float, float, float, float]]: + """ + Load stars from a tile file + + Args: + tile_file: Path to tile binary file + mag_min: Minimum magnitude in this band + mag_max: Maximum magnitude in this band + + Returns: + List of (ra, dec, mag, pmra, pmdec) tuples + """ + if not _HEALPY_AVAILABLE: + return [] + + stars = [] + + with open(tile_file, "rb") as f: + while True: + data = f.read(STAR_RECORD_SIZE) + if len(data) < STAR_RECORD_SIZE: + break + + # Decode record + healpix_pixel, ra_offset_encoded, dec_offset_encoded, mag_encoded, pmra_encoded, pmdec_encoded = ( + struct.unpack(STAR_RECORD_FORMAT, data) + ) + + # Mask to 24 bits + healpix_pixel = healpix_pixel & 0xFFFFFF + + # Get pixel center coordinates + pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) + + # Calculate pixel size + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + + # Decode offsets + ra_offset_arcsec = (ra_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (dec_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + + # Calculate actual Dec FIRST (needed for RA cosine correction) + dec = pixel_dec + dec_offset_arcsec / 3600.0 + + # Calculate actual RA using the star's actual declination (not pixel center) + # This matches the encoder which uses the star's actual dec for compression + ra = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(dec)) + + mag = mag_encoded / 10.0 # 0.1 mag precision + pmra = pmra_encoded * 50 # mas/year + pmdec = pmdec_encoded * 50 # mas/year + + stars.append((ra, dec, mag, pmra, pmdec)) + + return stars + + def _load_tile_compact( + self, band_dir: Path, tile_id: int, mag_min: float, mag_max: float + ) -> List[Tuple[float, float, float, float, float]]: + """ + Load stars from compact format (consolidated tiles.bin + index.json) + + Args: + band_dir: Magnitude band directory + tile_id: HEALPix tile ID + mag_min: Minimum magnitude + mag_max: Maximum magnitude + + Returns: + List of (ra, dec, mag, pmra, pmdec) tuples + """ + if not _HEALPY_AVAILABLE: + return [] + + # Try binary index first, fall back to JSON for backward compat + index_file_bin = band_dir / "index.bin" + index_file_json = band_dir / "index.json" + tiles_file = band_dir / "tiles.bin" + + if not tiles_file.exists(): + return [] + + # Determine index format + if index_file_bin.exists(): + index_file = index_file_bin + is_binary = True + elif index_file_json.exists(): + index_file = index_file_json + is_binary = False + else: + return [] + + # Load index (cached per band) + tile_key = str(tile_id) + + cache_key = f"index_{mag_min}_{mag_max}" + if cache_key not in self._index_cache: + if is_binary: + self._index_cache[cache_key] = self._read_binary_index(index_file) + else: + with open(index_file, "r") as f: + self._index_cache[cache_key] = json.load(f) + 
+ index = self._index_cache[cache_key] + + if tile_key not in index: + return [] # No stars in this tile + + # Get tile offset and size + tile_info = index[tile_key] + offset = tile_info["offset"] + size = tile_info["size"] + compressed_size = tile_info.get("compressed_size") + + # Read tile data + stars = [] + with open(tiles_file, "rb") as f: + f.seek(offset) + + if compressed_size: + # Compressed tile - decompress in memory + import zlib + compressed_data = f.read(compressed_size) + data = zlib.decompress(compressed_data) + else: + # Uncompressed tile + data = f.read(size) + + # Decode all records in this tile + num_records = len(data) // STAR_RECORD_SIZE + for i in range(num_records): + record_data = data[i * STAR_RECORD_SIZE : (i + 1) * STAR_RECORD_SIZE] + + healpix_pixel, ra_offset_encoded, dec_offset_encoded, mag_encoded, pmra_encoded, pmdec_encoded = ( + struct.unpack(STAR_RECORD_FORMAT, record_data) + ) + + # Mask to 24 bits + healpix_pixel = healpix_pixel & 0xFFFFFF + + # Get pixel center coordinates + pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) + + # Calculate pixel size + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + + # Decode offsets + ra_offset_arcsec = (ra_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (dec_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + + # Calculate actual Dec FIRST (needed for RA cosine correction) + dec = pixel_dec + dec_offset_arcsec / 3600.0 + + # Calculate actual RA using the star's actual declination (not pixel center) + # This matches the encoder which uses the star's actual dec for compression + ra = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(dec)) + + mag = mag_encoded / 10.0 + pmra = pmra_encoded * 50 + pmdec = pmdec_encoded * 50 + + stars.append((ra, dec, mag, pmra, pmdec)) + + return stars + + def _read_binary_index(self, index_file: Path) -> dict: + """ + Read binary index file + + Format v1 (uncompressed): + Header: [version:4][num_tiles:4] + Per tile: [tile_id:4][offset:8][size:4] + + Format v2 (compressed): + Header: [version:4][num_tiles:4] + Per tile: [tile_id:4][offset:8][compressed_size:4][uncompressed_size:4] + + Returns: + Dict mapping tile_id (as string) -> {"offset": int, "size": int, "compressed_size": int (optional)} + """ + index = {} + + with open(index_file, "rb") as f: + # Read header + header = f.read(8) + version, num_tiles = struct.unpack(" List[Tuple[float, float, float]]: + """ + Apply proper motion corrections from J2016.0 to current epoch + + Shows stars at their current positions in the sky (today), not historical + J2000 positions. This provides the most accurate representation for + real-time telescope pointing. 
+ + Args: + stars: List of (ra, dec, mag, pmra, pmdec) tuples in J2016.0 + + Returns: + List of (ra, dec, mag) tuples with positions corrected to current epoch + """ + # Calculate years from J2016.0 to current date + current_year = datetime.now().year + (datetime.now().timetuple().tm_yday / 365.25) + years_elapsed = current_year - 2016.0 + + corrected = [] + for ra, dec, mag, pmra, pmdec in stars: + # Apply proper motion forward to current epoch + # pmra is in mas/year and needs cos(dec) correction for RA + ra_correction = (pmra / 1000 / 3600) / np.cos(np.radians(dec)) * years_elapsed + dec_correction = (pmdec / 1000 / 3600) * years_elapsed + + ra_corrected = ra + ra_correction + dec_corrected = dec + dec_correction + + # Keep dec in valid range + dec_corrected = max(-90, min(90, dec_corrected)) + + corrected.append((ra_corrected, dec_corrected, mag)) + + return corrected + + def _load_tiles_batch( + self, tile_ids: List[int], mag_limit: float + ) -> List[Tuple[float, float, float]]: + """ + Batch load multiple tiles efficiently (compact format only) + Much faster than loading tiles one-by-one due to reduced I/O overhead + + Args: + tile_ids: List of HEALPix tile IDs + mag_limit: Maximum magnitude + + Returns: + List of (ra, dec, mag) tuples (already PM-corrected) + """ + assert self.metadata is not None, "metadata must be loaded before calling _load_tiles_batch" + + if not _HEALPY_AVAILABLE: + return [] + + all_stars = [] + + # Process each magnitude band + for mag_band_info in self.metadata.get("mag_bands", []): + mag_min = mag_band_info["min"] + mag_max = mag_band_info["max"] + + if mag_min >= mag_limit: + continue # Skip faint bands + + band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" + index_file_bin = band_dir / "index.bin" + index_file_json = band_dir / "index.json" + tiles_file = band_dir / "tiles.bin" + + if not tiles_file.exists(): + continue + + # Load index + cache_key = f"index_{mag_min}_{mag_max}" + if not hasattr(self, '_index_cache'): + self._index_cache = {} + + if cache_key not in self._index_cache: + if index_file_bin.exists(): + self._index_cache[cache_key] = self._read_binary_index(index_file_bin) + elif index_file_json.exists(): + with open(index_file_json, "r") as f: + self._index_cache[cache_key] = json.load(f) + else: + continue + + index = self._index_cache[cache_key] + + # Collect all tile read operations + read_ops = [] + for tile_id in tile_ids: + tile_key = str(tile_id) + if tile_key in index: + tile_info = index[tile_key] + read_ops.append((tile_id, tile_info)) + + if not read_ops: + continue + + # Sort by offset to minimize seeks + read_ops.sort(key=lambda x: x[1]["offset"]) + + # Optimize: Read data in larger sequential chunks when possible + # Group tiles that are close together (within 100KB) + MAX_GAP = 100 * 1024 # 100KB gap tolerance + + # Open file once and read all tiles + with open(tiles_file, "rb") as f: + i = 0 + while i < len(read_ops): + tile_id, tile_info = read_ops[i] + offset = tile_info["offset"] + compressed_size = tile_info.get("compressed_size") + size = tile_info["size"] + + # Check if next tiles are sequential (within gap tolerance) + chunk_end = offset + (compressed_size or size) + tiles_in_chunk = [(tile_id, tile_info)] + + j = i + 1 + while j < len(read_ops): + next_tile_id, next_tile_info = read_ops[j] + next_offset = next_tile_info["offset"] + + # If next tile is within gap tolerance, include in chunk + if next_offset - chunk_end <= MAX_GAP: + tiles_in_chunk.append((next_tile_id, next_tile_info)) + next_size = 
next_tile_info.get("compressed_size") or next_tile_info["size"] + chunk_end = next_offset + next_size + j += 1 + else: + break + + # Read entire chunk at once + f.seek(offset) + chunk_data = f.read(chunk_end - offset) + + # Process each tile in the chunk + for tile_id, tile_info in tiles_in_chunk: + tile_offset = tile_info["offset"] - offset # Relative offset in chunk + compressed_size = tile_info.get("compressed_size") + size = tile_info["size"] + + if compressed_size: + import zlib + compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] + data = zlib.decompress(compressed_data) + else: + data = chunk_data[tile_offset:tile_offset + size] + + # Decode stars in this tile + num_records = len(data) // STAR_RECORD_SIZE + for k in range(num_records): + record_data = data[k * STAR_RECORD_SIZE : (k + 1) * STAR_RECORD_SIZE] + + healpix_pixel, ra_offset_encoded, dec_offset_encoded, mag_encoded, pmra_encoded, pmdec_encoded = ( + struct.unpack(STAR_RECORD_FORMAT, record_data) + ) + + # Mask to 24 bits + healpix_pixel = healpix_pixel & 0xFFFFFF + + # Get pixel center + pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) + + # Decode position + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + + ra_offset_arcsec = (ra_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (dec_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + + dec = pixel_dec + dec_offset_arcsec / 3600.0 + ra = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(dec)) + + mag = mag_encoded / 10.0 + pmra = pmra_encoded * 50 + pmdec = pmdec_encoded * 50 + + # Filter by magnitude + if mag <= mag_limit: + all_stars.append((ra, dec, mag, pmra, pmdec)) + + # Move to next chunk + i = j + + # Apply proper motion + return self._apply_proper_motion(all_stars) diff --git a/python/PiFinder/ui/lm_entry.py b/python/PiFinder/ui/lm_entry.py new file mode 100644 index 000000000..4cf35230a --- /dev/null +++ b/python/PiFinder/ui/lm_entry.py @@ -0,0 +1,222 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +Limiting Magnitude Entry UI + +Allows user to enter a fixed limiting magnitude value (e.g., 14.5) +with one decimal place precision. +""" + +from PIL import Image, ImageDraw +from PiFinder.ui.base import UIModule + + +class UILMEntry(UIModule): + """ + UI for entering limiting magnitude value + + Controls: + - 0-9: Enter digits + - Up/Down: Move cursor left/right between digits + - -: Delete digit (backspace) + - Right: Accept (save and return) + - Left: Cancel (discard and return) + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.config_option = self.item_definition.get("config_option", "obj_chart_lm_fixed") + + # Start with placeholder/blank value for user to fill in + # Store as string for editing: format is " . 
" (spaces for digits) + self.digits = [' ', ' ', '.', ' '] # Two digits, decimal, one digit + + # Cursor position (0-3 for "XX.X" format) + # Position 2 is the decimal point (not editable) + self.cursor_pos = 0 + + self.width = 128 + self.height = 128 + self.screen = Image.new("RGB", (self.width, self.height), "black") + + def update(self, force=False): + """Render the LM entry screen""" + self.screen = Image.new("RGB", (self.width, self.height), "black") + draw = ImageDraw.Draw(self.screen) + + # Title + title = "Set Limiting Mag" + title_bbox = draw.textbbox((0, 0), title, font=self.fonts.base.font) + title_width = title_bbox[2] - title_bbox[0] + title_x = (self.width - title_width) // 2 + draw.text( + (title_x, 5), + title, + font=self.fonts.base.font, + fill=self.colors.get(128) + ) + + # Display current value with cursor + value_y = (self.height - self.fonts.large.height) // 2 - 10 + + # Use fixed-width spacing for consistent alignment + char_width = self.fonts.large.width # Fixed character width + total_width = char_width * len(self.digits) + + # Center the entire value + start_x = (self.width - total_width) // 2 + + # Draw each character + for i, char in enumerate(self.digits): + x_pos = start_x + (i * char_width) + + # Display character or underscore for empty + display_char = char if char != ' ' else '_' + + # Highlight cursor position (but not the decimal point) + if i == self.cursor_pos and char != '.': + # Draw filled rectangle background + draw.rectangle( + [x_pos - 2, value_y - 2, x_pos + char_width + 2, value_y + self.fonts.large.height + 2], + fill=self.colors.get(255), + outline=self.colors.get(255), + width=2 + ) + # Draw text in inverse color + text_color = self.colors.get(0) + else: + text_color = self.colors.get(255) + + draw.text( + (x_pos, value_y), + display_char, + font=self.fonts.large.font, + fill=text_color + ) + + # Icons (matching radec_entry style) + arrow_icons = "󰹺" + back_icon = "" + go_icon = "" + + # Legends at bottom (two lines) + bar_y = self.height - (self.fonts.base.height * 2) - 4 + + # Draw separator line + draw.line( + [(2, bar_y), (self.width - 2, bar_y)], + fill=self.colors.get(128), + width=1 + ) + + # Line 1: Navigation + line1 = f"{arrow_icons}Nav" + draw.text((2, bar_y + 2), line1, font=self.fonts.base.font, fill=self.colors.get(128)) + + # Line 2: Actions + line2 = f"{back_icon}Cancel {go_icon}Accept -Del" + draw.text((2, bar_y + 12), line2, font=self.fonts.base.font, fill=self.colors.get(128)) + + return self.screen, None + + def key_up(self): + """Move cursor left""" + if self.cursor_pos > 0: + self.cursor_pos -= 1 + # Skip over decimal point + if self.cursor_pos == 2: + self.cursor_pos = 1 + return True + + def key_down(self): + """Move cursor right""" + if self.cursor_pos < 3: + self.cursor_pos += 1 + # Skip over decimal point + if self.cursor_pos == 2: + self.cursor_pos = 3 + return True + + def key_number(self, number): + """Enter digit 0-9 at cursor position""" + if 0 <= number <= 9: + # Don't allow editing the decimal point + if self.cursor_pos == 2: + return False + + # Replace digit at cursor position + self.digits[self.cursor_pos] = str(number) + + # Move cursor to next position after entering digit + if self.cursor_pos < 3: + self.cursor_pos += 1 + # Skip over decimal point + if self.cursor_pos == 2: + self.cursor_pos = 3 + + return True + return False + + def key_minus(self): + """Delete digit at cursor position (replace with space)""" + if self.cursor_pos == 2: + # Can't delete decimal point + return False + + # Replace with 
space (blank) + self.digits[self.cursor_pos] = ' ' + return True + + def key_left(self): + """Cancel - return without saving""" + return True + + def key_right(self): + """Accept - save value and return""" + import logging + logger = logging.getLogger("UILMEntry") + logger.info(">>> key_right() called!") + + try: + # Convert digits to string, replacing spaces with nothing won't work + # We need at least one digit before decimal and one after + value_str = "".join(self.digits).strip() + logger.info(f"LM entry: digits={self.digits}, value_str='{value_str}'") + + # Check if we have any actual digits (not just spaces and decimal) + if value_str.replace('.', '').replace(' ', '') == '': + # No digits entered, reject + logger.info("LM entry rejected: no digits") + return False + + # Replace remaining spaces with 0 for parsing + value_str = value_str.replace(' ', '0') + final_value = float(value_str) + logger.info(f"LM entry: parsed value={final_value}") + + # Validate range + if final_value < 5.0 or final_value > 20.0: + # Out of range, reject + logger.info(f"LM entry rejected: out of range (5.0-20.0)") + return False + + logger.info(f"LM entry accepted: {final_value}") + self.config_object.set_option(self.config_option, final_value) + + # Also set the mode to "fixed" since user entered a value + self.config_object.set_option("obj_chart_lm_mode", "fixed") + + # No need to invalidate cache - cache key includes LM so different + # LM values will automatically get separate cache entries + + logger.info("Returning True to exit LM entry screen") + return True + except ValueError as e: + # Invalid value, don't accept + logger.error(f"LM entry ValueError: {e}") + return False + + def active(self): + """Called when screen becomes active""" + return False diff --git a/python/PiFinder/ui/log.py b/python/PiFinder/ui/log.py index a095aca17..ab3966ebc 100644 --- a/python/PiFinder/ui/log.py +++ b/python/PiFinder/ui/log.py @@ -47,8 +47,16 @@ def __init__(self, *args, **kwargs): roll = 0 if solution: roll = solution["Roll"] + + # Get chart generator singleton for deep chart support + from PiFinder.deep_chart import get_chart_generator + chart_gen = get_chart_generator(self.config_object, self.shared_state) + self.object_image = cat_images.get_display_image( - self.object, "POSS", 1, roll, self.display_class, burn_in=False + self.object, "POSS", 1, roll, self.display_class, burn_in=False, + config_object=self.config_object, + shared_state=self.shared_state, + chart_generator=chart_gen ) self.menu_index = 1 # Observability diff --git a/python/PiFinder/ui/menu_structure.py b/python/PiFinder/ui/menu_structure.py index b6783fc82..b10fa85b5 100644 --- a/python/PiFinder/ui/menu_structure.py +++ b/python/PiFinder/ui/menu_structure.py @@ -14,6 +14,7 @@ from PiFinder.ui.equipment import UIEquipment from PiFinder.ui.location_list import UILocationList from PiFinder.ui.radec_entry import UIRADecEntry +from PiFinder.ui.lm_entry import UILMEntry import PiFinder.ui.callbacks as callbacks @@ -823,6 +824,88 @@ def _(key: str) -> Any: }, ], }, + { + "name": _("Obj Chart..."), + "class": UITextMenu, + "select": "single", + "label": "obj_chart_settings", + "items": [ + { + "name": _("Crosshair"), + "class": UITextMenu, + "select": "single", + "label": "obj_chart_crosshair", + "config_option": "obj_chart_crosshair", + "items": [ + { + "name": _("Off"), + "value": "off", + }, + { + "name": _("On"), + "value": "on", + }, + { + "name": _("Pulse"), + "value": "pulse", + }, + ], + }, + { + "name": _("Style"), + "class": UITextMenu, + 
"select": "single", + "label": "obj_chart_style", + "config_option": "obj_chart_crosshair_style", + "items": [ + { + "name": _("Simple"), + "value": "simple", + }, + { + "name": _("Circle"), + "value": "circle", + }, + { + "name": _("Bullseye"), + "value": "bullseye", + }, + { + "name": _("Brackets"), + "value": "brackets", + }, + { + "name": _("Dots"), + "value": "dots", + }, + { + "name": _("Cross"), + "value": "cross", + }, + ], + }, + { + "name": _("Set LM"), + "class": UITextMenu, + "select": "single", + "label": "obj_chart_lm", + "config_option": "obj_chart_lm_mode", + "items": [ + { + "name": _("Auto"), + "value": "auto", + }, + { + "name": _("Fixed"), + "value": "fixed", + "class": UILMEntry, + "mode": "lm_entry", + "config_option": "obj_chart_lm_fixed", + }, + ], + }, + ], + }, { "name": _("Camera Exp"), "class": UITextMenu, diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 7ee0d1890..5d0e8d37b 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -48,13 +48,17 @@ def __init__(self, *args, **kwargs): self.screen_direction = self.config_object.get_option("screen_direction") self.mount_type = self.config_object.get_option("mount_type") + self._chart_gen = None # Cached chart generator instance self.object = self.item_definition["object"] self.object_list = self.item_definition["object_list"] self.object_display_mode = DM_LOCATE self.object_image = None + self._is_showing_loading_chart = False # Track if showing "Loading..." for deep chart + self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists + self._is_deep_chart = False # Track if currently showing a deep chart (auto or forced) - # Marking Menu - Just default help for now - self.marking_menu = MarkingMenu( + # Default Marking Menu + self._default_marking_menu = MarkingMenu( left=MarkingMenuOption(), right=MarkingMenuOption(), down=MarkingMenuOption( @@ -68,6 +72,14 @@ def __init__(self, *args, **kwargs): ), ) + # Deep Chart Marking Menu - Settings access + self._deep_chart_marking_menu = MarkingMenu( + up=MarkingMenuOption(label=_("SETTINGS"), menu_jump="obj_chart_settings"), + right=MarkingMenuOption(label=_("CROSS"), menu_jump="obj_chart_crosshair"), + down=MarkingMenuOption(label=_("STYLE"), menu_jump="obj_chart_style"), + left=MarkingMenuOption(label=_("LM"), menu_jump="obj_chart_lm"), + ) + # Used for displaying obsevation counts self.observations_db = ObservationsDatabase() @@ -107,6 +119,15 @@ def __init__(self, *args, **kwargs): self.active() # fill in activation time self.update_object_info() + @property + def marking_menu(self): + """ + Return appropriate marking menu based on current view mode + """ + if self._is_deep_chart: + return self._deep_chart_marking_menu + return self._default_marking_menu + def _layout_designator(self): """ Generates designator layout object @@ -218,6 +239,11 @@ def update_object_info(self): roll = solution["Roll"] magnification = self.config_object.equipment.calc_magnification() + prev_object_image = self.object_image + + # Get or create chart generator (owned by UI layer, not cat_images) + chart_gen = self._get_chart_generator() + self.object_image = cat_images.get_display_image( self.object, str(self.config_object.equipment.active_eyepiece), @@ -226,10 +252,34 @@ def update_object_info(self): self.display_class, burn_in=self.object_display_mode in [DM_POSS, DM_SDSS], magnification=magnification, + config_object=self.config_object, + shared_state=self.shared_state, + 
chart_generator=chart_gen, # Pass our chart generator to cat_images + force_deep_chart=self._force_deep_chart, # Toggle state + ) + + # Track if we're showing a "Loading..." placeholder for deep chart + # Check if image has the special "is_loading_placeholder" attribute + self._is_showing_loading_chart = ( + self.object_image is not None + and hasattr(self.object_image, 'is_loading_placeholder') + and self.object_image.is_loading_placeholder + and self.object_display_mode in [DM_POSS, DM_SDSS] + ) + + # Detect if we're showing a deep chart (forced or automatic due to no POSS image) + # Deep charts are identified by the is_loading_placeholder attribute (loading or False) + self._is_deep_chart = ( + self.object_image is not None + and hasattr(self.object_image, 'is_loading_placeholder') + and self.object_display_mode in [DM_POSS, DM_SDSS] ) def active(self): self.activation_time = time.time() + # Regenerate object info when returning to this screen + # This ensures config changes (like LM) are applied + self.update_object_info() def _check_catalog_initialized(self): code = self.object.catalog_code @@ -239,6 +289,214 @@ def _check_catalog_initialized(self): catalog = self.catalogs.get_catalog_by_code(code) return catalog and catalog.initialized + def _get_pulse_factor(self): + """ + Calculate current pulse factor for animations + Returns tuple: (pulse_factor, size_multiplier, color_intensity) + - pulse_factor: 0.0 to 1.0 sine wave + - size_multiplier: factor to multiply sizes by (0.6 to 1.0 for smoother animation) + - color_intensity: brightness value (48 to 128 for more visible change) + """ + import time + import numpy as np + + # Pulsate: full cycle every 2 seconds + pulse_period = 2.0 # seconds + t = time.time() % pulse_period + # Sine wave for smooth pulsation (0.0 to 1.0 range) + pulse_factor = 0.5 + 0.5 * np.sin(2 * np.pi * t / pulse_period) + + # Size multiplier: 0.6 to 1.0 (smaller range, smoother looking) + size_multiplier = 0.6 + 0.4 * pulse_factor + + # Color intensity: 48 to 128 (brighter and more visible) + color_intensity = int(48 + 80 * pulse_factor) + + return pulse_factor, size_multiplier, color_intensity + + def _draw_crosshair_simple(self, pulse=False): + """ + Draw simple crosshair with 4 lines and center gap + + Args: + pulse: If True, apply pulsation effect + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + if pulse: + _, size_mult, color_intensity = self._get_pulse_factor() + # Size pulsates from 6 down to 3 pixels (inverted - more steps) + outer = 6.0 - (3.0 * size_mult) # 6.0 down to 3.0 (more visible integer steps) + marker_color = self.colors.get(color_intensity) + else: + # Fixed size and brightness + outer = 4 + marker_color = self.colors.get(64) + + inner = 2 # Fixed gap (small center) + + # Crosshair outline (4 short lines with gap in middle) + self.draw.line([cx - outer, cy, cx - inner, cy], fill=marker_color, width=1) # Left + self.draw.line([cx + inner, cy, cx + outer, cy], fill=marker_color, width=1) # Right + self.draw.line([cx, cy - outer, cx, cy - inner], fill=marker_color, width=1) # Top + self.draw.line([cx, cy + inner, cx, cy + outer], fill=marker_color, width=1) # Bottom + + def _draw_crosshair_circle(self, pulse=False): + """ + Draw circle reticle + + Args: + pulse: If True, apply pulsation effect + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + if pulse: + _, size_mult, color_intensity = self._get_pulse_factor() + radius = 8.0 - (4.0 * size_mult) # 8.0 down to 
4.0 (more steps) + marker_color = self.colors.get(color_intensity) + else: + radius = 4 # Smaller fixed size + marker_color = self.colors.get(64) + + # Draw circle + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + self.draw.ellipse(bbox, outline=marker_color, width=1) + + # Small center dot + self.draw.ellipse([cx - 1, cy - 1, cx + 1, cy + 1], fill=marker_color) + + def _draw_crosshair_bullseye(self, pulse=False): + """ + Draw concentric circles (bullseye) + + Args: + pulse: If True, apply pulsation effect + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + if pulse: + _, size_mult, color_intensity = self._get_pulse_factor() + marker_color = self.colors.get(color_intensity) + # Pulsate from larger to smaller (more visible steps) + radii = [4.0 - (2.0 * size_mult), 8.0 - (4.0 * size_mult), 12.0 - (6.0 * size_mult)] # 4→2, 8→4, 12→6 + else: + marker_color = self.colors.get(64) + radii = [2, 4, 6] # Smaller fixed radii + + # Draw concentric circles + for radius in radii: + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + self.draw.ellipse(bbox, outline=marker_color, width=1) + + def _draw_crosshair_brackets(self, pulse=False): + """ + Draw corner brackets (frame corners) + + Args: + pulse: If True, apply pulsation effect + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + if pulse: + _, size_mult, color_intensity = self._get_pulse_factor() + size = 8.0 - (4.0 * size_mult) # 8.0 down to 4.0 (more steps) + length = 5.0 - (2.0 * size_mult) # 5.0 down to 3.0 (more steps) + marker_color = self.colors.get(color_intensity) + else: + size = 4 # Smaller distance from center to bracket corner + length = 3 # Shorter bracket arms + marker_color = self.colors.get(64) + + # Top-left bracket + self.draw.line([cx - size, cy - size, cx - size + length, cy - size], fill=marker_color, width=1) + self.draw.line([cx - size, cy - size, cx - size, cy - size + length], fill=marker_color, width=1) + + # Top-right bracket + self.draw.line([cx + size, cy - size, cx + size - length, cy - size], fill=marker_color, width=1) + self.draw.line([cx + size, cy - size, cx + size, cy - size + length], fill=marker_color, width=1) + + # Bottom-left bracket + self.draw.line([cx - size, cy + size, cx - size + length, cy + size], fill=marker_color, width=1) + self.draw.line([cx - size, cy + size, cx - size, cy + size - length], fill=marker_color, width=1) + + # Bottom-right bracket + self.draw.line([cx + size, cy + size, cx + size - length, cy + size], fill=marker_color, width=1) + self.draw.line([cx + size, cy + size, cx + size, cy + size - length], fill=marker_color, width=1) + + def _draw_crosshair_dots(self, pulse=False): + """ + Draw four corner dots + + Args: + pulse: If True, apply pulsation effect + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + if pulse: + _, size_mult, color_intensity = self._get_pulse_factor() + distance = 8.0 - (4.0 * size_mult) # 8.0 down to 4.0 (more steps) + dot_size = 3.0 - (1.5 * size_mult) # 3.0 down to 1.5 (more steps) + marker_color = self.colors.get(color_intensity) + else: + distance = 4 # Smaller distance from center to dots + dot_size = 1 # Smaller dot radius + marker_color = self.colors.get(64) + + # Four corner dots + positions = [ + (cx - distance, cy - distance), # Top-left + (cx + distance, cy - distance), # Top-right + (cx - distance, cy + distance), # Bottom-left + (cx + distance, cy + distance), # Bottom-right + ] + + for x, y in 
positions: + bbox = [x - dot_size, y - dot_size, x + dot_size, y + dot_size] + self.draw.ellipse(bbox, fill=marker_color) + + def _draw_crosshair_cross(self, pulse=False): + """ + Draw full cross (lines extend across entire screen) + + Args: + pulse: If True, apply pulsation effect + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + if pulse: + _, size_mult, color_intensity = self._get_pulse_factor() + marker_color = self.colors.get(color_intensity) + else: + marker_color = self.colors.get(64) + + # Horizontal line + self.draw.line([0, cy, width, cy], fill=marker_color, width=1) + # Vertical line + self.draw.line([cx, 0, cx, height], fill=marker_color, width=1) + + def _draw_fov_circle(self): + """ + Draw FOV circle to show eyepiece field of view boundary + Matches the POSS view circular crop + """ + width, height = self.display_class.resolution + cx, cy = width / 2.0, height / 2.0 + + # Use slightly smaller than screen to show the boundary + # Screen is typically 128x128, so use radius that fits within screen + radius = min(width, height) / 2.0 - 2 # Leave 2 pixel margin + + # Draw subtle circle + marker_color = self.colors.get(32) # Very dim, just to show boundary + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + self.draw.ellipse(bbox, outline=marker_color, width=1) + def _render_pointing_instructions(self): # Pointing Instructions if self.shared_state.solution() is None: @@ -378,7 +636,40 @@ def _render_pointing_instructions(self): fill=self.colors.get(indicator_color), ) + def _get_chart_generator(self): + """Get the global chart generator singleton""" + from PiFinder.deep_chart import get_chart_generator + import logging + logger = logging.getLogger("ObjectDetails") + + chart_gen = get_chart_generator(self.config_object, self.shared_state) + logger.info(f">>> _get_chart_generator returning: {chart_gen}") + return chart_gen + def update(self, force=True): + import logging + logger = logging.getLogger("ObjectDetails") + + # Check if we're showing "Loading..." for a deep chart + # and if catalog is now ready, regenerate the image + if self._is_showing_loading_chart: + try: + from PiFinder.star_catalog import CatalogState + + # Use cached chart generator to preserve catalog state + chart_gen = self._get_chart_generator() + state = chart_gen.get_catalog_state() + logger.info(f">>> Update check: catalog state = {state}") + + if state == CatalogState.READY: + # Catalog ready! Regenerate display + logger.info(">>> Catalog READY! 
Regenerating image...") + self._is_showing_loading_chart = False + self.update_object_info() + force = True # Force screen update + except Exception as e: + logger.error(f">>> Update check failed: {e}", exc_info=True) + pass # Clear Screen self.clear_screen() @@ -386,6 +677,28 @@ def update(self, force=True): if self.object_display_mode in [DM_POSS, DM_SDSS]: self.screen.paste(self.object_image) + # If showing deep chart, draw crosshair based on config + if self._force_deep_chart and self.object_image is not None: + crosshair_mode = self.config_object.get_option("obj_chart_crosshair") + crosshair_style = self.config_object.get_option("obj_chart_crosshair_style") + + if crosshair_mode != "off": + # Determine if we should pulse + pulse = (crosshair_mode == "pulse") + + # Call the appropriate drawing method based on style + style_methods = { + "simple": self._draw_crosshair_simple, + "circle": self._draw_crosshair_circle, + "bullseye": self._draw_crosshair_bullseye, + "brackets": self._draw_crosshair_brackets, + "dots": self._draw_crosshair_dots, + "cross": self._draw_crosshair_cross, + } + + draw_method = style_methods.get(crosshair_style, self._draw_crosshair_simple) + draw_method(pulse=pulse) + if self.object_display_mode == DM_DESC or self.object_display_mode == DM_LOCATE: # catalog and entry field i.e. NGC-311 self.refresh_designator() @@ -478,6 +791,40 @@ def mm_cancel(self, _marking_menu, _menu_item) -> bool: """ return True + def mm_toggle_crosshair(self, _marking_menu, _menu_item) -> bool: + """ + Cycle through crosshair modes: off -> on -> pulse -> off + """ + current_mode = self.config_object.get_option("obj_chart_crosshair") + modes = ["off", "on", "pulse"] + current_index = modes.index(current_mode) if current_mode in modes else 0 + next_index = (current_index + 1) % len(modes) + self.config_object.set_option("obj_chart_crosshair", modes[next_index]) + return False # Don't exit, just update + + def mm_cycle_style(self, _marking_menu, _menu_item) -> bool: + """ + Cycle through crosshair styles + """ + current_style = self.config_object.get_option("obj_chart_crosshair_style") + styles = ["simple", "circle", "bullseye", "brackets", "dots", "cross"] + current_index = styles.index(current_style) if current_style in styles else 0 + next_index = (current_index + 1) % len(styles) + self.config_object.set_option("obj_chart_crosshair_style", styles[next_index]) + return False # Don't exit, just update + + def mm_toggle_lm_mode(self, _marking_menu, _menu_item) -> bool: + """ + Toggle between auto and fixed LM mode + """ + current_mode = self.config_object.get_option("obj_chart_lm_mode") + new_mode = "fixed" if current_mode == "auto" else "auto" + self.config_object.set_option("obj_chart_lm_mode", new_mode) + # If switching to auto, regenerate the chart with new calculation + if new_mode == "auto": + self.update_object_info() + return False # Don't exit, just update + def mm_align(self, _marking_menu, _menu_item) -> bool: """ Called from marking menu to align on curent object @@ -545,3 +892,15 @@ def key_minus(self): typeconst.next() else: self.change_fov(-1) + + def key_number(self, number): + """ + Handle number key presses + 0: Toggle between POSS image and deep chart (when both are available) + """ + if number == 0: + # Toggle the flag + self._force_deep_chart = not self._force_deep_chart + # Reload image with new setting + self.update_object_info() + self.update() diff --git a/python/tests/test_limiting_magnitude.py b/python/tests/test_limiting_magnitude.py new file mode 100644 index 
000000000..6efee7d57 --- /dev/null +++ b/python/tests/test_limiting_magnitude.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +""" +Unit tests for limiting magnitude calculations using Feijth & Comello formula +""" + +import pytest +from PiFinder.deep_chart import DeepChartGenerator + + +class TestFeijthComelloFormula: + """Test the Feijth & Comello limiting magnitude formula""" + + def test_reference_calculation(self): + """ + Test with Schaefer's reference values + + Reference from astrobasics.de: + If Schaefer's result is used with mv = 6.04, D = 25, d = 4, M = 400 + and t = 0.54 the following limiting magnitude results: 13.36 + + Formula: mg = mv - 2 + 2.5 × log₁₀(√(D² - d²) × M × t) + """ + mv = 6.04 # Naked eye limiting magnitude + D = 25.0 # Aperture in cm + d = 4.0 # Obstruction diameter in cm + M = 400.0 # Magnification + t = 0.54 # Transmission + + result = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, t) + + # Should be 13.36 according to reference (allow 0.1 mag tolerance) + assert abs(result - 13.36) < 0.1, f"Expected ~13.36, got {result:.2f}" + + def test_unobstructed_telescope(self): + """Test with no central obstruction (refractor/unobstructed Newtonian)""" + mv = 6.0 + D = 20.0 # 200mm aperture + d = 0.0 # No obstruction + M = 100.0 + t = 0.85 + + result = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, t) + + # Should give reasonable result (12-14 range for 200mm scope) + assert 10.0 < result < 15.0, f"Result {result:.2f} outside expected range" + + def test_higher_magnification_improves_lm(self): + """ + Test that higher magnification improves limiting magnitude + (darkens sky background, improving contrast) + """ + mv = 6.0 + D = 20.0 + d = 0.0 + t = 0.85 + + lm_40x = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 40.0, t) + lm_100x = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 100.0, t) + lm_200x = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 200.0, t) + + # Higher magnification should give better (larger number) limiting magnitude + assert lm_100x > lm_40x, f"100x ({lm_100x:.2f}) should be > 40x ({lm_40x:.2f})" + assert lm_200x > lm_100x, f"200x ({lm_200x:.2f}) should be > 100x ({lm_100x:.2f})" + + def test_larger_aperture_improves_lm(self): + """Test that larger aperture improves limiting magnitude""" + mv = 6.0 + d = 0.0 + M = 100.0 + t = 0.85 + + lm_80mm = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, 8.0, d, M, t) + lm_150mm = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, 15.0, d, M, t) + lm_250mm = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, 25.0, d, M, t) + + # Larger aperture should give better limiting magnitude + assert lm_150mm > lm_80mm, f"150mm ({lm_150mm:.2f}) should be > 80mm ({lm_80mm:.2f})" + assert lm_250mm > lm_150mm, f"250mm ({lm_250mm:.2f}) should be > 150mm ({lm_150mm:.2f})" + + def test_obstruction_reduces_lm(self): + """Test that central obstruction reduces limiting magnitude""" + mv = 6.0 + D = 20.0 + M = 100.0 + t = 0.85 + + lm_no_obstruction = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, 0.0, M, t) + lm_with_obstruction = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, 5.0, M, t) + + # Obstruction should reduce limiting magnitude + assert lm_no_obstruction > lm_with_obstruction, \ + f"Unobstructed ({lm_no_obstruction:.2f}) should be > obstructed ({lm_with_obstruction:.2f})" + + def test_better_transmission_improves_lm(self): + """Test that better transmission improves limiting magnitude""" + mv 
= 6.0 + D = 20.0 + d = 0.0 + M = 100.0 + + lm_poor_transmission = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, 0.50) + lm_good_transmission = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, 0.85) + + # Better transmission should give better limiting magnitude + assert lm_good_transmission > lm_poor_transmission, \ + f"Good transmission ({lm_good_transmission:.2f}) should be > poor ({lm_poor_transmission:.2f})" + + def test_darker_sky_improves_naked_eye_lm(self): + """ + Test that darker sky (higher mv) improves telescopic limiting magnitude + Since telescopic LM builds on naked eye LM + """ + D = 20.0 + d = 0.0 + M = 100.0 + t = 0.85 + + lm_bright_sky = DeepChartGenerator.feijth_comello_limiting_magnitude(5.0, D, d, M, t) + lm_dark_sky = DeepChartGenerator.feijth_comello_limiting_magnitude(6.5, D, d, M, t) + + # Darker sky should give better limiting magnitude + assert lm_dark_sky > lm_bright_sky, \ + f"Dark sky ({lm_dark_sky:.2f}) should be > bright sky ({lm_bright_sky:.2f})" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) From f660fe50e59395e5718c5a9ed34527a99c2c0ced Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Fri, 21 Nov 2025 17:06:44 +0100 Subject: [PATCH 05/27] Add healpy requirement --- python/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/python/requirements.txt b/python/requirements.txt index 5f92091ab..9badef9f1 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -5,6 +5,7 @@ cheroot==10.0.0 dataclasses_json==0.6.7 gpsdclient==1.3.2 grpcio==1.64.1 +healpy==1.16.6 json5==0.9.25 luma.oled==3.12.0 luma.lcd==2.11.0 From 8ed0c6127db56dd314197e05d932baff3ccfa268 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Fri, 21 Nov 2025 17:54:03 +0100 Subject: [PATCH 06/27] optimisations --- python/PiFinder/deep_chart.py | 28 ++++++++++- python/PiFinder/star_catalog.py | 84 ++++++++++++++++++++++----------- 2 files changed, 82 insertions(+), 30 deletions(-) diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index 2735e32d6..44e0c388c 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -58,6 +58,7 @@ def __init__(self, config, shared_state): self.shared_state = shared_state self.catalog = None self.chart_cache = {} + self._lm_cache = None # Cache (sqm, eyepiece_id, lm) to avoid recalculation # Initialize font for text overlays font_path = Path(Path.cwd(), "../fonts/RobotoMonoNerdFontMono-Bold.ttf") @@ -583,6 +584,24 @@ def get_limiting_magnitude(self, sqm) -> float: Returns: Limiting magnitude value """ + # Build cache key from sqm, telescope, and eyepiece focal lengths + # Round SQM to 1 decimal to avoid floating point comparison issues + equipment = self.config.equipment + telescope = equipment.active_telescope + eyepiece = equipment.active_eyepiece + + # Cache key includes all factors that affect LM calculation + telescope_fl = telescope.focal_length_mm if telescope else None + telescope_aperture = telescope.aperture_mm if telescope else None + eyepiece_fl = eyepiece.focal_length_mm if eyepiece else None + sqm_value = round(sqm.value, 1) if sqm and hasattr(sqm, 'value') and sqm.value else None + + cache_key = (sqm_value, telescope_aperture, telescope_fl, eyepiece_fl) + + # Check cache - return cached value without logging + if self._lm_cache is not None and self._lm_cache[0] == cache_key: + return self._lm_cache[1] + lm_mode = self.config.get_option("obj_chart_lm_mode") if lm_mode == "fixed": @@ -591,14 +610,19 @@ def 
get_limiting_magnitude(self, sqm) -> float: try: lm = float(lm) logger.info(f"Using fixed LM from config: {lm:.1f}") + self._lm_cache = (cache_key, lm) return lm except (ValueError, TypeError): # Invalid fixed value, fall back to auto logger.warning(f"Invalid fixed LM value: {lm}, falling back to auto") - return self.calculate_limiting_magnitude(sqm) + lm = self.calculate_limiting_magnitude(sqm) + self._lm_cache = (cache_key, lm) + return lm else: # Auto mode: calculate based on equipment and sky brightness - return self.calculate_limiting_magnitude(sqm) + lm = self.calculate_limiting_magnitude(sqm) + self._lm_cache = (cache_key, lm) + return lm def calculate_limiting_magnitude(self, sqm) -> float: """ diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 12d49c18c..9707fe775 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -27,7 +27,7 @@ # Import healpy at module level to avoid first-use delay # This ensures the slow import happens during initialization, not during first chart render try: - import healpy as hp # type: ignore[import-not-found] + import healpy as hp # type: ignore[import-untyped] _HEALPY_AVAILABLE = True except ImportError: hp = None @@ -39,6 +39,17 @@ STAR_RECORD_FORMAT = " 10: + if is_compact and 10 < len(tiles) <= 50: # Batch load is much faster for many tiles # Note: batch loading returns PM-corrected (ra, dec, mag) tuples logger.info(f"Using BATCH loading for {len(tiles)} tiles") @@ -674,6 +686,8 @@ def _load_tiles_batch( all_stars = [] + logger.info(f"_load_tiles_batch: Starting batch load of {len(tile_ids)} tiles") + # Process each magnitude band for mag_band_info in self.metadata.get("mag_bands", []): mag_min = mag_band_info["min"] @@ -682,6 +696,7 @@ def _load_tiles_batch( if mag_min >= mag_limit: continue # Skip faint bands + logger.info(f"_load_tiles_batch: Processing mag band {mag_min}-{mag_max}") band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" index_file_bin = band_dir / "index.bin" index_file_json = band_dir / "index.json" @@ -717,6 +732,8 @@ def _load_tiles_batch( if not read_ops: continue + logger.info(f"_load_tiles_batch: Found {len(read_ops)} tiles in mag band {mag_min}-{mag_max}") + # Sort by offset to minimize seeks read_ops.sort(key=lambda x: x[1]["offset"]) @@ -724,6 +741,7 @@ def _load_tiles_batch( # Group tiles that are close together (within 100KB) MAX_GAP = 100 * 1024 # 100KB gap tolerance + logger.info(f"_load_tiles_batch: Opening {tiles_file}") # Open file once and read all tiles with open(tiles_file, "rb") as f: i = 0 @@ -752,10 +770,13 @@ def _load_tiles_batch( break # Read entire chunk at once + chunk_size = chunk_end - offset + logger.info(f"_load_tiles_batch: Reading chunk at offset {offset}, size {chunk_size/1024:.1f}KB with {len(tiles_in_chunk)} tiles") f.seek(offset) - chunk_data = f.read(chunk_end - offset) + chunk_data = f.read(chunk_size) + logger.info(f"_load_tiles_batch: Read complete, processing {len(tiles_in_chunk)} tiles") - # Process each tile in the chunk + # Process each tile in the chunk using vectorized operations for tile_id, tile_info in tiles_in_chunk: tile_offset = tile_info["offset"] - offset # Relative offset in chunk compressed_size = tile_info.get("compressed_size") @@ -764,45 +785,52 @@ def _load_tiles_batch( if compressed_size: import zlib compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] + logger.info(f"_load_tiles_batch: Decompressing tile {tile_id}, {compressed_size} → {size} bytes") data = 
zlib.decompress(compressed_data) else: data = chunk_data[tile_offset:tile_offset + size] - # Decode stars in this tile + # VECTORIZED: Parse all star records at once using numpy num_records = len(data) // STAR_RECORD_SIZE - for k in range(num_records): - record_data = data[k * STAR_RECORD_SIZE : (k + 1) * STAR_RECORD_SIZE] + logger.info(f"_load_tiles_batch: Decoding {num_records} stars from tile {tile_id} (vectorized)") + + records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - healpix_pixel, ra_offset_encoded, dec_offset_encoded, mag_encoded, pmra_encoded, pmdec_encoded = ( - struct.unpack(STAR_RECORD_FORMAT, record_data) - ) + # Mask healpix to 24 bits + healpix_pixels = records['healpix'] & 0xFFFFFF - # Mask to 24 bits - healpix_pixel = healpix_pixel & 0xFFFFFF + # VECTORIZED: Get all pixel centers at once (healpy handles arrays efficiently) + pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) - # Get pixel center - pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) + # Calculate pixel size once (not per star!) + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - # Decode position - pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + # VECTORIZED: Decode all offsets at once + ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - ra_offset_arcsec = (ra_offset_encoded / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (dec_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + # VECTORIZED: Calculate final positions + decs = pixel_decs + dec_offset_arcsec / 3600.0 + ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - dec = pixel_dec + dec_offset_arcsec / 3600.0 - ra = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(dec)) + # VECTORIZED: Decode magnitudes and proper motions + mags = records['mag'] / 10.0 + pmras = records['pmra'] * 50 + pmdecs = records['pmdec'] * 50 - mag = mag_encoded / 10.0 - pmra = pmra_encoded * 50 - pmdec = pmdec_encoded * 50 + # VECTORIZED: Filter by magnitude + mag_mask = mags <= mag_limit - # Filter by magnitude - if mag <= mag_limit: - all_stars.append((ra, dec, mag, pmra, pmdec)) + # Collect stars that pass magnitude filter + for i in np.where(mag_mask)[0]: + all_stars.append((ras[i], decs[i], mags[i], pmras[i], pmdecs[i])) # Move to next chunk i = j + logger.info(f"_load_tiles_batch: Loaded {len(all_stars)} stars total, applying proper motion") # Apply proper motion - return self._apply_proper_motion(all_stars) + result = self._apply_proper_motion(all_stars) + logger.info(f"_load_tiles_batch: Complete, returning {len(result)} stars") + return result From 201b765658088e938e5692711264510a12fd0ba7 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Fri, 21 Nov 2025 18:09:27 +0100 Subject: [PATCH 07/27] vectorize single --- python/PiFinder/star_catalog.py | 103 +++++++++++++++----------------- 1 file changed, 48 insertions(+), 55 deletions(-) diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 9707fe775..1b4828cf1 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -424,45 +424,42 @@ def _load_tile_from_file( if not _HEALPY_AVAILABLE: return [] - stars = [] - + # Read entire file at once with open(tile_file, "rb") as f: - while True: - data = 
f.read(STAR_RECORD_SIZE) - if len(data) < STAR_RECORD_SIZE: - break + data = f.read() - # Decode record - healpix_pixel, ra_offset_encoded, dec_offset_encoded, mag_encoded, pmra_encoded, pmdec_encoded = ( - struct.unpack(STAR_RECORD_FORMAT, data) - ) + if len(data) == 0: + return [] - # Mask to 24 bits - healpix_pixel = healpix_pixel & 0xFFFFFF + # VECTORIZED: Parse all records at once + num_records = len(data) // STAR_RECORD_SIZE + records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - # Get pixel center coordinates - pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) + # Mask healpix to 24 bits + healpix_pixels = records['healpix'] & 0xFFFFFF - # Calculate pixel size - pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + # VECTORIZED: Get all pixel centers at once + pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) - # Decode offsets - ra_offset_arcsec = (ra_offset_encoded / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (dec_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + # Calculate pixel size once (not per star!) + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - # Calculate actual Dec FIRST (needed for RA cosine correction) - dec = pixel_dec + dec_offset_arcsec / 3600.0 + # VECTORIZED: Decode all offsets at once + ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - # Calculate actual RA using the star's actual declination (not pixel center) - # This matches the encoder which uses the star's actual dec for compression - ra = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(dec)) + # VECTORIZED: Calculate final positions + decs = pixel_decs + dec_offset_arcsec / 3600.0 + ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - mag = mag_encoded / 10.0 # 0.1 mag precision - pmra = pmra_encoded * 50 # mas/year - pmdec = pmdec_encoded * 50 # mas/year + # VECTORIZED: Decode magnitudes and proper motions + mags = records['mag'] / 10.0 + pmras = records['pmra'] * 50 + pmdecs = records['pmdec'] * 50 - stars.append((ra, dec, mag, pmra, pmdec)) + # Build result list + stars = [(ras[i], decs[i], mags[i], pmras[i], pmdecs[i]) for i in range(num_records)] return stars @@ -538,41 +535,37 @@ def _load_tile_compact( # Uncompressed tile data = f.read(size) - # Decode all records in this tile + # VECTORIZED: Decode all records in this tile at once num_records = len(data) // STAR_RECORD_SIZE - for i in range(num_records): - record_data = data[i * STAR_RECORD_SIZE : (i + 1) * STAR_RECORD_SIZE] - - healpix_pixel, ra_offset_encoded, dec_offset_encoded, mag_encoded, pmra_encoded, pmdec_encoded = ( - struct.unpack(STAR_RECORD_FORMAT, record_data) - ) - # Mask to 24 bits - healpix_pixel = healpix_pixel & 0xFFFFFF + # Parse all records using numpy + records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - # Get pixel center coordinates - pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) + # Mask healpix to 24 bits + healpix_pixels = records['healpix'] & 0xFFFFFF - # Calculate pixel size - pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + # VECTORIZED: Get all pixel centers at once + pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, 
lonlat=True) - # Decode offsets - ra_offset_arcsec = (ra_offset_encoded / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (dec_offset_encoded / 127.5 - 1.0) * max_offset_arcsec + # Calculate pixel size once (not per star!) + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - # Calculate actual Dec FIRST (needed for RA cosine correction) - dec = pixel_dec + dec_offset_arcsec / 3600.0 + # VECTORIZED: Decode all offsets at once + ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - # Calculate actual RA using the star's actual declination (not pixel center) - # This matches the encoder which uses the star's actual dec for compression - ra = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(dec)) + # VECTORIZED: Calculate final positions + decs = pixel_decs + dec_offset_arcsec / 3600.0 + ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - mag = mag_encoded / 10.0 - pmra = pmra_encoded * 50 - pmdec = pmdec_encoded * 50 + # VECTORIZED: Decode magnitudes and proper motions + mags = records['mag'] / 10.0 + pmras = records['pmra'] * 50 + pmdecs = records['pmdec'] * 50 - stars.append((ra, dec, mag, pmra, pmdec)) + # Build result list + stars = [(ras[i], decs[i], mags[i], pmras[i], pmdecs[i]) for i in range(num_records)] return stars From 05668bc0a6e670c4e95a1e3ef35ef169ab4ddde1 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 00:05:50 +0100 Subject: [PATCH 08/27] Performance and logging --- python/PiFinder/cat_images.py | 11 +- python/PiFinder/deep_chart.py | 121 ++++++----- python/PiFinder/star_catalog.py | 342 +++++++++++++++++++++++++++++++- 3 files changed, 420 insertions(+), 54 deletions(-) diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py index 958d96958..29946b0ac 100644 --- a/python/PiFinder/cat_images.py +++ b/python/PiFinder/cat_images.py @@ -70,14 +70,19 @@ def get_display_image( # Ensure catalog loading started chart_generator.ensure_catalog_loading() - # Try to generate chart - chart_image = chart_generator.generate_chart( + # Try to generate chart (progressive generator - consume all yields) + # The generator yields intermediate images as magnitude bands load + # We'll use the final (most complete) image + chart_image = None + for image in chart_generator.generate_chart( catalog_object, (display_class.fov_res, display_class.fov_res), burn_in=burn_in, display_class=display_class, roll=roll - ) + ): + chart_image = image # Keep updating to latest + # TODO: Could potentially display intermediate images here for faster feedback if chart_image is None: # Catalog not ready yet, show "Loading..." 
with progress diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index 44e0c388c..dd4a2d8f8 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -14,7 +14,7 @@ import logging from pathlib import Path -from typing import Optional, Tuple +from typing import Generator, Optional, Tuple import numpy as np from PIL import Image, ImageDraw, ImageFont @@ -111,7 +111,7 @@ def initialize_catalog(self): def generate_chart( self, catalog_object, resolution: Tuple[int, int], burn_in: bool = True, display_class=None, roll=None - ) -> Optional[Image.Image]: + ) -> Generator[Optional[Image.Image], None, None]: """ Generate chart for object at current equipment settings @@ -154,22 +154,24 @@ def generate_chart( # For query, cap at catalog max mag_limit_query = min(mag_limit_calculated, 17.0) - # Query stars + # Query stars PROGRESSIVELY (bright to faint) + # This is a generator that yields partial results as each magnitude band loads import time t0 = time.time() - stars = self.catalog.get_stars_for_fov( - ra_deg=catalog_object.ra, - dec_deg=catalog_object.dec, - fov_deg=fov, - mag_limit=mag_limit_query, - ) - t1 = time.time() logger.info( f"Chart for {catalog_object.catalog_code}{catalog_object.sequence}: " f"Center RA={catalog_object.ra:.4f}° Dec={catalog_object.dec:.4f}°, " f"FOV={fov:.4f}°, Roll={roll if roll is not None else 0:.1f}°, " - f"{len(stars)} stars (query: {(t1-t0)*1000:.1f}ms)" + f"Starting PROGRESSIVE loading (mag_limit={mag_limit_query:.1f})" + ) + + # Use progressive loading to show bright stars first + stars_generator = self.catalog.get_stars_for_fov_progressive( + ra_deg=catalog_object.ra, + dec_deg=catalog_object.dec, + fov_deg=fov, + mag_limit=mag_limit_query, ) # Calculate rotation angle for roll / Newtonian orientation @@ -180,47 +182,68 @@ def generate_chart( if roll is not None: image_rotate += roll - # Render chart with rotation applied to star coordinates - t2 = time.time() - image = self.render_chart( - stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query - ) - t3 = time.time() - logger.info(f"Chart rendering: {(t3-t2)*1000:.1f}ms") - - # Add FOV circle BEFORE text overlays so it appears behind them - if burn_in and display_class is not None: - draw = ImageDraw.Draw(image) - width, height = display_class.resolution - cx, cy = width / 2.0, height / 2.0 - radius = min(width, height) / 2.0 - 2 # Leave 2 pixel margin - marker_color = display_class.colors.get(64) # Subtle but visible - bbox = [cx - radius, cy - radius, cx + radius, cy + radius] - draw.ellipse(bbox, outline=marker_color, width=1) - - # Add overlays (using shared utility) - if burn_in and display_class is not None: - from PiFinder.image_utils import add_image_overlays - - logger.info(f"Adding overlays: burn_in={burn_in}, LM={mag_limit_calculated:.1f}") - image = add_image_overlays( - image, - display_class, - fov, - mag, - equipment.active_eyepiece, - burn_in=True, - limiting_magnitude=mag_limit_calculated, # Pass uncapped value for display + # Progressive rendering: Yield image after each magnitude band loads + final_image = None + for stars, is_complete in stars_generator: + t_render_start = time.time() + + # Render chart with rotation applied to star coordinates + image = self.render_chart( + stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query ) - # Cache result (limit cache size to 10 charts) - self.chart_cache[cache_key] = image - if len(self.chart_cache) > 10: - # Remove 
oldest - oldest = next(iter(self.chart_cache)) - del self.chart_cache[oldest] + # Add FOV circle BEFORE text overlays so it appears behind them + if burn_in and display_class is not None: + draw = ImageDraw.Draw(image) + width, height = display_class.resolution + cx, cy = width / 2.0, height / 2.0 + radius = min(width, height) / 2.0 - 2 # Leave 2 pixel margin + marker_color = display_class.colors.get(64) # Subtle but visible + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + draw.ellipse(bbox, outline=marker_color, width=1) + + # Add overlays (using shared utility) + if burn_in and display_class is not None: + from PiFinder.image_utils import add_image_overlays + + image = add_image_overlays( + image, + display_class, + fov, + mag, + equipment.active_eyepiece, + burn_in=True, + limiting_magnitude=mag_limit_calculated, # Pass uncapped value for display + ) + + t_render_end = time.time() + logger.info( + f"PROGRESSIVE: Rendered {len(stars)} stars in {(t_render_end-t_render_start)*1000:.1f}ms " + f"(complete={is_complete})" + ) - return image + final_image = image + + # Yield intermediate result (allows UI to update) + if not is_complete: + yield image + # If complete, will yield final image after loop + + # Final yield with complete image + t1 = time.time() + logger.info(f"Chart complete: {(t1-t0)*1000:.1f}ms total") + + # Cache result (limit cache size to 10 charts) + if final_image is not None: + self.chart_cache[cache_key] = final_image + if len(self.chart_cache) > 10: + # Remove oldest + oldest = next(iter(self.chart_cache)) + del self.chart_cache[oldest] + + yield final_image + else: + yield None def render_chart( self, diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 1b4828cf1..29d41f632 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -27,7 +27,7 @@ # Import healpy at module level to avoid first-use delay # This ensures the slow import happens during initialization, not during first chart render try: - import healpy as hp # type: ignore[import-untyped] + import healpy as hp # type: ignore[import-not-found] _HEALPY_AVAILABLE = True except ImportError: hp = None @@ -246,6 +246,107 @@ def _preload_mag_band(self, mag_min: float, mag_max: float): # (50ms was too conservative, slowing down loading significantly) time.sleep(0.01) + def get_stars_for_fov_progressive( + self, + ra_deg: float, + dec_deg: float, + fov_deg: float, + mag_limit: Optional[float] = None, + ): + """ + Query stars in field of view progressively (bright to faint) + + This is a generator that yields (stars, is_complete) tuples as each + magnitude band is loaded. This allows the UI to display bright stars + immediately while continuing to load fainter stars in the background. 
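+
+        Illustrative usage (hypothetical caller; the coordinates, FOV and
+        mag_limit below are example values only, and redraw() stands in for
+        whatever rendering callback consumes the partial results):
+
+            for stars, is_complete in catalog.get_stars_for_fov_progressive(
+                ra_deg=83.82, dec_deg=-5.39, fov_deg=1.0, mag_limit=14.0
+            ):
+                redraw(stars)  # repaint with everything loaded so far
+                if is_complete:
+                    break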
+ + Blocks if state == LOADING (waits for load to complete) + Returns empty list if state == NOT_LOADED + + Args: + ra_deg: Center RA in degrees + dec_deg: Center Dec in degrees + fov_deg: Field of view in degrees + mag_limit: Limiting magnitude (uses catalog default if None) + + Yields: + (stars, is_complete) tuples where: + - stars: List of (ra, dec, mag) tuples with proper motion corrected + - is_complete: True if this is the final yield with all stars + """ + if self.state == CatalogState.NOT_LOADED: + logger.warning("Catalog not loaded") + yield ([], True) + return + + # Wait for catalog to be loaded + while self.state == CatalogState.LOADING: + import time + time.sleep(0.1) + + if mag_limit is None: + mag_limit = self.metadata.get("mag_limit", 17.0) if self.metadata else 17.0 + + if not _HEALPY_AVAILABLE: + logger.error("healpy not available - cannot perform HEALPix queries") + yield ([], True) + return + + # Calculate HEALPix tiles covering FOV + vec = hp.ang2vec(ra_deg, dec_deg, lonlat=True) + radius_rad = np.radians(fov_deg * 0.85) + tiles = hp.query_disc(self.nside, vec, radius_rad) + logger.info(f"HEALPix PROGRESSIVE: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") + + # Filter by visible hemisphere + if self.visible_tiles: + tiles = [t for t in tiles if t in self.visible_tiles] + + # Limit tile count to prevent excessive loading + if len(tiles) > 100: + logger.warning(f"Large tile count ({len(tiles)}) detected! Limiting to 100 tiles") + tiles = tiles[:100] + + # Load stars progressively by magnitude band (bright to faint) + all_stars = [] + + if not self.metadata: + yield ([], True) + return + + for mag_band_info in self.metadata.get("mag_bands", []): + mag_min = mag_band_info["min"] + mag_max = mag_band_info["max"] + + # Skip bands fainter than limit + if mag_min >= mag_limit: + break + + logger.info(f"PROGRESSIVE: Loading mag band {mag_min}-{mag_max}") + import time + t_band_start = time.time() + + # Load stars from this magnitude band only + band_stars = self._load_tiles_for_mag_band( + tiles, mag_band_info, mag_limit, ra_deg, dec_deg, fov_deg + ) + + t_band_end = time.time() + logger.info(f"PROGRESSIVE: Mag band {mag_min}-{mag_max} loaded {len(band_stars)} stars in {(t_band_end-t_band_start)*1000:.1f}ms") + + # Add to cumulative list + all_stars.extend(band_stars) + + # Yield current results (not complete yet unless this is the last band) + is_last_band = mag_max >= mag_limit + yield (all_stars.copy(), is_last_band) + + if is_last_band: + break + + # Final yield (should already be done above, but just in case) + logger.info(f"PROGRESSIVE: Complete! Total {len(all_stars)} stars loaded") + def get_stars_for_fov( self, ra_deg: float, @@ -326,12 +427,47 @@ def get_stars_for_fov( else: # Load one by one (better for small queries or legacy format) logger.info(f"Using SINGLE-TILE loading for {len(tiles)} tiles (compact={is_compact})") + import time + t_single_start = time.time() stars_raw: List[Tuple[float, float, float, float, float]] = [] - for tile_id in tiles: + + # To prevent UI blocking, limit the number of tiles loaded at once + # For large tile counts (>100), only load the most important tiles + if len(tiles) > 100: + logger.warning(f"Large tile count ({len(tiles)}) detected! 
Limiting to 100 tiles to prevent UI freeze") + # Tiles from query_disc are roughly ordered by distance from center + # Keep the first 100 which are closest to FOV center + tiles = tiles[:100] + + cache_hits = 0 + cache_misses = 0 + + for i, tile_id in enumerate(tiles): + # Check if this tile is cached (for performance tracking) + cache_key = (tile_id, mag_limit) + was_cached = cache_key in self.tile_cache + tile_stars = self._load_tile_data(tile_id, mag_limit) tile_star_counts[tile_id] = len(tile_stars) stars_raw.extend(tile_stars) + if was_cached: + cache_hits += 1 + else: + cache_misses += 1 + + # Log progress every 25 tiles + if (i + 1) % 25 == 0: + elapsed = (time.time() - t_single_start) * 1000 + logger.info(f"Progress: {i+1}/{len(tiles)} tiles loaded ({elapsed:.0f}ms elapsed)") + + t_single_end = time.time() + elapsed_ms = (t_single_end - t_single_start) * 1000 + + # Log cache performance + logger.info(f"Tile cache: {cache_hits} hits, {cache_misses} misses ({cache_hits/(cache_hits+cache_misses)*100:.1f}% hit rate)") + logger.info(f"Single-tile loading complete: {len(stars_raw)} stars in {elapsed_ms:.1f}ms ({elapsed_ms/len(tiles):.2f}ms/tile)") + # Log tile loading stats if tile_star_counts: logger.debug(f"Loaded from {len(tile_star_counts)} tiles: " + @@ -339,10 +475,69 @@ def get_stars_for_fov( f"total={sum(tile_star_counts.values())}") # Apply proper motion correction (for non-batch path only) + t_pm_start = time.time() stars = self._apply_proper_motion(stars_raw) + t_pm_end = time.time() + logger.info(f"Proper motion correction: {len(stars)} stars in {(t_pm_end-t_pm_start)*1000:.1f}ms") return stars + def _load_tiles_for_mag_band( + self, + tile_ids: List[int], + mag_band_info: dict, + mag_limit: float, + ra_deg: float, + dec_deg: float, + fov_deg: float, + ) -> List[Tuple[float, float, float]]: + """ + Load tiles for a specific magnitude band (used by progressive loading) + + Args: + tile_ids: List of HEALPix tile IDs to load + mag_band_info: Magnitude band metadata dict with 'min', 'max' keys + mag_limit: Maximum magnitude to include + ra_deg: Center RA (for logging) + dec_deg: Center Dec (for logging) + fov_deg: Field of view (for logging) + + Returns: + List of (ra, dec, mag) tuples with proper motion corrected + """ + mag_min = mag_band_info["min"] + mag_max = mag_band_info["max"] + band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" + + # Check if this band directory exists + if not band_dir.exists(): + logger.debug(f"Magnitude band directory not found: {band_dir}") + return [] + + # For compact format, use vectorized batch loading per band + assert self.metadata is not None, "metadata must be loaded" + is_compact = self.metadata.get("format") == "compact" + if is_compact: + return self._load_tiles_batch_single_band( + tile_ids, mag_band_info, mag_limit + ) + else: + # Legacy format - load tiles one by one (will load all bands for each tile) + # This is less efficient but legacy format doesn't support per-band loading + stars_raw = [] + for tile_id in tile_ids: + tile_stars = self._load_tile_data(tile_id, mag_limit) + # Filter to just this magnitude band + tile_stars_filtered = [ + (ra, dec, mag, pmra, pmdec) + for ra, dec, mag, pmra, pmdec in tile_stars + if mag_min <= mag < mag_max + ] + stars_raw.extend(tile_stars_filtered) + + # Apply proper motion + return self._apply_proper_motion(stars_raw) + def _load_tile_data( self, tile_id: int, mag_limit: float ) -> List[Tuple[float, float, float, float, float]]: @@ -658,6 +853,149 @@ def _apply_proper_motion( 
return corrected + def _load_tiles_batch_single_band( + self, + tile_ids: List[int], + mag_band_info: dict, + mag_limit: float, + ) -> List[Tuple[float, float, float]]: + """ + Batch load multiple tiles for a SINGLE magnitude band (compact format only) + Used by progressive loading to load one mag band at a time + + Args: + tile_ids: List of HEALPix tile IDs + mag_band_info: Magnitude band metadata dict + mag_limit: Maximum magnitude + + Returns: + List of (ra, dec, mag) tuples (already PM-corrected) + """ + if not _HEALPY_AVAILABLE: + return [] + + mag_min = mag_band_info["min"] + mag_max = mag_band_info["max"] + + band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" + index_file_bin = band_dir / "index.bin" + index_file_json = band_dir / "index.json" + tiles_file = band_dir / "tiles.bin" + + if not tiles_file.exists(): + return [] + + # Load index + cache_key = f"index_{mag_min}_{mag_max}" + if not hasattr(self, '_index_cache'): + self._index_cache = {} + + if cache_key not in self._index_cache: + if index_file_bin.exists(): + self._index_cache[cache_key] = self._read_binary_index(index_file_bin) + elif index_file_json.exists(): + with open(index_file_json, "r") as f: + self._index_cache[cache_key] = json.load(f) + else: + return [] + + index = self._index_cache[cache_key] + + # Collect all tile read operations + read_ops = [] + for tile_id in tile_ids: + tile_key = str(tile_id) + if tile_key in index: + tile_info = index[tile_key] + read_ops.append((tile_id, tile_info)) + + if not read_ops: + return [] + + # Sort by offset to minimize seeks + read_ops.sort(key=lambda x: x[1]["offset"]) + + # Read data in larger sequential chunks when possible + MAX_GAP = 100 * 1024 # 100KB gap tolerance + all_stars = [] + + with open(tiles_file, "rb") as f: + i = 0 + while i < len(read_ops): + tile_id, tile_info = read_ops[i] + offset = tile_info["offset"] + chunk_end = offset + tile_info.get("compressed_size", tile_info["size"]) + + # Find consecutive tiles for chunk reading + tiles_in_chunk = [(tile_id, tile_info)] + j = i + 1 + while j < len(read_ops): + next_tile_id, next_tile_info = read_ops[j] + next_offset = next_tile_info["offset"] + if next_offset - chunk_end <= MAX_GAP: + chunk_end = next_offset + next_tile_info.get("compressed_size", next_tile_info["size"]) + tiles_in_chunk.append((next_tile_id, next_tile_info)) + j += 1 + else: + break + + # Read entire chunk + chunk_size = chunk_end - offset + f.seek(offset) + chunk_data = f.read(chunk_size) + + # Process each tile in chunk + for tile_id, tile_info in tiles_in_chunk: + tile_offset = tile_info["offset"] - offset + compressed_size = tile_info.get("compressed_size") + size = tile_info["size"] + + if compressed_size: + import zlib + compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] + data = zlib.decompress(compressed_data) + else: + data = chunk_data[tile_offset:tile_offset + size] + + # VECTORIZED: Parse all star records at once + num_records = len(data) // STAR_RECORD_SIZE + records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) + + # Mask healpix to 24 bits + healpix_pixels = records['healpix'] & 0xFFFFFF + + # VECTORIZED: Get all pixel centers at once + pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) + + # Calculate pixel size once + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + + # VECTORIZED: Decode all offsets + ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * 
max_offset_arcsec + dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec + + # VECTORIZED: Calculate final positions + decs = pixel_decs + dec_offset_arcsec / 3600.0 + ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) + + # VECTORIZED: Decode magnitudes and proper motions + mags = records['mag'] / 10.0 + pmras = records['pmra'] * 50 + pmdecs = records['pmdec'] * 50 + + # Filter by magnitude + mag_mask = mags < mag_limit + + # Collect stars + for idx in np.where(mag_mask)[0]: + all_stars.append((ras[idx], decs[idx], mags[idx], pmras[idx], pmdecs[idx])) + + i = j + + # Apply proper motion + return self._apply_proper_motion(all_stars) + def _load_tiles_batch( self, tile_ids: List[int], mag_limit: float ) -> List[Tuple[float, float, float]]: From 0d86a0fa47065b66846f813a168e949650edeba7 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 09:57:40 +0100 Subject: [PATCH 09/27] LM entry and chart fixes --- python/PiFinder/deep_chart.py | 7 +++++-- python/PiFinder/ui/lm_entry.py | 13 ++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index dd4a2d8f8..cee30d292 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -129,14 +129,17 @@ def generate_chart( # Check state if self.catalog.state != CatalogState.READY: logger.info(f"Chart generation skipped: catalog state = {self.catalog.state}") - return None + yield None + return # Check cache cache_key = self.get_cache_key(catalog_object) if cache_key in self.chart_cache: # Return cached base image (without crosshair) # Crosshair will be added by add_pulsating_crosshair() each frame - return self.chart_cache[cache_key] + logger.info(f"Chart cache HIT for {cache_key}") + yield self.chart_cache[cache_key] + return # Get equipment settings equipment = self.config.equipment diff --git a/python/PiFinder/ui/lm_entry.py b/python/PiFinder/ui/lm_entry.py index 4cf35230a..e2b8cf0b3 100644 --- a/python/PiFinder/ui/lm_entry.py +++ b/python/PiFinder/ui/lm_entry.py @@ -97,8 +97,8 @@ def update(self, force=False): # Icons (matching radec_entry style) arrow_icons = "󰹺" - back_icon = "" - go_icon = "" + left_icon = "" + right_icon = "" # Legends at bottom (two lines) bar_y = self.height - (self.fonts.base.height * 2) - 4 @@ -115,7 +115,7 @@ def update(self, force=False): draw.text((2, bar_y + 2), line1, font=self.fonts.base.font, fill=self.colors.get(128)) # Line 2: Actions - line2 = f"{back_icon}Cancel {go_icon}Accept -Del" + line2 = f"{left_icon}Cancel {right_icon}Save -Del" draw.text((2, bar_y + 12), line2, font=self.fonts.base.font, fill=self.colors.get(128)) return self.screen, None @@ -173,7 +173,7 @@ def key_left(self): return True def key_right(self): - """Accept - save value and return""" + """Accept - save value and exit""" import logging logger = logging.getLogger("UILMEntry") logger.info(">>> key_right() called!") @@ -210,7 +210,10 @@ def key_right(self): # No need to invalidate cache - cache key includes LM so different # LM values will automatically get separate cache entries - logger.info("Returning True to exit LM entry screen") + logger.info("Calling remove_from_stack() to exit LM entry screen") + # Exit the screen by removing from stack + if self.remove_from_stack: + self.remove_from_stack() return True except ValueError as e: # Invalid value, don't accept From 7fbf9919c4086bfdff1d596a7bb917c2ae5871f8 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 10:03:42 
+0100 Subject: [PATCH 10/27] Revert solver.py formatting changes that introduced UnboundLocalError MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The reformatting in b8d65d4 changed: - solution.pop('key', None) → del solution['key'] (unsafe, fails if key missing) - Added logging that referenced 'solution' before it could be assigned This caused UnboundLocalError when solve_from_centroids() raised exceptions, as 'solution' was never assigned but later code tried to check it. Reverting to the previous working version (parent of b8d65d4). --- python/PiFinder/solver.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/python/PiFinder/solver.py b/python/PiFinder/solver.py index b4b2b4e7f..07a901528 100644 --- a/python/PiFinder/solver.py +++ b/python/PiFinder/solver.py @@ -178,6 +178,13 @@ def update_sqm_dual_pipeline( last_update=datetime.now().isoformat(), ) shared_state.set_sqm(new_sqm_state) + + raw_str = ( + f", raw={sqm_value_raw:.2f}" + if sqm_value_raw is not None + else ", raw=N/A" + ) + logger.info(f"SQM updated: processed={sqm_value_processed:.2f}{raw_str}") return True except Exception as e: @@ -337,7 +344,9 @@ def solver( ) if len(centroids) == 0: - log_no_stars_found = False + if log_no_stars_found: + logger.info("No stars found, skipping (Logged only once)") + log_no_stars_found = False # Clear solve results to mark solve as failed (otherwise old values persist) solved["RA"] = None solved["Dec"] = None @@ -381,12 +390,11 @@ def solver( ) # Don't clutter printed solution with these fields. - # Use pop() to safely remove keys that may not exist - solution.pop("matched_catID", None) - solution.pop("pattern_centroids", None) - solution.pop("epoch_equinox", None) - solution.pop("epoch_proper_motion", None) - solution.pop("cache_hit_fraction", None) + del solution["matched_catID"] + del solution["pattern_centroids"] + del solution["epoch_equinox"] + del solution["epoch_proper_motion"] + del solution["cache_hit_fraction"] solved |= solution @@ -419,6 +427,12 @@ def solver( # Mark successful solve - use same timestamp as last_solve_attempt for comparison solved["last_solve_success"] = solved["last_solve_attempt"] + logger.info( + f"Solve SUCCESS - {len(centroids)} centroids → " + f"{solved.get('Matches', 0)} matches, " + f"RMSE: {solved.get('RMSE', 0):.1f}px" + ) + # See if we are waiting for alignment if align_ra != 0 and align_dec != 0: if solved.get("x_target") is not None: From 48248ce94707dbbfe9447483cf677cb1bb6992bc Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 10:29:36 +0100 Subject: [PATCH 11/27] More logging --- python/PiFinder/cat_images.py | 29 +++++++++++++++++++----- python/PiFinder/deep_chart.py | 19 ++++++++++++++-- python/PiFinder/main.py | 11 ++++++++++ python/PiFinder/solver.py | 3 +++ python/PiFinder/star_catalog.py | 33 +++++++++++++++++++--------- python/PiFinder/ui/object_details.py | 11 ++++++++++ 6 files changed, 89 insertions(+), 17 deletions(-) diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py index 29946b0ac..66c2af981 100644 --- a/python/PiFinder/cat_images.py +++ b/python/PiFinder/cat_images.py @@ -46,11 +46,15 @@ def get_display_image( Required for deep chart generation """ + logger.info(f">>> get_display_image() called for {catalog_object.display_name if catalog_object else 'None'}") + logger.info(f">>> force_deep_chart={force_deep_chart}, chart_generator={chart_generator is not None}") + object_image_path = 
resolve_image_name(catalog_object, source="POSS") - logger.debug("object_image_path = %s", object_image_path) + logger.info(f">>> POSS image path: {object_image_path}, exists: {os.path.exists(object_image_path)}") # If force_deep_chart is True, skip POSS image even if it exists if force_deep_chart or not os.path.exists(object_image_path): + logger.info(f">>> Will use deep chart (force={force_deep_chart}, poss_missing={not os.path.exists(object_image_path)})") # Try to generate deep chart if catalog available return_image = None @@ -60,20 +64,25 @@ def get_display_image( deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") - logger.info(f"Deep chart request: chart_generator={chart_generator is not None}, catalog_exists={deep_catalog_path.exists()}, path={deep_catalog_path}") + logger.info(f">>> Deep chart request: chart_generator={chart_generator is not None}, catalog_exists={deep_catalog_path.exists()}, path={deep_catalog_path}") # Try to generate deep chart if chart_generator was passed in if chart_generator is not None and deep_catalog_path.exists(): + logger.info(">>> chart_generator and deep catalog available, generating chart...") try: from PiFinder.image_utils import create_loading_image # Ensure catalog loading started + logger.info(">>> Calling chart_generator.ensure_catalog_loading()...") chart_generator.ensure_catalog_loading() + logger.info(f">>> Catalog state: {chart_generator.get_catalog_state()}") # Try to generate chart (progressive generator - consume all yields) # The generator yields intermediate images as magnitude bands load # We'll use the final (most complete) image chart_image = None + logger.info(">>> Starting to consume chart generator yields...") + yield_count = 0 for image in chart_generator.generate_chart( catalog_object, (display_class.fov_res, display_class.fov_res), @@ -81,10 +90,15 @@ def get_display_image( display_class=display_class, roll=roll ): + yield_count += 1 + logger.info(f">>> Received yield #{yield_count}: {type(image)}") chart_image = image # Keep updating to latest # TODO: Could potentially display intermediate images here for faster feedback + logger.info(f">>> Chart generation complete: {yield_count} yields, final image: {type(chart_image)}") + if chart_image is None: + logger.info(">>> Chart is None, creating loading placeholder...") # Catalog not ready yet, show "Loading..." 
with progress if chart_generator.catalog: progress_text = chart_generator.catalog.load_progress @@ -101,24 +115,28 @@ def get_display_image( ) # Mark image as "loading" so UI knows to refresh return_image.is_loading_placeholder = True + logger.info(f">>> Returning loading placeholder: {type(return_image)}") else: + logger.info(">>> Chart ready, converting to red...") # Chart ready, convert to red return_image = ImageChops.multiply( chart_image.convert("RGB"), display_class.colors.red_image ) return_image.is_loading_placeholder = False + logger.info(f">>> Returning final chart image: {type(return_image)}") except Exception as e: - logger.error(f"Chart generation failed: {e}", exc_info=True) + logger.error(f">>> Chart generation failed: {e}", exc_info=True) return_image = None else: if chart_generator is None: - logger.warning("Deep chart requested but chart_generator is None") + logger.warning(">>> Deep chart requested but chart_generator is None") if not deep_catalog_path.exists(): - logger.warning(f"Deep star catalog not found at {deep_catalog_path}") + logger.warning(f">>> Deep star catalog not found at {deep_catalog_path}") # Fallback: "No Image" placeholder if return_image is None: + logger.info(">>> No chart generated, creating 'No Image' placeholder") return_image = Image.new("RGB", display_class.resolution) ri_draw = ImageDraw.Draw(return_image) if burn_in: @@ -129,6 +147,7 @@ def get_display_image( fill=display_class.colors.get(128), ) else: + logger.info(">>> Using POSS image") return_image = Image.open(object_image_path) # rotate for roll / newtonian orientation diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index cee30d292..d137059a6 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -31,8 +31,13 @@ def get_chart_generator(config, shared_state): """Get or create the global chart generator singleton""" global _chart_generator_instance + logger.info(f">>> get_chart_generator() called, instance exists: {_chart_generator_instance is not None}") if _chart_generator_instance is None: + logger.info(">>> Creating new DeepChartGenerator instance...") _chart_generator_instance = DeepChartGenerator(config, shared_state) + logger.info(f">>> DeepChartGenerator created, state: {_chart_generator_instance.get_catalog_state()}") + else: + logger.info(f">>> Returning existing instance, state: {_chart_generator_instance.get_catalog_state()}") return _chart_generator_instance @@ -79,8 +84,11 @@ def ensure_catalog_loading(self): Ensure catalog is loading or loaded Triggers background load if needed """ + logger.info(f">>> ensure_catalog_loading() called, catalog is None: {self.catalog is None}") if self.catalog is None: + logger.info(">>> Calling initialize_catalog()...") self.initialize_catalog() + logger.info(f">>> initialize_catalog() done, state: {self.catalog.state}") if self.catalog.state == CatalogState.NOT_LOADED: # Trigger background load @@ -91,13 +99,15 @@ def ensure_catalog_loading(self): limiting_mag = self.get_limiting_magnitude(sqm) logger.info( - f"Starting catalog load: lat={observer_lat}, mag_limit={limiting_mag:.1f}" + f">>> Starting background catalog load: lat={observer_lat}, mag_limit={limiting_mag:.1f}" ) self.catalog.start_background_load(observer_lat, limiting_mag) + logger.info(f">>> start_background_load() called, new state: {self.catalog.state}") def initialize_catalog(self): """Create catalog instance (doesn't load data yet)""" catalog_path = Path(utils.astro_data_dir, "deep_stars") + logger.info(f">>> 
initialize_catalog() - catalog_path: {catalog_path}") # Check if catalog exists before initializing metadata_file = catalog_path / "metadata.json" @@ -106,8 +116,13 @@ def initialize_catalog(self): logger.warning("To build catalog, run: python -m PiFinder.catalog_tools.gaia_downloader --mag-limit 12 --output /tmp/gaia.csv") logger.warning("Then: python -m PiFinder.catalog_tools.healpix_builder --input /tmp/gaia.csv --output {}/astro_data/deep_stars".format(Path.home() / "PiFinder")) + logger.info(f">>> Creating DeepStarCatalog instance...") + import time + t0 = time.time() self.catalog = DeepStarCatalog(str(catalog_path)) - logger.info(f"Catalog initialized: {catalog_path}") + t_init = (time.time() - t0) * 1000 + logger.info(f">>> DeepStarCatalog.__init__() took {t_init:.1f}ms") + logger.info(f">>> Catalog initialized: {catalog_path}, state: {self.catalog.state}") def generate_chart( self, catalog_object, resolution: Tuple[int, int], burn_in: bool = True, display_class=None, roll=None diff --git a/python/PiFinder/main.py b/python/PiFinder/main.py index 02414315a..90b1bad2e 100644 --- a/python/PiFinder/main.py +++ b/python/PiFinder/main.py @@ -520,6 +520,17 @@ def main( _new_filter = CatalogFilter(shared_state=shared_state) _new_filter.load_from_config(cfg) catalogs.set_catalog_filter(_new_filter) + + # Initialize deep chart generator in background to avoid first-use delay + console.write(" Deep Charts") + console.update() + logger.info(" Initializing deep chart generator...") + from PiFinder.deep_chart import get_chart_generator + chart_gen = get_chart_generator(cfg, shared_state) + # Trigger background loading so catalog is ready when needed + chart_gen.ensure_catalog_loading() + logger.info(" Deep chart background loading started") + console.write(" Menus") console.update() diff --git a/python/PiFinder/solver.py b/python/PiFinder/solver.py index 07a901528..33946852a 100644 --- a/python/PiFinder/solver.py +++ b/python/PiFinder/solver.py @@ -343,6 +343,9 @@ def solver( % ("camera", len(centroids), t_extract) ) + # Initialize solution to prevent UnboundLocalError + solution = {} + if len(centroids) == 0: if log_no_stars_found: logger.info("No stars found, skipping (Logged only once)") diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 29d41f632..2ea4d0d13 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -27,7 +27,7 @@ # Import healpy at module level to avoid first-use delay # This ensures the slow import happens during initialization, not during first chart render try: - import healpy as hp # type: ignore[import-not-found] + import healpy as hp # type: ignore[import-untyped] _HEALPY_AVAILABLE = True except ImportError: hp = None @@ -77,6 +77,8 @@ def __init__(self, catalog_path: str): Args: catalog_path: Path to deep_stars directory containing metadata.json """ + logger.info(f">>> DeepStarCatalog.__init__() called with path: {catalog_path}") + t0 = time.time() self.catalog_path = Path(catalog_path) self.state = CatalogState.NOT_LOADED self.metadata: Optional[Dict[str, Any]] = None @@ -91,6 +93,8 @@ def __init__(self, catalog_path: str): self.load_progress: str = "" # Status message for UI self.load_percent: int = 0 # Progress percentage (0-100) self._index_cache: Dict[str, Any] = {} + t_init = (time.time() - t0) * 1000 + logger.info(f">>> DeepStarCatalog.__init__() completed in {t_init:.1f}ms") def start_background_load( self, observer_lat: Optional[float] = None, limiting_mag: float = 12.0 @@ -102,48 +106,55 @@ def 
start_background_load( observer_lat: Observer latitude for hemisphere filtering (None = full sky) limiting_mag: Magnitude limit for preloading bright stars """ + logger.info(f">>> start_background_load() called, current state: {self.state}") if self.state != CatalogState.NOT_LOADED: - logger.warning("Catalog already loading or loaded") + logger.warning(f">>> Catalog already loading or loaded (state={self.state}), skipping") return - logger.info(f"Starting background load: lat={observer_lat}, mag={limiting_mag}, path={self.catalog_path}") + logger.info(f">>> Starting background load: lat={observer_lat}, mag={limiting_mag}, path={self.catalog_path}") self.state = CatalogState.LOADING self.observer_lat = observer_lat self.limiting_magnitude = limiting_mag # Start background thread + logger.info(">>> Creating background thread...") self.load_thread = threading.Thread( target=self._background_load_worker, daemon=True, name="CatalogLoader" ) self.load_thread.start() - logger.info("Deep catalog background thread started") + logger.info(f">>> Background thread started, thread alive: {self.load_thread.is_alive()}") def _background_load_worker(self): """Background worker - just loads metadata""" + logger.info(">>> _background_load_worker() started") + t_worker_start = time.time() try: # Load metadata self.load_progress = "Loading..." self.load_percent = 50 - logger.info(f"Loading catalog metadata from {self.catalog_path}") + logger.info(f">>> Loading catalog metadata from {self.catalog_path}") metadata_file = self.catalog_path / "metadata.json" if not metadata_file.exists(): - logger.error(f"Catalog metadata not found: {metadata_file}") - logger.error(f"Please build catalog using: python -m PiFinder.catalog_tools.gaia_downloader") + logger.error(f">>> Catalog metadata not found: {metadata_file}") + logger.error(f">>> Please build catalog using: python -m PiFinder.catalog_tools.gaia_downloader") self.load_progress = "Error: catalog not built" self.state = CatalogState.NOT_LOADED return + t0 = time.time() with open(metadata_file, "r") as f: self.metadata = json.load(f) + t_json = (time.time() - t0) * 1000 + logger.info(f">>> metadata.json loaded in {t_json:.1f}ms") self.nside = self.metadata.get("nside", 512) star_count = self.metadata.get('star_count', 0) logger.info( - f"Catalog ready: {star_count:,} stars, " - f"mag limit {self.metadata.get('mag_limit', 0):.1f}" + f">>> Catalog metadata ready: {star_count:,} stars, " + f"mag limit {self.metadata.get('mag_limit', 0):.1f}, nside={self.nside}" ) # Initialize empty structures (no preloading) @@ -154,9 +165,11 @@ def _background_load_worker(self): self.load_progress = "Ready" self.load_percent = 100 self.state = CatalogState.READY + t_worker_total = (time.time() - t_worker_start) * 1000 + logger.info(f">>> _background_load_worker() completed in {t_worker_total:.1f}ms, state: {self.state}") except Exception as e: - logger.error(f"Catalog loading failed: {e}", exc_info=True) + logger.error(f">>> Catalog loading failed: {e}", exc_info=True) self.load_progress = f"Error: {str(e)}" self.state = CatalogState.NOT_LOADED diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 5d0e8d37b..7f455f671 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -165,6 +165,7 @@ def update_object_info(self): """ Generates object text and loads object images """ + logger.info(f">>> update_object_info() called for {self.object.display_name if self.object else 'None'}") # Title... 
self.title = self.object.display_name @@ -242,8 +243,11 @@ def update_object_info(self): prev_object_image = self.object_image # Get or create chart generator (owned by UI layer, not cat_images) + logger.info(">>> Getting chart generator...") chart_gen = self._get_chart_generator() + logger.info(f">>> Chart generator obtained, state: {chart_gen.get_catalog_state() if chart_gen else 'None'}") + logger.info(f">>> Calling cat_images.get_display_image with force_deep_chart={self._force_deep_chart}") self.object_image = cat_images.get_display_image( self.object, str(self.config_object.equipment.active_eyepiece), @@ -257,6 +261,7 @@ def update_object_info(self): chart_generator=chart_gen, # Pass our chart generator to cat_images force_deep_chart=self._force_deep_chart, # Toggle state ) + logger.info(f">>> cat_images.get_display_image returned: {type(self.object_image)}") # Track if we're showing a "Loading..." placeholder for deep chart # Check if image has the special "is_loading_placeholder" attribute @@ -898,9 +903,15 @@ def key_number(self, number): Handle number key presses 0: Toggle between POSS image and deep chart (when both are available) """ + logger.info(f">>> key_number({number}) called") if number == 0: + logger.info(f">>> Toggling _force_deep_chart (was: {self._force_deep_chart})") # Toggle the flag self._force_deep_chart = not self._force_deep_chart + logger.info(f">>> _force_deep_chart now: {self._force_deep_chart}") # Reload image with new setting + logger.info(">>> Calling update_object_info()...") self.update_object_info() + logger.info(">>> Calling update()...") self.update() + logger.info(">>> key_number(0) complete") From 75db432cdfe8757ea3db2c47223b756ded042bb4 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 10:42:48 +0100 Subject: [PATCH 12/27] fix logging issue --- python/PiFinder/solver.py | 10 +++++----- python/PiFinder/ui/object_details.py | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/python/PiFinder/solver.py b/python/PiFinder/solver.py index 33946852a..049fb069d 100644 --- a/python/PiFinder/solver.py +++ b/python/PiFinder/solver.py @@ -399,12 +399,12 @@ def solver( del solution["epoch_proper_motion"] del solution["cache_hit_fraction"] - solved |= solution + solved |= solution - total_tetra_time = t_extract + solved["T_solve"] - if total_tetra_time > 1000: - console_queue.put(f"SLV: Long: {total_tetra_time}") - logger.warning("Long solver time: %i", total_tetra_time) + total_tetra_time = t_extract + solved["T_solve"] + if total_tetra_time > 1000: + console_queue.put(f"SLV: Long: {total_tetra_time}") + logger.warning("Long solver time: %i", total_tetra_time) if solved["RA"] is not None: # RA, Dec, Roll at the center of the camera's FoV: diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 7f455f671..16950387d 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -23,9 +23,11 @@ import functools from PiFinder.db.observations_db import ObservationsDatabase +import logging import numpy as np import time +logger = logging.getLogger("PiFinder.UIObjectDetails") # Constants for display modes DM_DESC = 0 # Display mode for description From e81320fd592e9f89bebc183316aaf4d3c94a8245 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 10:58:47 +0100 Subject: [PATCH 13/27] some yield issues --- python/PiFinder/cat_images.py | 11 ++++------- python/PiFinder/star_catalog.py | 19 +++++++++++-------- 2 files changed, 15 insertions(+), 15 
deletions(-) diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py index 66c2af981..881d04510 100644 --- a/python/PiFinder/cat_images.py +++ b/python/PiFinder/cat_images.py @@ -77,11 +77,9 @@ def get_display_image( chart_generator.ensure_catalog_loading() logger.info(f">>> Catalog state: {chart_generator.get_catalog_state()}") - # Try to generate chart (progressive generator - consume all yields) - # The generator yields intermediate images as magnitude bands load - # We'll use the final (most complete) image + # Generate chart - consume ALL yields to get final complete chart chart_image = None - logger.info(">>> Starting to consume chart generator yields...") + logger.info(">>> Starting chart generation (consuming all yields)...") yield_count = 0 for image in chart_generator.generate_chart( catalog_object, @@ -92,10 +90,9 @@ def get_display_image( ): yield_count += 1 logger.info(f">>> Received yield #{yield_count}: {type(image)}") - chart_image = image # Keep updating to latest - # TODO: Could potentially display intermediate images here for faster feedback + chart_image = image # Keep last (most complete) image - logger.info(f">>> Chart generation complete: {yield_count} yields, final image: {type(chart_image)}") + logger.info(f">>> Chart complete after {yield_count} yields: {type(chart_image)}") if chart_image is None: logger.info(">>> Chart is None, creating loading placeholder...") diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 2ea4d0d13..96f2d8f18 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -316,9 +316,11 @@ def get_stars_for_fov_progressive( tiles = [t for t in tiles if t in self.visible_tiles] # Limit tile count to prevent excessive loading - if len(tiles) > 100: - logger.warning(f"Large tile count ({len(tiles)}) detected! Limiting to 100 tiles") - tiles = tiles[:100] + # For small FOVs (<1°), 20-30 tiles is more than enough + MAX_TILES = 25 + if len(tiles) > MAX_TILES: + logger.warning(f"Large tile count ({len(tiles)}) detected! Limiting to {MAX_TILES} tiles") + tiles = tiles[:MAX_TILES] # Load stars progressively by magnitude band (bright to faint) all_stars = [] @@ -445,12 +447,13 @@ def get_stars_for_fov( stars_raw: List[Tuple[float, float, float, float, float]] = [] # To prevent UI blocking, limit the number of tiles loaded at once - # For large tile counts (>100), only load the most important tiles - if len(tiles) > 100: - logger.warning(f"Large tile count ({len(tiles)}) detected! Limiting to 100 tiles to prevent UI freeze") + # For small FOVs (<1°), 20-30 tiles is more than enough + MAX_TILES = 25 + if len(tiles) > MAX_TILES: + logger.warning(f"Large tile count ({len(tiles)}) detected! 
Limiting to {MAX_TILES} tiles to prevent UI freeze") # Tiles from query_disc are roughly ordered by distance from center - # Keep the first 100 which are closest to FOV center - tiles = tiles[:100] + # Keep the first MAX_TILES which are closest to FOV center + tiles = tiles[:MAX_TILES] cache_hits = 0 cache_misses = 0 From 3c2116f46a8da47459772c2e4dc11ed7db045d81 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 11:01:40 +0100 Subject: [PATCH 14/27] More logging --- python/PiFinder/star_catalog.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 96f2d8f18..b3a17e1dd 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -935,9 +935,14 @@ def _load_tiles_batch_single_band( MAX_GAP = 100 * 1024 # 100KB gap tolerance all_stars = [] + logger.info(f">>> Batch loading {len(read_ops)} tiles for mag {mag_min}-{mag_max}") with open(tiles_file, "rb") as f: i = 0 + chunk_num = 0 while i < len(read_ops): + chunk_num += 1 + logger.debug(f">>> Processing chunk {chunk_num}, tile {i+1}/{len(read_ops)}") + tile_id, tile_info = read_ops[i] offset = tile_info["offset"] chunk_end = offset + tile_info.get("compressed_size", tile_info["size"]) @@ -945,7 +950,13 @@ def _load_tiles_batch_single_band( # Find consecutive tiles for chunk reading tiles_in_chunk = [(tile_id, tile_info)] j = i + 1 + inner_iterations = 0 while j < len(read_ops): + inner_iterations += 1 + if inner_iterations > 1000: + logger.error(f">>> INFINITE LOOP DETECTED in chunk consolidation! j={j}, len={len(read_ops)}, i={i}") + break # Safety break + next_tile_id, next_tile_info = read_ops[j] next_offset = next_tile_info["offset"] if next_offset - chunk_end <= MAX_GAP: @@ -957,11 +968,14 @@ def _load_tiles_batch_single_band( # Read entire chunk chunk_size = chunk_end - offset + logger.debug(f">>> Reading chunk: {len(tiles_in_chunk)} tiles, size={chunk_size} bytes") f.seek(offset) chunk_data = f.read(chunk_size) + logger.debug(f">>> Chunk read complete, processing tiles...") # Process each tile in chunk - for tile_id, tile_info in tiles_in_chunk: + for tile_idx, (tile_id, tile_info) in enumerate(tiles_in_chunk): + logger.debug(f">>> Processing tile {tile_idx+1}/{len(tiles_in_chunk)} (id={tile_id})") tile_offset = tile_info["offset"] - offset compressed_size = tile_info.get("compressed_size") size = tile_info["size"] From d4c71152b0ff54d36f292c87067c7ae798e6ce2f Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 11:22:48 +0100 Subject: [PATCH 15/27] Fix images not yielding --- python/PiFinder/cat_images.py | 59 ++++++++++++++++++++-------- python/PiFinder/star_catalog.py | 13 ++++-- python/PiFinder/ui/object_details.py | 28 +++++++++++-- 3 files changed, 78 insertions(+), 22 deletions(-) diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py index 881d04510..6a2417ed4 100644 --- a/python/PiFinder/cat_images.py +++ b/python/PiFinder/cat_images.py @@ -77,23 +77,49 @@ def get_display_image( chart_generator.ensure_catalog_loading() logger.info(f">>> Catalog state: {chart_generator.get_catalog_state()}") - # Generate chart - consume ALL yields to get final complete chart + # RETURN THE GENERATOR ITSELF - don't consume it here! 
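+            # Each yield below is a complete, display-ready frame: a
+            # "Loading..." placeholder while the catalog is still warming
+            # up, then progressively deeper star renders, so the consumer
+            # only ever needs to paste the most recent yield.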
+ # The UI will consume yields and update display for each one + logger.info(">>> Returning chart generator (not consuming yields here)") + + # Create generator that yields converted images + def chart_image_generator(): + for image in chart_generator.generate_chart( + catalog_object, + (display_class.fov_res, display_class.fov_res), + burn_in=burn_in, + display_class=display_class, + roll=roll + ): + if image is None: + # Catalog not ready yet, show "Loading..." with progress + if chart_generator.catalog: + progress_text = chart_generator.catalog.load_progress + progress_percent = chart_generator.catalog.load_percent + else: + progress_text = "Initializing..." + progress_percent = 0 + + loading_image = create_loading_image( + display_class, + message="Loading Chart...", + progress_text=progress_text, + progress_percent=progress_percent + ) + loading_image.is_loading_placeholder = True + yield loading_image + else: + # Convert chart to red and yield it + red_image = ImageChops.multiply( + image.convert("RGB"), + display_class.colors.red_image + ) + red_image.is_loading_placeholder = False + yield red_image + + return chart_image_generator() + + # OLD CODE BELOW - never reached chart_image = None - logger.info(">>> Starting chart generation (consuming all yields)...") - yield_count = 0 - for image in chart_generator.generate_chart( - catalog_object, - (display_class.fov_res, display_class.fov_res), - burn_in=burn_in, - display_class=display_class, - roll=roll - ): - yield_count += 1 - logger.info(f">>> Received yield #{yield_count}: {type(image)}") - chart_image = image # Keep last (most complete) image - - logger.info(f">>> Chart complete after {yield_count} yields: {type(chart_image)}") - if chart_image is None: logger.info(">>> Chart is None, creating loading placeholder...") # Catalog not ready yet, show "Loading..." 
with progress @@ -242,6 +268,7 @@ def __init__(self, text): burn_in=True ) + logger.info(f">>> get_display_image() RETURNING: {type(return_image)}, size={return_image.size if return_image else None}, has_is_loading={hasattr(return_image, 'is_loading_placeholder') if return_image else False}") return return_image diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index b3a17e1dd..6d6a8e5ea 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -337,14 +337,16 @@ def get_stars_for_fov_progressive( if mag_min >= mag_limit: break - logger.info(f"PROGRESSIVE: Loading mag band {mag_min}-{mag_max}") + logger.info(f">>> PROGRESSIVE: Loading mag band {mag_min}-{mag_max}, tiles={len(tiles)}, mag_limit={mag_limit}") import time t_band_start = time.time() # Load stars from this magnitude band only + logger.info(f">>> Calling _load_tiles_for_mag_band...") band_stars = self._load_tiles_for_mag_band( tiles, mag_band_info, mag_limit, ra_deg, dec_deg, fov_deg ) + logger.info(f">>> _load_tiles_for_mag_band returned {len(band_stars)} stars") t_band_end = time.time() logger.info(f"PROGRESSIVE: Mag band {mag_min}-{mag_max} loaded {len(band_stars)} stars in {(t_band_end-t_band_start)*1000:.1f}ms") @@ -525,18 +527,23 @@ def _load_tiles_for_mag_band( mag_max = mag_band_info["max"] band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" + logger.info(f">>> _load_tiles_for_mag_band: mag {mag_min}-{mag_max}, band_dir={band_dir}, tiles={len(tile_ids)}") + # Check if this band directory exists if not band_dir.exists(): - logger.debug(f"Magnitude band directory not found: {band_dir}") + logger.warning(f">>> Magnitude band directory not found: {band_dir}") return [] # For compact format, use vectorized batch loading per band assert self.metadata is not None, "metadata must be loaded" is_compact = self.metadata.get("format") == "compact" + logger.info(f">>> Format is_compact={is_compact}, calling _load_tiles_batch_single_band...") if is_compact: - return self._load_tiles_batch_single_band( + result = self._load_tiles_batch_single_band( tile_ids, mag_band_info, mag_limit ) + logger.info(f">>> _load_tiles_batch_single_band returned {len(result)} stars") + return result else: # Legacy format - load tiles one by one (will load all bands for each tile) # This is less efficient but legacy format doesn't support per-band loading diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 16950387d..8b1aa5bb2 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -250,7 +250,9 @@ def update_object_info(self): logger.info(f">>> Chart generator obtained, state: {chart_gen.get_catalog_state() if chart_gen else 'None'}") logger.info(f">>> Calling cat_images.get_display_image with force_deep_chart={self._force_deep_chart}") - self.object_image = cat_images.get_display_image( + + # get_display_image returns either an image directly (POSS) or a generator (deep chart) + result = cat_images.get_display_image( self.object, str(self.config_object.equipment.active_eyepiece), self.config_object.equipment.calc_tfov(), @@ -263,7 +265,24 @@ def update_object_info(self): chart_generator=chart_gen, # Pass our chart generator to cat_images force_deep_chart=self._force_deep_chart, # Toggle state ) - logger.info(f">>> cat_images.get_display_image returned: {type(self.object_image)}") + + # Check if it's a generator (progressive deep chart) or direct image (POSS) + if hasattr(result, '__iter__') and 
hasattr(result, '__next__'): + # It's a generator - consume yields and update display for each one + logger.info(">>> get_display_image returned GENERATOR, consuming yields progressively...") + for yield_num, image in enumerate(result, 1): + logger.info(f">>> Received yield #{yield_num} from generator: {type(image)}") + self.object_image = image + # Force immediate screen update to show this progressive result + self.update(force=True) + logger.info(f">>> Display updated with yield #{yield_num}") + logger.info(f">>> Generator exhausted, final image: {type(self.object_image)}") + else: + # Direct image (POSS) + logger.info(f">>> get_display_image returned direct image: {type(result)}") + self.object_image = result + + logger.info(f">>> update_object_info() complete, self.object_image is now: {type(self.object_image)}") # Track if we're showing a "Loading..." placeholder for deep chart # Check if image has the special "is_loading_placeholder" attribute @@ -914,6 +933,9 @@ def key_number(self, number): # Reload image with new setting logger.info(">>> Calling update_object_info()...") self.update_object_info() + logger.info(f">>> After update_object_info(), self.object_image type: {type(self.object_image)}, size: {self.object_image.size if self.object_image else None}") logger.info(">>> Calling update()...") - self.update() + update_result = self.update() + logger.info(f">>> update() returned: {type(update_result)}") logger.info(">>> key_number(0) complete") + return True From 8c1099b6b1ec4c26d995caeacd17f9d85af952ac Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 11:45:51 +0100 Subject: [PATCH 16/27] fix images not showing --- python/PiFinder/star_catalog.py | 7 +++++++ python/PiFinder/ui/object_details.py | 21 +++++++++++++++++---- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 6d6a8e5ea..b5d042832 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -913,16 +913,23 @@ def _load_tiles_batch_single_band( if not hasattr(self, '_index_cache'): self._index_cache = {} + logger.info(f">>> Checking index cache for {cache_key}, in_cache={cache_key in self._index_cache}") if cache_key not in self._index_cache: if index_file_bin.exists(): + logger.info(f">>> Reading binary index from {index_file_bin}") self._index_cache[cache_key] = self._read_binary_index(index_file_bin) + logger.info(f">>> Binary index loaded, {len(self._index_cache[cache_key])} tiles in index") elif index_file_json.exists(): + logger.info(f">>> Reading JSON index from {index_file_json}") with open(index_file_json, "r") as f: self._index_cache[cache_key] = json.load(f) + logger.info(f">>> JSON index loaded, {len(self._index_cache[cache_key])} tiles in index") else: + logger.warning(f">>> No index file found for {cache_key}") return [] index = self._index_cache[cache_key] + logger.info(f">>> Index ready, building read_ops for {len(tile_ids)} tiles...") # Collect all tile read operations read_ops = [] diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 8b1aa5bb2..5b54fec8b 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -34,6 +34,7 @@ DM_LOCATE = 1 # Display mode for LOCATE DM_POSS = 2 # Display mode for POSS DM_SDSS = 3 # Display mode for SDSS +DM_CHART = 4 # Display mode for deep chart class UIObjectDetails(UIModule): @@ -258,7 +259,7 @@ def update_object_info(self): 
self.config_object.equipment.calc_tfov(), roll, self.display_class, - burn_in=self.object_display_mode in [DM_POSS, DM_SDSS], + burn_in=self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART], magnification=magnification, config_object=self.config_object, shared_state=self.shared_state, @@ -290,7 +291,7 @@ def update_object_info(self): self.object_image is not None and hasattr(self.object_image, 'is_loading_placeholder') and self.object_image.is_loading_placeholder - and self.object_display_mode in [DM_POSS, DM_SDSS] + and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] ) # Detect if we're showing a deep chart (forced or automatic due to no POSS image) @@ -298,7 +299,7 @@ def update_object_info(self): self._is_deep_chart = ( self.object_image is not None and hasattr(self.object_image, 'is_loading_placeholder') - and self.object_display_mode in [DM_POSS, DM_SDSS] + and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] ) def active(self): @@ -700,8 +701,11 @@ def update(self, force=True): self.clear_screen() # paste image - if self.object_display_mode in [DM_POSS, DM_SDSS]: + logger.info(f">>> update(): object_display_mode={self.object_display_mode}, DM_POSS={DM_POSS}, DM_SDSS={DM_SDSS}, DM_CHART={DM_CHART}, will_paste={self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]}") + logger.info(f">>> update(): object_image type={type(self.object_image)}, size={self.object_image.size if self.object_image else None}") + if self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]: self.screen.paste(self.object_image) + logger.info(f">>> Image pasted to screen") # If showing deep chart, draw crosshair based on config if self._force_deep_chart and self.object_image is not None: @@ -930,6 +934,15 @@ def key_number(self, number): # Toggle the flag self._force_deep_chart = not self._force_deep_chart logger.info(f">>> _force_deep_chart now: {self._force_deep_chart}") + + # Set appropriate display mode: DM_CHART for deep chart, DM_POSS for POSS image + if self._force_deep_chart: + logger.info(f">>> Setting object_display_mode to DM_CHART (was {self.object_display_mode})") + self.object_display_mode = DM_CHART + else: + logger.info(f">>> Setting object_display_mode to DM_POSS (was {self.object_display_mode})") + self.object_display_mode = DM_POSS + # Reload image with new setting logger.info(">>> Calling update_object_info()...") self.update_object_info() From d678797b7468c3e67c6346022e641bbf4d345d71 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 14:55:36 +0100 Subject: [PATCH 17/27] flag fix --- python/PiFinder/ui/object_details.py | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 5b54fec8b..5810f591b 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -169,6 +169,12 @@ def update_object_info(self): Generates object text and loads object images """ logger.info(f">>> update_object_info() called for {self.object.display_name if self.object else 'None'}") + + # CRITICAL: Clear loading flag at START to prevent recursive update() calls + # during generator consumption. If we don't do this, calling self.update() + # while consuming yields will trigger update() -> update_object_info() recursion. + self._is_showing_loading_chart = False + # Title... 
self.title = self.object.display_name @@ -703,10 +709,37 @@ def update(self, force=True): # paste image logger.info(f">>> update(): object_display_mode={self.object_display_mode}, DM_POSS={DM_POSS}, DM_SDSS={DM_SDSS}, DM_CHART={DM_CHART}, will_paste={self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]}") logger.info(f">>> update(): object_image type={type(self.object_image)}, size={self.object_image.size if self.object_image else None}") + + # DEBUG: Check if object_image has the is_loading_placeholder attribute (indicates it's a chart) + if self.object_image: + is_chart = hasattr(self.object_image, 'is_loading_placeholder') + logger.info(f">>> update(): object_image has is_loading_placeholder={is_chart}, _force_deep_chart={self._force_deep_chart}") + if self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]: + # DEBUG: Check if image has any non-black pixels + if self.object_image and self.object_display_mode == DM_CHART: + import numpy as np + img_array = np.array(self.object_image) + non_zero = np.count_nonzero(img_array) + max_val = np.max(img_array) + logger.info(f">>> CHART IMAGE DEBUG: non-zero pixels={non_zero}, max_value={max_val}, shape={img_array.shape}") + self.screen.paste(self.object_image) logger.info(f">>> Image pasted to screen") + # DEBUG: Save screen buffer to file for inspection + if self.object_display_mode == DM_CHART and self._force_deep_chart: + try: + import os + debug_path = "/tmp/pifinder_chart_debug.png" + self.object_image.save(debug_path) + logger.info(f">>> SAVED object_image to {debug_path}") + screen_path = "/tmp/pifinder_screen_debug.png" + self.screen.save(screen_path) + logger.info(f">>> SAVED screen buffer to {screen_path}") + except Exception as e: + logger.error(f">>> Failed to save debug images: {e}") + # If showing deep chart, draw crosshair based on config if self._force_deep_chart and self.object_image is not None: crosshair_mode = self.config_object.get_option("obj_chart_crosshair") From b1863d0af248777b2bf33fb161af1659057dcd6f Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 17:09:31 +0100 Subject: [PATCH 18/27] throwing things against the wall --- python/PiFinder/ui/object_details.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 5810f591b..1e25e4d87 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -56,6 +56,7 @@ def __init__(self, *args, **kwargs): self.object_list = self.item_definition["object_list"] self.object_display_mode = DM_LOCATE self.object_image = None + self._chart_generator = None # Active generator for progressive chart updates self._is_showing_loading_chart = False # Track if showing "Loading..." 
for deep chart self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists self._is_deep_chart = False # Track if currently showing a deep chart (auto or forced) @@ -275,18 +276,14 @@ def update_object_info(self): # Check if it's a generator (progressive deep chart) or direct image (POSS) if hasattr(result, '__iter__') and hasattr(result, '__next__'): - # It's a generator - consume yields and update display for each one - logger.info(">>> get_display_image returned GENERATOR, consuming yields progressively...") - for yield_num, image in enumerate(result, 1): - logger.info(f">>> Received yield #{yield_num} from generator: {type(image)}") - self.object_image = image - # Force immediate screen update to show this progressive result - self.update(force=True) - logger.info(f">>> Display updated with yield #{yield_num}") - logger.info(f">>> Generator exhausted, final image: {type(self.object_image)}") + # It's a generator - store it for progressive consumption by update() + logger.info(">>> get_display_image returned GENERATOR, storing for progressive updates...") + self._chart_generator = result + self.object_image = None # Will be set by first yield else: # Direct image (POSS) logger.info(f">>> get_display_image returned direct image: {type(result)}") + self._chart_generator = None self.object_image = result logger.info(f">>> update_object_info() complete, self.object_image is now: {type(self.object_image)}") @@ -683,6 +680,17 @@ def update(self, force=True): import logging logger = logging.getLogger("ObjectDetails") + # If we have a chart generator, consume one yield to get the next progressive update + if hasattr(self, '_chart_generator') and self._chart_generator is not None: + try: + next_image = next(self._chart_generator) + logger.info(f">>> update(): Consumed next chart yield: {type(next_image)}") + self.object_image = next_image + force = True # Force screen update for progressive chart + except StopIteration: + logger.info(">>> update(): Chart generator exhausted") + self._chart_generator = None # Generator exhausted + # Check if we're showing "Loading..." for a deep chart # and if catalog is now ready, regenerate the image if self._is_showing_loading_chart: From 90e68cefa3e9195dc6f74085b1a056d042af684b Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 18:52:04 +0100 Subject: [PATCH 19/27] Speed up indexes --- python/PiFinder/star_catalog.py | 50 +++++++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index b5d042832..4629540bd 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -787,9 +787,9 @@ def _load_tile_compact( return stars - def _read_binary_index(self, index_file: Path) -> dict: + def _read_binary_index(self, index_file: Path, needed_tiles: Optional[set] = None) -> dict: """ - Read binary index file + Read binary index file - optimized to only load needed tiles for large indices Format v1 (uncompressed): Header: [version:4][num_tiles:4] @@ -799,6 +799,11 @@ def _read_binary_index(self, index_file: Path) -> dict: Header: [version:4][num_tiles:4] Per tile: [tile_id:4][offset:8][compressed_size:4][uncompressed_size:4] + Args: + index_file: Path to the index file + needed_tiles: Set of tile IDs we actually need. If provided and index is large (>100K tiles), + only load these specific tiles instead of the whole index. 
+ Returns: Dict mapping tile_id (as string) -> {"offset": int, "size": int, "compressed_size": int (optional)} """ @@ -809,6 +814,44 @@ def _read_binary_index(self, index_file: Path) -> dict: header = f.read(8) version, num_tiles = struct.unpack("100K tiles), only load what we need if a subset is specified + # This avoids loading millions of entries when we only need a few dozen + if needed_tiles is not None and num_tiles > 100000 and len(needed_tiles) < num_tiles / 100: + logger.info(f">>> Large index detected ({num_tiles:,} tiles), loading only {len(needed_tiles)} needed tiles") + entry_size = 16 if version == 1 else 20 + + # Convert needed_tiles to integers for comparison + needed_int = {int(t) if isinstance(t, str) else t for t in needed_tiles} + + # Read only needed entries by seeking + for _ in range(num_tiles): + if version == 1: + tile_data = f.read(16) + if len(tile_data) < 16: + break + tile_id, offset, size = struct.unpack("= len(needed_int): + break # Found all needed tiles + else: # version == 2 + tile_data = f.read(20) + if len(tile_data) < 20: + break + tile_id, offset, compressed_size, uncompressed_size = struct.unpack("= len(needed_int): + break # Found all needed tiles + + logger.info(f">>> Loaded {len(index)} entries from large index (scanned to find matches)") + return index + + # For small indices or when we need everything, load all entries if version == 1: # Uncompressed format for _ in range(num_tiles): @@ -917,7 +960,8 @@ def _load_tiles_batch_single_band( if cache_key not in self._index_cache: if index_file_bin.exists(): logger.info(f">>> Reading binary index from {index_file_bin}") - self._index_cache[cache_key] = self._read_binary_index(index_file_bin) + # Pass needed tiles for optimization + self._index_cache[cache_key] = self._read_binary_index(index_file_bin, needed_tiles=set(tile_ids)) logger.info(f">>> Binary index loaded, {len(self._index_cache[cache_key])} tiles in index") elif index_file_json.exists(): logger.info(f">>> Reading JSON index from {index_file_json}") From bcddba67eec54a3933be903e713717a51dd20721 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sat, 22 Nov 2025 18:56:52 +0100 Subject: [PATCH 20/27] lifted tile limit --- python/PiFinder/star_catalog.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 4629540bd..38631db37 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -315,13 +315,6 @@ def get_stars_for_fov_progressive( if self.visible_tiles: tiles = [t for t in tiles if t in self.visible_tiles] - # Limit tile count to prevent excessive loading - # For small FOVs (<1°), 20-30 tiles is more than enough - MAX_TILES = 25 - if len(tiles) > MAX_TILES: - logger.warning(f"Large tile count ({len(tiles)}) detected! 
Limiting to {MAX_TILES} tiles") - tiles = tiles[:MAX_TILES] - # Load stars progressively by magnitude band (bright to faint) all_stars = [] From ea11bc9448ff9551c65e76774cba6e02023cb046 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Sun, 23 Nov 2025 23:06:58 +0100 Subject: [PATCH 21/27] Added bloom filters to speed up charts --- python/PiFinder/cat_images.py | 24 +- python/PiFinder/deep_chart.py | 80 +- python/PiFinder/solver.py | 4 +- python/PiFinder/sqm/noise_floor.py | 4 +- python/PiFinder/sqm/sqm.py | 4 +- python/PiFinder/star_catalog.py | 1217 ++++++++++++++++++++------ python/PiFinder/ui/object_details.py | 93 +- python/tests/test_bloom_filter.py | 382 ++++++++ python/tests/test_star_catalog.py | 138 +++ 9 files changed, 1549 insertions(+), 397 deletions(-) create mode 100644 python/tests/test_bloom_filter.py create mode 100644 python/tests/test_star_catalog.py diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py index 6a2417ed4..a4693cc6c 100644 --- a/python/PiFinder/cat_images.py +++ b/python/PiFinder/cat_images.py @@ -46,15 +46,15 @@ def get_display_image( Required for deep chart generation """ - logger.info(f">>> get_display_image() called for {catalog_object.display_name if catalog_object else 'None'}") - logger.info(f">>> force_deep_chart={force_deep_chart}, chart_generator={chart_generator is not None}") + logger.debug(f">>> get_display_image() called for {catalog_object.display_name if catalog_object else 'None'}") + logger.debug(f">>> force_deep_chart={force_deep_chart}, chart_generator={chart_generator is not None}") object_image_path = resolve_image_name(catalog_object, source="POSS") - logger.info(f">>> POSS image path: {object_image_path}, exists: {os.path.exists(object_image_path)}") + logger.debug(f">>> POSS image path: {object_image_path}, exists: {os.path.exists(object_image_path)}") # If force_deep_chart is True, skip POSS image even if it exists if force_deep_chart or not os.path.exists(object_image_path): - logger.info(f">>> Will use deep chart (force={force_deep_chart}, poss_missing={not os.path.exists(object_image_path)})") + logger.debug(f">>> Will use deep chart (force={force_deep_chart}, poss_missing={not os.path.exists(object_image_path)})") # Try to generate deep chart if catalog available return_image = None @@ -64,22 +64,22 @@ def get_display_image( deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") - logger.info(f">>> Deep chart request: chart_generator={chart_generator is not None}, catalog_exists={deep_catalog_path.exists()}, path={deep_catalog_path}") + logger.debug(f">>> Deep chart request: chart_generator={chart_generator is not None}, catalog_exists={deep_catalog_path.exists()}, path={deep_catalog_path}") # Try to generate deep chart if chart_generator was passed in if chart_generator is not None and deep_catalog_path.exists(): - logger.info(">>> chart_generator and deep catalog available, generating chart...") + logger.debug(">>> chart_generator and deep catalog available, generating chart...") try: from PiFinder.image_utils import create_loading_image # Ensure catalog loading started - logger.info(">>> Calling chart_generator.ensure_catalog_loading()...") + logger.debug(">>> Calling chart_generator.ensure_catalog_loading()...") chart_generator.ensure_catalog_loading() - logger.info(f">>> Catalog state: {chart_generator.get_catalog_state()}") + logger.debug(f">>> Catalog state: {chart_generator.get_catalog_state()}") # RETURN THE GENERATOR ITSELF - don't consume it here! 
# The UI will consume yields and update display for each one - logger.info(">>> Returning chart generator (not consuming yields here)") + logger.debug(">>> Returning chart generator (not consuming yields here)") # Create generator that yields converted images def chart_image_generator(): @@ -159,7 +159,7 @@ def chart_image_generator(): # Fallback: "No Image" placeholder if return_image is None: - logger.info(">>> No chart generated, creating 'No Image' placeholder") + logger.debug(">>> No chart generated, creating 'No Image' placeholder") return_image = Image.new("RGB", display_class.resolution) ri_draw = ImageDraw.Draw(return_image) if burn_in: @@ -170,7 +170,7 @@ def chart_image_generator(): fill=display_class.colors.get(128), ) else: - logger.info(">>> Using POSS image") + logger.debug(">>> Using POSS image") return_image = Image.open(object_image_path) # rotate for roll / newtonian orientation @@ -268,7 +268,7 @@ def __init__(self, text): burn_in=True ) - logger.info(f">>> get_display_image() RETURNING: {type(return_image)}, size={return_image.size if return_image else None}, has_is_loading={hasattr(return_image, 'is_loading_placeholder') if return_image else False}") + logger.debug(f">>> get_display_image() RETURNING: {type(return_image)}, size={return_image.size if return_image else None}, has_is_loading={hasattr(return_image, 'is_loading_placeholder') if return_image else False}") return return_image diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index d137059a6..9a21af256 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -31,13 +31,13 @@ def get_chart_generator(config, shared_state): """Get or create the global chart generator singleton""" global _chart_generator_instance - logger.info(f">>> get_chart_generator() called, instance exists: {_chart_generator_instance is not None}") + logger.debug(f">>> get_chart_generator() called, instance exists: {_chart_generator_instance is not None}") if _chart_generator_instance is None: logger.info(">>> Creating new DeepChartGenerator instance...") _chart_generator_instance = DeepChartGenerator(config, shared_state) logger.info(f">>> DeepChartGenerator created, state: {_chart_generator_instance.get_catalog_state()}") else: - logger.info(f">>> Returning existing instance, state: {_chart_generator_instance.get_catalog_state()}") + logger.debug(f">>> Returning existing instance, state: {_chart_generator_instance.get_catalog_state()}") return _chart_generator_instance @@ -84,7 +84,7 @@ def ensure_catalog_loading(self): Ensure catalog is loading or loaded Triggers background load if needed """ - logger.info(f">>> ensure_catalog_loading() called, catalog is None: {self.catalog is None}") + logger.debug(f">>> ensure_catalog_loading() called, catalog is None: {self.catalog is None}") if self.catalog is None: logger.info(">>> Calling initialize_catalog()...") self.initialize_catalog() @@ -152,7 +152,7 @@ def generate_chart( if cache_key in self.chart_cache: # Return cached base image (without crosshair) # Crosshair will be added by add_pulsating_crosshair() each frame - logger.info(f"Chart cache HIT for {cache_key}") + logger.debug(f"Chart cache HIT for {cache_key}") yield self.chart_cache[cache_key] return @@ -168,9 +168,10 @@ def generate_chart( sqm = self.shared_state.sqm() mag_limit_calculated = self.get_limiting_magnitude(sqm) - # For display, keep the calculated value (may be >17) # For query, cap at catalog max mag_limit_query = min(mag_limit_calculated, 17.0) + + logger.info(f">>> Mag 
Limit: calculated={mag_limit_calculated:.2f}, query={mag_limit_query:.2f}, sqm={sqm.value if sqm else 'None'}") # Query stars PROGRESSIVELY (bright to faint) # This is a generator that yields partial results as each magnitude band loads @@ -237,7 +238,7 @@ def generate_chart( t_render_end = time.time() logger.info( f"PROGRESSIVE: Rendered {len(stars)} stars in {(t_render_end-t_render_start)*1000:.1f}ms " - f"(complete={is_complete})" + f"(complete={is_complete}, mag_limit={mag_limit_query:.1f})" ) final_image = image @@ -265,7 +266,7 @@ def generate_chart( def render_chart( self, - stars, + stars: np.ndarray, center_ra: float, center_dec: float, fov: float, @@ -279,7 +280,7 @@ def render_chart( Uses fast vectorized stereographic projection Args: - stars: List of (ra, dec, mag) tuples + stars: Numpy array (N, 3) of (ra, dec, mag) center_ra: Center RA in degrees center_dec: Center Dec in degrees fov: Field of view in degrees @@ -298,6 +299,8 @@ def render_chart( image_array = np.zeros((height, width, 3), dtype=np.uint8) image = Image.new("RGB", (width, height), (0, 0, 0)) draw = ImageDraw.Draw(image) + + logger.info(f"Render Chart: {len(stars)} stars input, center=({center_ra:.4f}, {center_dec:.4f}), fov={fov:.4f}, res={resolution}") if len(stars) == 0: # Still draw crosshair even if no stars @@ -310,12 +313,13 @@ def render_chart( # Convert to numpy arrays for vectorized operations t1 = time.time() - stars_array = np.array(stars) + # stars is already a numpy array (N, 3) + stars_array = stars ra_arr = stars_array[:, 0] dec_arr = stars_array[:, 1] mag_arr = stars_array[:, 2] t2 = time.time() - logger.debug(f" Array conversion: {(t2-t1)*1000:.1f}ms") + # logger.debug(f" Array conversion: {(t2-t1)*1000:.1f}ms") # Fast stereographic projection (vectorized) # Convert degrees to radians @@ -391,6 +395,8 @@ def render_chart( mag_visible = mag_arr[mask] ra_visible = ra_arr[mask] dec_visible = dec_arr[mask] + + logger.info(f"Render Chart: {len(x_visible)} stars visible on screen (of {len(stars)} total)") # Scale brightness based on magnitude range in current field # Brightest star in field → 255, faintest → 50 @@ -447,50 +453,12 @@ def render_chart( t_end = time.time() logger.debug(f" Total render time: {(t_end-t_start)*1000:.1f}ms") - return image - - def add_pulsating_crosshair(self, image: Image.Image) -> Image.Image: - """ - Add pulsating crosshair to center of image - Called each frame to animate - does not modify original image - - Args: - image: Base chart image (will be copied) - - Returns: - New image with crosshair overlay - """ - import time + # Tag image as a deep chart (not a loading placeholder) + # This enables the correct marking menu in UIObjectDetails + image.is_loading_placeholder = False # type: ignore[attr-defined] - # Copy image so we don't modify the cached version - result = image.copy() - width, height = result.size - draw = ImageDraw.Draw(result) - - # Center position - cx, cy = width / 2.0, height / 2.0 - - # Pulsate crosshair: full cycle every 2 seconds - pulse_period = 2.0 # seconds - t = time.time() % pulse_period - # Sine wave for smooth pulsation (0.5 to 1.0 range) - pulse_factor = 0.5 + 0.5 * np.sin(2 * np.pi * t / pulse_period) - - # Size pulsates between 3 and 7 pixels - outer = int(3 + 4 * pulse_factor) - inner = 2 # Fixed gap - - # Color pulsates in brightness (32 to 96) - color_intensity = int(32 + 64 * pulse_factor) - marker_color = (color_intensity, 0, 0) - - # Crosshair outline (4 short lines with gap in middle) - draw.line([cx - outer, cy, cx - inner, cy], 
fill=marker_color, width=1) # Left - draw.line([cx + inner, cy, cx + outer, cy], fill=marker_color, width=1) # Right - draw.line([cx, cy - outer, cx, cy - inner], fill=marker_color, width=1) # Top - draw.line([cx, cy + inner, cx, cy + outer], fill=marker_color, width=1) # Bottom + return image - return result def _draw_star_antialiased_fast(self, image_array, ix, iy, fx, fy, intensity): """ @@ -637,14 +605,16 @@ def get_limiting_magnitude(self, sqm) -> float: eyepiece_fl = eyepiece.focal_length_mm if eyepiece else None sqm_value = round(sqm.value, 1) if sqm and hasattr(sqm, 'value') and sqm.value else None - cache_key = (sqm_value, telescope_aperture, telescope_fl, eyepiece_fl) + # Include config mode and fixed value in cache key to handle mode switching + lm_mode = self.config.get_option("obj_chart_lm_mode") + lm_fixed = self.config.get_option("obj_chart_lm_fixed") + + cache_key = (sqm_value, telescope_aperture, telescope_fl, eyepiece_fl, lm_mode, lm_fixed) # Check cache - return cached value without logging if self._lm_cache is not None and self._lm_cache[0] == cache_key: return self._lm_cache[1] - lm_mode = self.config.get_option("obj_chart_lm_mode") - if lm_mode == "fixed": # Use fixed limiting magnitude from config lm = self.config.get_option("obj_chart_lm_fixed") diff --git a/python/PiFinder/solver.py b/python/PiFinder/solver.py index 049fb069d..777aa44b9 100644 --- a/python/PiFinder/solver.py +++ b/python/PiFinder/solver.py @@ -430,7 +430,7 @@ def solver( # Mark successful solve - use same timestamp as last_solve_attempt for comparison solved["last_solve_success"] = solved["last_solve_attempt"] - logger.info( + logger.debug( f"Solve SUCCESS - {len(centroids)} centroids → " f"{solved.get('Matches', 0)} matches, " f"RMSE: {solved.get('RMSE', 0):.1f}px" @@ -454,7 +454,7 @@ def solver( else: # Centroids found but solve failed - clear Matches solved["Matches"] = 0 - logger.warning( + logger.debug( f"Solve FAILED - {len(centroids)} centroids detected but " f"pattern match failed (FOV est: 12.0°, max err: 4.0°)" ) diff --git a/python/PiFinder/sqm/noise_floor.py b/python/PiFinder/sqm/noise_floor.py index 046dde6b5..fcccc64b9 100644 --- a/python/PiFinder/sqm/noise_floor.py +++ b/python/PiFinder/sqm/noise_floor.py @@ -251,7 +251,7 @@ def update_with_zero_sec_sample(self, zero_sec_image: np.ndarray) -> None: } ) - logger.info( + logger.debug( f"Zero-sec sample: bias={measured_bias:.1f} ADU, " f"read_noise={measured_std:.2f} ADU" ) @@ -279,7 +279,7 @@ def update_with_zero_sec_sample(self, zero_sec_image: np.ndarray) -> None: alpha * avg_read_noise + (1 - alpha) * self.profile.read_noise_adu ) - logger.info( + logger.debug( f"Updated camera profile: " f"bias {old_bias:.1f} → {self.profile.bias_offset:.1f}, " f"read_noise {old_noise:.2f} → {self.profile.read_noise_adu:.2f}" diff --git a/python/PiFinder/sqm/sqm.py b/python/PiFinder/sqm/sqm.py index be71c0af5..4c59c891c 100644 --- a/python/PiFinder/sqm/sqm.py +++ b/python/PiFinder/sqm/sqm.py @@ -446,7 +446,7 @@ def calculate( ) pedestal = noise_floor - logger.info( + logger.debug( f"Adaptive noise floor: {noise_floor:.1f} ADU " f"(dark_px={noise_floor_details['dark_pixel_smoothed']:.1f}, " f"theory={noise_floor_details['theoretical_floor']:.1f}, " @@ -455,7 +455,7 @@ def calculate( # Check if zero-sec sample requested if noise_floor_details.get("request_zero_sec_sample"): - logger.info( + logger.debug( "Zero-second calibration sample requested by noise estimator " "(will be captured in next cycle)" ) diff --git a/python/PiFinder/star_catalog.py 
b/python/PiFinder/star_catalog.py index 38631db37..f43d11cac 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -12,8 +12,10 @@ - Proper motion corrections """ +import hashlib import json import logging +import math import struct import threading import time @@ -50,6 +52,12 @@ ('pmdec', 'i1'), # Proper motion Dec (mas/yr / 50) ]) +# Index cache size limit (tiles per magnitude band) +# At ~50 bytes per tile entry, 10000 tiles = ~500KB per band +# With 6 mag bands, total cache size ~3MB (acceptable on Pi) +# This accommodates the full mag 0-6 index (6465 tiles) without trimming +MAX_INDEX_CACHE_SIZE = 10000 + class CatalogState(Enum): """Catalog loading state""" @@ -59,6 +67,202 @@ class CatalogState(Enum): READY = 2 +class TileBloomFilter: + """ + Bloom filter for tile existence checks. + + A space-efficient probabilistic data structure for testing set membership. + False positives are possible (might say a tile exists when it doesn't), + but false negatives are impossible (never says a tile doesn't exist when it does). + + Uses k hash functions with optimal sizing for target false positive rate. + Typical configuration: 10 bits per element for 1% false positive rate. + """ + + def __init__(self, capacity: int, fp_rate: float = 0.01): + """ + Initialize bloom filter. + + Args: + capacity: Expected number of items (tiles) to store + fp_rate: Target false positive rate (e.g., 0.01 = 1%) + """ + self.capacity = capacity + self.fp_rate = fp_rate + self.num_bits = self._optimal_num_bits(capacity, fp_rate) + self.num_hashes = self._optimal_num_hashes(self.num_bits, capacity) + self.bit_array = bytearray((self.num_bits + 7) // 8) + + @staticmethod + def _optimal_num_bits(n: int, p: float) -> int: + """ + Calculate optimal bit array size. + + Formula: m = -(n * ln(p)) / (ln(2)^2) + + Args: + n: Number of elements (capacity) + p: Target false positive rate + + Returns: + Optimal number of bits + """ + m = -(n * math.log(p)) / (math.log(2) ** 2) + return int(m) + + @staticmethod + def _optimal_num_hashes(m: int, n: int) -> int: + """ + Calculate optimal number of hash functions. + + Formula: k = (m/n) * ln(2) + + Args: + m: Number of bits in array + n: Number of elements (capacity) + + Returns: + Optimal number of hash functions + """ + if n == 0: + return 1 # Avoid division by zero for empty filter + k = (m / n) * math.log(2) + return max(1, int(k)) + + def _hash(self, item: int, seed: int) -> int: + """ + Hash function using MD5 with seed mixing. + + Args: + item: Tile ID to hash + seed: Seed for this hash function (0 to k-1) + + Returns: + Bit position in range [0, num_bits) + """ + h = hashlib.md5(f"{item}:{seed}".encode()).digest() + return int.from_bytes(h[:4], 'little') % self.num_bits + + def add(self, tile_id: int) -> None: + """ + Add tile_id to bloom filter. + + Args: + tile_id: HEALPix tile ID to add + """ + for i in range(self.num_hashes): + bit_pos = self._hash(tile_id, i) + byte_pos = bit_pos // 8 + bit_offset = bit_pos % 8 + self.bit_array[byte_pos] |= (1 << bit_offset) + + def might_contain(self, tile_id: int) -> bool: + """ + Check if tile_id might exist in the set. 
+ + Args: + tile_id: HEALPix tile ID to check + + Returns: + True: tile might exist (or false positive) + False: tile definitely does not exist + """ + for i in range(self.num_hashes): + bit_pos = self._hash(tile_id, i) + byte_pos = bit_pos // 8 + bit_offset = bit_pos % 8 + if not (self.bit_array[byte_pos] & (1 << bit_offset)): + return False # Definitely not in set + return True # Probably in set + + def save(self, path: Path) -> None: + """ + Save bloom filter to binary file. + + Format: + Header (24 bytes): + [version:4][capacity:4][fp_rate:8][num_bits:4][num_hashes:4] + Body (variable): + [bit_array:N] where N = (num_bits + 7) / 8 bytes + + Args: + path: Path to save bloom filter + """ + with open(path, 'wb') as f: + # Header: version, capacity, fp_rate, num_bits, num_hashes + f.write(struct.pack(' 'TileBloomFilter': + """ + Load bloom filter from binary file. + + Args: + path: Path to bloom filter file + + Returns: + Loaded TileBloomFilter instance + + Raises: + ValueError: If file format is unsupported or corrupted + FileNotFoundError: If file doesn't exist + """ + with open(path, 'rb') as f: + # Read header (24 bytes) + header = f.read(24) + if len(header) < 24: + raise ValueError(f"Bloom filter file too small: {len(header)} bytes") + + version, capacity, fp_rate, num_bits, num_hashes = struct.unpack(' Optional[float]: + """ + Calculate actual false positive rate based on stored parameters. + + Formula: FP = (1 - e^(-k*n/m))^k + + Returns: + Estimated false positive rate, or None if capacity is 0 + """ + if self.capacity == 0: + return None + + # FP rate = (1 - e^(-k*n/m))^k + exponent = -self.num_hashes * self.capacity / self.num_bits + fp = (1 - math.exp(exponent)) ** self.num_hashes + return fp + + class DeepStarCatalog: """ HEALPix-indexed star catalog with background loading @@ -87,12 +291,16 @@ def __init__(self, catalog_path: str): self.limiting_magnitude: float = 12.0 self.visible_tiles: Optional[Set[int]] = None self.spatial_index: Optional[Any] = None - self.tile_cache: Dict[Tuple[int, float], List[Tuple[float, float, float, float, float]]] = {} + self.tile_cache: Dict[Tuple[int, float], np.ndarray] = {} self.cache_lock = threading.Lock() self.load_thread: Optional[threading.Thread] = None self.load_progress: str = "" # Status message for UI self.load_percent: int = 0 # Progress percentage (0-100) self._index_cache: Dict[str, Any] = {} + # Cache of existing tile IDs per magnitude band to avoid scanning for non-existent tiles + self._existing_tiles_cache: Dict[str, Set[int]] = {} + # Bloom filters for fast tile existence checks (space-efficient) + self._bloom_filters: Dict[str, TileBloomFilter] = {} t_init = (time.time() - t0) * 1000 logger.info(f">>> DeepStarCatalog.__init__() completed in {t_init:.1f}ms") @@ -156,6 +364,14 @@ def _background_load_worker(self): f">>> Catalog metadata ready: {star_count:,} stars, " f"mag limit {self.metadata.get('mag_limit', 0):.1f}, nside={self.nside}" ) + + # Log available bands + bands = self.metadata.get("mag_bands", []) + logger.info(f">>> Catalog mag bands: {json.dumps(bands)}") + + # Preload all bloom filters into memory (~12 MB total) + # This eliminates on-demand loading delays during chart generation + self._preload_bloom_filters() # Initialize empty structures (no preloading) self.spatial_index = {} @@ -274,7 +490,7 @@ def get_stars_for_fov_progressive( immediately while continuing to load fainter stars in the background. 
Blocks if state == LOADING (waits for load to complete) - Returns empty list if state == NOT_LOADED + Returns empty array if state == NOT_LOADED Args: ra_deg: Center RA in degrees @@ -284,12 +500,12 @@ def get_stars_for_fov_progressive( Yields: (stars, is_complete) tuples where: - - stars: List of (ra, dec, mag) tuples with proper motion corrected + - stars: Numpy array (N, 3) of (ra, dec, mag) with proper motion corrected - is_complete: True if this is the final yield with all stars """ if self.state == CatalogState.NOT_LOADED: logger.warning("Catalog not loaded") - yield ([], True) + yield (np.empty((0, 3)), True) return # Wait for catalog to be loaded @@ -302,24 +518,24 @@ def get_stars_for_fov_progressive( if not _HEALPY_AVAILABLE: logger.error("healpy not available - cannot perform HEALPix queries") - yield ([], True) + yield (np.empty((0, 3)), True) return # Calculate HEALPix tiles covering FOV vec = hp.ang2vec(ra_deg, dec_deg, lonlat=True) radius_rad = np.radians(fov_deg * 0.85) tiles = hp.query_disc(self.nside, vec, radius_rad) - logger.info(f"HEALPix PROGRESSIVE: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") + logger.debug(f"HEALPix PROGRESSIVE: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") # Filter by visible hemisphere if self.visible_tiles: tiles = [t for t in tiles if t in self.visible_tiles] # Load stars progressively by magnitude band (bright to faint) - all_stars = [] + all_stars_list = [] if not self.metadata: - yield ([], True) + yield (np.empty((0, 3)), True) return for mag_band_info in self.metadata.get("mag_bands", []): @@ -330,32 +546,43 @@ def get_stars_for_fov_progressive( if mag_min >= mag_limit: break - logger.info(f">>> PROGRESSIVE: Loading mag band {mag_min}-{mag_max}, tiles={len(tiles)}, mag_limit={mag_limit}") + logger.debug(f">>> PROGRESSIVE: Loading mag band {mag_min}-{mag_max}, tiles={len(tiles)}, mag_limit={mag_limit}") import time t_band_start = time.time() # Load stars from this magnitude band only - logger.info(f">>> Calling _load_tiles_for_mag_band...") + # logger.info(f">>> Calling _load_tiles_for_mag_band...") band_stars = self._load_tiles_for_mag_band( tiles, mag_band_info, mag_limit, ra_deg, dec_deg, fov_deg ) - logger.info(f">>> _load_tiles_for_mag_band returned {len(band_stars)} stars") + # logger.info(f">>> _load_tiles_for_mag_band returned {len(band_stars)} stars") t_band_end = time.time() - logger.info(f"PROGRESSIVE: Mag band {mag_min}-{mag_max} loaded {len(band_stars)} stars in {(t_band_end-t_band_start)*1000:.1f}ms") + logger.debug(f"PROGRESSIVE: Mag band {mag_min}-{mag_max} loaded {len(band_stars)} stars in {(t_band_end-t_band_start)*1000:.1f}ms") # Add to cumulative list - all_stars.extend(band_stars) + if len(band_stars) > 0: + all_stars_list.append(band_stars) # Yield current results (not complete yet unless this is the last band) is_last_band = mag_max >= mag_limit - yield (all_stars.copy(), is_last_band) + + if all_stars_list: + current_total = np.concatenate(all_stars_list) + else: + current_total = np.empty((0, 3)) + + yield (current_total, is_last_band) if is_last_band: break # Final yield (should already be done above, but just in case) - logger.info(f"PROGRESSIVE: Complete! Total {len(all_stars)} stars loaded") + if all_stars_list: + final_total = np.concatenate(all_stars_list) + else: + final_total = np.empty((0, 3)) + logger.info(f"PROGRESSIVE: Complete! 
Total {len(final_total)} stars loaded") def get_stars_for_fov( self, @@ -363,12 +590,12 @@ def get_stars_for_fov( dec_deg: float, fov_deg: float, mag_limit: Optional[float] = None, - ) -> List[Tuple[float, float, float]]: + ) -> np.ndarray: """ Query stars in field of view Blocks if state == LOADING (waits for load to complete) - Returns empty list if state == NOT_LOADED + Returns empty array if state == NOT_LOADED Args: ra_deg: Center RA in degrees @@ -377,11 +604,11 @@ def get_stars_for_fov( mag_limit: Limiting magnitude (uses catalog default if None) Returns: - List of (ra, dec, mag) tuples with proper motion corrected + Numpy array (N, 3) of (ra, dec, mag) with proper motion corrected """ if self.state == CatalogState.NOT_LOADED: logger.warning("Catalog not loaded") - return [] + return np.empty((0, 3)) if self.state == CatalogState.LOADING: # Wait for loading to complete (with timeout) @@ -392,7 +619,7 @@ def get_stars_for_fov( time.sleep(0.1) if time.time() - start > timeout: logger.error("Catalog loading timeout") - return [] + return np.empty((0, 3)) # State is READY - metadata must be loaded by now assert self.metadata is not None, "metadata should be loaded when state is READY" @@ -402,7 +629,7 @@ def get_stars_for_fov( if not _HEALPY_AVAILABLE: logger.error("healpy not installed") - return [] + return np.empty((0, 3)) # Calculate HEALPix tiles covering FOV # Query larger area to account for rectangular screen and rotation @@ -411,14 +638,14 @@ def get_stars_for_fov( # Use full diagonal + margin to ensure corners are covered even when rotated radius_rad = np.radians(fov_deg * 0.85) # sqrt(2)/2 ≈ 0.707, add extra for rotation tiles = hp.query_disc(self.nside, vec, radius_rad) - logger.info(f"HEALPix: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") + logger.debug(f"HEALPix: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") # Filter by visible hemisphere if self.visible_tiles: tiles = [t for t in tiles if t in self.visible_tiles] # Load stars from tiles (batch load for better performance) - stars: List[Tuple[float, float, float]] = [] + stars: np.ndarray = np.empty((0, 3)) tile_star_counts = {} # Try batch loading if catalog is compact format @@ -439,7 +666,7 @@ def get_stars_for_fov( logger.info(f"Using SINGLE-TILE loading for {len(tiles)} tiles (compact={is_compact})") import time t_single_start = time.time() - stars_raw: List[Tuple[float, float, float, float, float]] = [] + stars_raw_list = [] # To prevent UI blocking, limit the number of tiles loaded at once # For small FOVs (<1°), 20-30 tiles is more than enough @@ -457,10 +684,13 @@ def get_stars_for_fov( # Check if this tile is cached (for performance tracking) cache_key = (tile_id, mag_limit) was_cached = cache_key in self.tile_cache - + + # Returns (N, 5) array tile_stars = self._load_tile_data(tile_id, mag_limit) tile_star_counts[tile_id] = len(tile_stars) - stars_raw.extend(tile_stars) + + if len(tile_stars) > 0: + stars_raw_list.append(tile_stars) if was_cached: cache_hits += 1 @@ -470,14 +700,16 @@ def get_stars_for_fov( # Log progress every 25 tiles if (i + 1) % 25 == 0: elapsed = (time.time() - t_single_start) * 1000 - logger.info(f"Progress: {i+1}/{len(tiles)} tiles loaded ({elapsed:.0f}ms elapsed)") + logger.debug(f"Progress: {i+1}/{len(tiles)} tiles loaded ({elapsed:.0f}ms elapsed)") t_single_end = time.time() elapsed_ms = (t_single_end - t_single_start) * 1000 # Log cache performance - logger.info(f"Tile cache: {cache_hits} hits, {cache_misses} misses 
({cache_hits/(cache_hits+cache_misses)*100:.1f}% hit rate)") - logger.info(f"Single-tile loading complete: {len(stars_raw)} stars in {elapsed_ms:.1f}ms ({elapsed_ms/len(tiles):.2f}ms/tile)") + logger.debug(f"Tile cache: {cache_hits} hits, {cache_misses} misses ({cache_hits/(cache_hits+cache_misses)*100:.1f}% hit rate)") + + total_raw = sum(len(x) for x in stars_raw_list) + logger.debug(f"Single-tile loading complete: {total_raw} stars in {elapsed_ms:.1f}ms ({elapsed_ms/len(tiles):.2f}ms/tile)") # Log tile loading stats if tile_star_counts: @@ -487,9 +719,20 @@ def get_stars_for_fov( # Apply proper motion correction (for non-batch path only) t_pm_start = time.time() - stars = self._apply_proper_motion(stars_raw) + + if stars_raw_list: + stars_raw_combined = np.concatenate(stars_raw_list) + ras = stars_raw_combined[:, 0] + decs = stars_raw_combined[:, 1] + mags = stars_raw_combined[:, 2] + pmras = stars_raw_combined[:, 3] + pmdecs = stars_raw_combined[:, 4] + stars = self._apply_proper_motion((ras, decs, mags, pmras, pmdecs)) + else: + stars = np.empty((0, 3)) + t_pm_end = time.time() - logger.info(f"Proper motion correction: {len(stars)} stars in {(t_pm_end-t_pm_start)*1000:.1f}ms") + logger.debug(f"Proper motion correction: {len(stars)} stars in {(t_pm_end-t_pm_start)*1000:.1f}ms") return stars @@ -501,7 +744,7 @@ def _load_tiles_for_mag_band( ra_deg: float, dec_deg: float, fov_deg: float, - ) -> List[Tuple[float, float, float]]: + ) -> np.ndarray: """ Load tiles for a specific magnitude band (used by progressive loading) @@ -514,49 +757,57 @@ def _load_tiles_for_mag_band( fov_deg: Field of view (for logging) Returns: - List of (ra, dec, mag) tuples with proper motion corrected + Numpy array (N, 3) of (ra, dec, mag) with proper motion corrected """ mag_min = mag_band_info["min"] mag_max = mag_band_info["max"] band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" - logger.info(f">>> _load_tiles_for_mag_band: mag {mag_min}-{mag_max}, band_dir={band_dir}, tiles={len(tile_ids)}") + # logger.info(f">>> _load_tiles_for_mag_band: mag {mag_min}-{mag_max}, band_dir={band_dir}, tiles={len(tile_ids)}") # Check if this band directory exists if not band_dir.exists(): logger.warning(f">>> Magnitude band directory not found: {band_dir}") - return [] + return np.empty((0, 3)) # For compact format, use vectorized batch loading per band assert self.metadata is not None, "metadata must be loaded" is_compact = self.metadata.get("format") == "compact" - logger.info(f">>> Format is_compact={is_compact}, calling _load_tiles_batch_single_band...") + # logger.info(f">>> Format is_compact={is_compact}, calling _load_tiles_batch_single_band...") if is_compact: result = self._load_tiles_batch_single_band( tile_ids, mag_band_info, mag_limit ) - logger.info(f">>> _load_tiles_batch_single_band returned {len(result)} stars") + # logger.info(f">>> _load_tiles_batch_single_band returned {len(result)} stars") return result else: # Legacy format - load tiles one by one (will load all bands for each tile) # This is less efficient but legacy format doesn't support per-band loading - stars_raw = [] + stars_raw_list = [] for tile_id in tile_ids: tile_stars = self._load_tile_data(tile_id, mag_limit) # Filter to just this magnitude band - tile_stars_filtered = [ - (ra, dec, mag, pmra, pmdec) - for ra, dec, mag, pmra, pmdec in tile_stars - if mag_min <= mag < mag_max - ] - stars_raw.extend(tile_stars_filtered) - - # Apply proper motion - return self._apply_proper_motion(stars_raw) + # tile_stars is (N, 5) + if 
len(tile_stars) > 0: + mags = tile_stars[:, 2] + mask = (mags >= mag_min) & (mags < mag_max) + if np.any(mask): + stars_raw_list.append(tile_stars[mask]) + + if stars_raw_list: + stars_raw_combined = np.concatenate(stars_raw_list) + ras = stars_raw_combined[:, 0] + decs = stars_raw_combined[:, 1] + mags = stars_raw_combined[:, 2] + pmras = stars_raw_combined[:, 3] + pmdecs = stars_raw_combined[:, 4] + return self._apply_proper_motion((ras, decs, mags, pmras, pmdecs)) + else: + return np.empty((0, 3)) def _load_tile_data( self, tile_id: int, mag_limit: float - ) -> List[Tuple[float, float, float, float, float]]: + ) -> np.ndarray: """ Load star data for a HEALPix tile @@ -565,7 +816,7 @@ def _load_tile_data( mag_limit: Maximum magnitude to load Returns: - List of (ra, dec, mag, pmra, pmdec) tuples + Numpy array of shape (N, 5) containing (ra, dec, mag, pmra, pmdec) """ assert self.metadata is not None, "metadata must be loaded before calling _load_tile_data" @@ -577,7 +828,7 @@ def _load_tile_data( return self.tile_cache[cache_key] # Load from disk - stars = [] + stars_list = [] # Check catalog format is_compact = self.metadata.get("format") == "compact" @@ -594,18 +845,30 @@ def _load_tile_data( if is_compact: # Compact format: read from consolidated file using index - tile_stars = self._load_tile_compact(band_dir, tile_id, mag_min, mag_max) + ras, decs, mags, pmras, pmdecs = self._load_tile_compact(band_dir, tile_id, mag_min, mag_max) else: # Legacy format: one file per tile tile_file = band_dir / f"tile_{tile_id:06d}.bin" if tile_file.exists(): - tile_stars = self._load_tile_from_file(tile_file, mag_min, mag_max) + ras, decs, mags, pmras, pmdecs = self._load_tile_from_file(tile_file, mag_min, mag_max) else: - tile_stars = [] + ras, decs, mags, pmras, pmdecs = (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) + + if len(ras) > 0: + # Filter by magnitude + mask = mags <= mag_limit + if np.any(mask): + # Stack into (N, 5) array for this band + band_stars = np.column_stack((ras[mask], decs[mask], mags[mask], pmras[mask], pmdecs[mask])) + stars_list.append(band_stars) + logger.debug(f" Tile {tile_id} Band {mag_min}-{mag_max}: {len(band_stars)} stars (file: {tile_file if not is_compact else 'compact'})") + else: + logger.debug(f" Tile {tile_id} Band {mag_min}-{mag_max}: 0 stars (mask empty)") - # Filter by magnitude - tile_stars = [s for s in tile_stars if s[2] <= mag_limit] - stars.extend(tile_stars) + if not stars_list: + stars = np.empty((0, 5)) + else: + stars = np.concatenate(stars_list) # Cache result with self.cache_lock: @@ -620,7 +883,7 @@ def _load_tile_data( def _load_tile_from_file( self, tile_file: Path, mag_min: float, mag_max: float - ) -> List[Tuple[float, float, float, float, float]]: + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ Load stars from a tile file @@ -630,53 +893,20 @@ def _load_tile_from_file( mag_max: Maximum magnitude in this band Returns: - List of (ra, dec, mag, pmra, pmdec) tuples + Tuple of (ras, decs, mags, pmras, pmdecs) arrays """ if not _HEALPY_AVAILABLE: - return [] + return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) # Read entire file at once with open(tile_file, "rb") as f: data = f.read() - if len(data) == 0: - return [] - - # VECTORIZED: Parse all records at once - num_records = len(data) // STAR_RECORD_SIZE - records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - - # Mask healpix to 24 bits - healpix_pixels = records['healpix'] & 0xFFFFFF - - # 
VECTORIZED: Get all pixel centers at once - pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) - - # Calculate pixel size once (not per star!) - pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - - # VECTORIZED: Decode all offsets at once - ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - - # VECTORIZED: Calculate final positions - decs = pixel_decs + dec_offset_arcsec / 3600.0 - ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - - # VECTORIZED: Decode magnitudes and proper motions - mags = records['mag'] / 10.0 - pmras = records['pmra'] * 50 - pmdecs = records['pmdec'] * 50 - - # Build result list - stars = [(ras[i], decs[i], mags[i], pmras[i], pmdecs[i]) for i in range(num_records)] - - return stars + return self._parse_records(data) def _load_tile_compact( self, band_dir: Path, tile_id: int, mag_min: float, mag_max: float - ) -> List[Tuple[float, float, float, float, float]]: + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ Load stars from compact format (consolidated tiles.bin + index.json) @@ -687,10 +917,10 @@ def _load_tile_compact( mag_max: Maximum magnitude Returns: - List of (ra, dec, mag, pmra, pmdec) tuples + Tuple of (ras, decs, mags, pmras, pmdecs) arrays """ if not _HEALPY_AVAILABLE: - return [] + return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) # Try binary index first, fall back to JSON for backward compat index_file_bin = band_dir / "index.bin" @@ -698,7 +928,7 @@ def _load_tile_compact( tiles_file = band_dir / "tiles.bin" if not tiles_file.exists(): - return [] + return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) # Determine index format if index_file_bin.exists(): @@ -708,7 +938,7 @@ def _load_tile_compact( index_file = index_file_json is_binary = False else: - return [] + return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) # Load index (cached per band) tile_key = str(tile_id) @@ -724,7 +954,7 @@ def _load_tile_compact( index = self._index_cache[cache_key] if tile_key not in index: - return [] # No stars in this tile + return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) # Get tile offset and size tile_info = index[tile_key] @@ -733,7 +963,6 @@ def _load_tile_compact( compressed_size = tile_info.get("compressed_size") # Read tile data - stars = [] with open(tiles_file, "rb") as f: f.seek(offset) @@ -746,39 +975,49 @@ def _load_tile_compact( # Uncompressed tile data = f.read(size) - # VECTORIZED: Decode all records in this tile at once - num_records = len(data) // STAR_RECORD_SIZE + return self._parse_records(data) - # Parse all records using numpy - records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) + def _parse_records(self, data: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: + """ + Parse binary star records into numpy arrays (VECTORIZED) - # Mask healpix to 24 bits - healpix_pixels = records['healpix'] & 0xFFFFFF + Args: + data: Binary data containing star records - # VECTORIZED: Get all pixel centers at once - pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) + Returns: + Tuple of (ras, decs, mags, pmras, pmdecs) as numpy arrays + """ + if len(data) == 0: + return (np.array([]), np.array([]), np.array([]), np.array([]), 
np.array([])) - # Calculate pixel size once (not per star!) - pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + # Parse all records using numpy + num_records = len(data) // STAR_RECORD_SIZE + records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - # VECTORIZED: Decode all offsets at once - ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec + # Mask healpix to 24 bits + healpix_pixels = records['healpix'] & 0xFFFFFF - # VECTORIZED: Calculate final positions - decs = pixel_decs + dec_offset_arcsec / 3600.0 - ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) + # Get all pixel centers at once + pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) - # VECTORIZED: Decode magnitudes and proper motions - mags = records['mag'] / 10.0 - pmras = records['pmra'] * 50 - pmdecs = records['pmdec'] * 50 + # Calculate pixel size once + pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) + max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - # Build result list - stars = [(ras[i], decs[i], mags[i], pmras[i], pmdecs[i]) for i in range(num_records)] + # Decode all offsets + ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec + dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - return stars + # Calculate final positions + decs = pixel_decs + dec_offset_arcsec / 3600.0 + ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) + + # Decode magnitudes and proper motions + mags = records['mag'] / 10.0 + pmras = records['pmra'] * 50 + pmdecs = records['pmdec'] * 50 + + return ras, decs, mags, pmras, pmdecs def _read_binary_index(self, index_file: Path, needed_tiles: Optional[set] = None) -> dict: """ @@ -802,122 +1041,448 @@ def _read_binary_index(self, index_file: Path, needed_tiles: Optional[set] = Non """ index = {} + if not index_file.exists(): + return {} + with open(index_file, "rb") as f: # Read header header = f.read(8) + if len(header) < 8: + return {} + version, num_tiles = struct.unpack("100K tiles), only load what we need if a subset is specified - # This avoids loading millions of entries when we only need a few dozen - if needed_tiles is not None and num_tiles > 100000 and len(needed_tiles) < num_tiles / 100: - logger.info(f">>> Large index detected ({num_tiles:,} tiles), loading only {len(needed_tiles)} needed tiles") - entry_size = 16 if version == 1 else 20 + # Define dtypes for vectorized reading + if version == 1: + # [tile_id:4][offset:8][size:4] + dtype = np.dtype([ + ('tile_id', '= len(needed_int): - break # Found all needed tiles - else: # version == 2 - tile_data = f.read(20) - if len(tile_data) < 20: - break - tile_id, offset, compressed_size, uncompressed_size = struct.unpack("= len(needed_int): - break # Found all needed tiles - - logger.info(f">>> Loaded {len(index)} entries from large index (scanned to find matches)") + logger.debug(f">>> Selective loading: {len(needed_list)} tiles out of {num_tiles:,} total") + + # Use range query for spatially localized tiles + # Range query does binary search + sequential read, much faster than chunked scan + index = self._load_tile_range(index_file, needed_list, version, entry_size, num_tiles) + + logger.info(f">>> Loaded {len(index)} entries using range query") return index - # For small indices or when we need everything, load 
all entries + # For small indices or when we need everything, load all entries at once + data = f.read() + records = np.frombuffer(data, dtype=dtype) + + # Convert to dictionary (this part is still Python loop but unavoidable for dict creation) + # However, iterating over numpy array is faster than struct.unpack loop + + # Pre-allocate dict for speed? Not easily possible in Python + # But we can use a comprehension which is slightly faster + if version == 1: - # Uncompressed format - for _ in range(num_tiles): - tile_data = f.read(16) - if len(tile_data) < 16: - break + for record in records: + index[str(record['tile_id'])] = { + "offset": int(record['offset']), + "size": int(record['size']) + } + else: + for record in records: + index[str(record['tile_id'])] = { + "offset": int(record['offset']), + "size": int(record['uncompressed_size']), + "compressed_size": int(record['compressed_size']) + } - tile_id, offset, size = struct.unpack(" None: + """ + Preload all bloom filters into memory during catalog initialization. - tile_id, offset, compressed_size, uncompressed_size = struct.unpack(">> No metadata available, skipping bloom filter preload") + return + + t0_total = time.time() + total_bytes = 0 + bands_loaded = 0 + + logger.info(">>> Preloading bloom filters for all magnitude bands...") + + for band_info in self.metadata["mag_bands"]: + mag_min = int(band_info["min"]) + mag_max = int(band_info["max"]) + cache_key = f"index_{mag_min}_{mag_max}" + + bloom_file = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "bloom.bin" + + if not bloom_file.exists(): + logger.warning( + f">>> Bloom filter missing for {cache_key}: {bloom_file} - " + f"Run catalog_tools/generate_bloom_filters.py" + ) + continue + + t0 = time.time() + self._bloom_filters[cache_key] = TileBloomFilter.load(bloom_file) + t_load = (time.time() - t0) * 1000 + + bloom = self._bloom_filters[cache_key] + bloom_bytes = len(bloom.bit_array) + total_bytes += bloom_bytes + bands_loaded += 1 + + logger.info( + f">>> Loaded bloom filter {cache_key}: " + f"{bloom.capacity:,} tiles, {bloom_bytes / 1024:.1f} KB, " + f"FP={bloom.get_actual_fp_rate():.2%} in {t_load:.1f}ms" + ) + + t_total = (time.time() - t0_total) * 1000 + logger.info( + f">>> Bloom filter preload complete: {bands_loaded} filters, " + f"{total_bytes / 1024 / 1024:.1f} MB total in {t_total:.1f}ms" + ) + + def _ensure_bloom_filter(self, cache_key: str, mag_min: int, mag_max: int) -> None: + """ + Ensure bloom filter is loaded for given magnitude band. + + This is a fallback in case preloading failed for a specific band. + Normally all bloom filters are preloaded during catalog initialization. + + Args: + cache_key: Cache key for this magnitude band (e.g., "index_12_14") + mag_min: Minimum magnitude for this band + mag_max: Maximum magnitude for this band + + Raises: + FileNotFoundError: If bloom filter file is missing (catalog corruption) + """ + if cache_key in self._bloom_filters: + return # Already loaded (normal case - preloaded at startup) + + # Fallback: load on-demand if preloading missed this band + logger.warning(f">>> Bloom filter {cache_key} not preloaded, loading on-demand...") + + bloom_file = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "bloom.bin" + + if not bloom_file.exists(): + raise FileNotFoundError( + f"Bloom filter missing for {cache_key}: {bloom_file}\n" + f"Catalog may be corrupted or incomplete. " + f"Run catalog_tools/generate_bloom_filters.py to create missing bloom filters." 
+ ) + + t0 = time.time() + self._bloom_filters[cache_key] = TileBloomFilter.load(bloom_file) + t_load = (time.time() - t0) * 1000 + + bloom = self._bloom_filters[cache_key] + actual_fp = bloom.get_actual_fp_rate() + logger.info( + f">>> Loaded bloom filter for {cache_key}: {bloom.capacity} tiles, " + f"{len(bloom.bit_array)} bytes, FP rate={actual_fp:.2%}, load_time={t_load:.1f}ms" + ) + + def _binary_search_tile_position( + self, + f, + target_tile_id: int, + num_tiles: int, + entry_size: int, + find_first: bool = True + ) -> Optional[int]: + """ + Binary search for tile position in sorted binary index file. + + Args: + f: Open file handle positioned after header + target_tile_id: Tile ID to search for + num_tiles: Total number of tiles in index + entry_size: Size of each entry in bytes (16 or 20) + find_first: If True, find first tile >= target. If False, find last tile <= target. + + Returns: + File position (offset from file start) of matching entry, or None if not found + """ + left, right = 0, num_tiles - 1 + result_pos = None + + while left <= right: + mid = (left + right) // 2 + pos = 8 + mid * entry_size # 8-byte header + entry offset + + f.seek(pos) + tile_id_bytes = f.read(4) + if len(tile_id_bytes) < 4: + break + + tile_id = struct.unpack(" target_tile_id + if find_first: + result_pos = pos # Keep track of smallest tile > target + right = mid - 1 + + return result_pos + + def _load_tile_range( + self, + index_file: Path, + tile_ids: List[int], + version: int, + entry_size: int, + num_tiles: int + ) -> Dict[str, Any]: + """ + Load a contiguous range of tiles using binary search + sequential read. + + This is much faster than seeking to each individual tile, especially on SD cards + where random seeks are expensive. + + Args: + index_file: Path to binary index file + tile_ids: List of tile IDs to load (must be sorted) + version: Index file version (1 or 2) + entry_size: Size of each entry (16 for v1, 20 for v2) + num_tiles: Total number of tiles in index + + Returns: + Dictionary mapping tile_id (as string) to tile metadata + """ + if not tile_ids: + return {} + + # Determine range to load + min_tile = min(tile_ids) + max_tile = max(tile_ids) + tile_set = set(tile_ids) + + index = {} + + with open(index_file, "rb") as f: + # Find starting position (first tile >= min_tile) + start_pos = self._binary_search_tile_position( + f, min_tile, num_tiles, entry_size, find_first=True + ) + + if start_pos is None: + logger.debug(f">>> No tiles found in range [{min_tile}, {max_tile}]") return {} + # Sequential read from start_pos until we pass max_tile + f.seek(start_pos) + tiles_read = 0 + tiles_matched = 0 + + while True: + entry_data = f.read(entry_size) + if len(entry_data) < entry_size: + break # End of file + + tiles_read += 1 + + if version == 1: + tile_id, offset, size = struct.unpack(" max_tile: + break # Passed our range + + if tile_id in tile_set: + index[str(tile_id)] = { + "offset": int(offset), + "size": int(size) + } + tiles_matched += 1 + else: # version == 2 + tile_id, offset, compressed_size, uncompressed_size = struct.unpack(" max_tile: + break # Passed our range + + if tile_id in tile_set: + index[str(tile_id)] = { + "offset": int(offset), + "size": int(uncompressed_size), + "compressed_size": int(compressed_size) + } + tiles_matched += 1 + + # Early exit if we've found all requested tiles + if tiles_matched >= len(tile_set): + break + + logger.debug( + f">>> Range query: read {tiles_read} entries, " + f"matched {tiles_matched}/{len(tile_ids)} requested tiles" + ) + return 
index - def _apply_proper_motion( - self, stars: List[Tuple[float, float, float, float, float]] - ) -> List[Tuple[float, float, float]]: + def _load_existing_tiles_set(self, index_file: Path) -> Set[int]: + """ + Quickly load the set of all existing tile IDs from an index file. + This is much faster than scanning for specific tiles when we just need + to know "does this tile exist?" to avoid wasteful searches. + + Args: + index_file: Path to binary index file + + Returns: + Set of existing tile IDs (as integers) """ - Apply proper motion corrections from J2016.0 to current epoch + existing_tiles: set[int] = set() - Shows stars at their current positions in the sky (today), not historical - J2000 positions. This provides the most accurate representation for - real-time telescope pointing. + if not index_file.exists(): + return existing_tiles + + with open(index_file, "rb") as f: + # Read header + header = f.read(8) + if len(header) < 8: + return existing_tiles + + version, num_tiles = struct.unpack(" np.ndarray: + """ + Apply proper motion corrections from J2016.0 to current epoch (VECTORIZED) Args: - stars: List of (ra, dec, mag, pmra, pmdec) tuples in J2016.0 + stars: Tuple of (ras, decs, mags, pmras, pmdecs) arrays Returns: - List of (ra, dec, mag) tuples with positions corrected to current epoch + Numpy array of shape (N, 3) containing (ra, dec, mag) """ + ras, decs, mags, pmras, pmdecs = stars + + if len(ras) == 0: + return np.empty((0, 3)) + # Calculate years from J2016.0 to current date current_year = datetime.now().year + (datetime.now().timetuple().tm_yday / 365.25) years_elapsed = current_year - 2016.0 - corrected = [] - for ra, dec, mag, pmra, pmdec in stars: - # Apply proper motion forward to current epoch - # pmra is in mas/year and needs cos(dec) correction for RA - ra_correction = (pmra / 1000 / 3600) / np.cos(np.radians(dec)) * years_elapsed - dec_correction = (pmdec / 1000 / 3600) * years_elapsed + # Apply proper motion forward to current epoch + # pmra is in mas/year and needs cos(dec) correction for RA + # Vectorized calculation + ra_corrections = (pmras / 1000 / 3600) / np.cos(np.radians(decs)) * years_elapsed + dec_corrections = (pmdecs / 1000 / 3600) * years_elapsed + + ra_corrected = ras + ra_corrections + dec_corrected = decs + dec_corrections + + # Keep dec in valid range + dec_corrected = np.clip(dec_corrected, -90, 90) + + # Stack into (N, 3) array + return np.column_stack((ra_corrected, dec_corrected, mags)) - ra_corrected = ra + ra_correction - dec_corrected = dec + dec_correction + def _trim_index_cache(self, cache_key: str, protected_tile_ids: List[int]) -> None: + """ + Trim index cache to stay within MAX_INDEX_CACHE_SIZE limit. + + Strategy: Remove oldest tiles not in the current request (protected_tile_ids). + This ensures we keep tiles needed for the current chart while evicting others. 
+ + Args: + cache_key: Cache key (e.g., "index_12_14") + protected_tile_ids: Tile IDs that must NOT be evicted (current FOV) + """ + index = self._index_cache.get(cache_key) + if not index: + return + + cache_size = len(index) + if cache_size <= MAX_INDEX_CACHE_SIZE: + return # Within limit, nothing to do + + # Calculate how many to remove + tiles_to_remove = cache_size - MAX_INDEX_CACHE_SIZE + logger.info(f">>> Cache {cache_key} exceeds limit ({cache_size} > {MAX_INDEX_CACHE_SIZE}), removing {tiles_to_remove} tiles") + + # Build set of protected tiles + protected_set = {str(tid) for tid in protected_tile_ids} - # Keep dec in valid range - dec_corrected = max(-90, min(90, dec_corrected)) + # Find eviction candidates (tiles not in current request) + candidates = [tile_key for tile_key in index.keys() if tile_key not in protected_set] - corrected.append((ra_corrected, dec_corrected, mag)) + if len(candidates) < tiles_to_remove: + # Not enough non-protected tiles, just remove what we can + logger.warning(f">>> Only {len(candidates)} evictable tiles, removing all of them") + tiles_to_remove = len(candidates) - return corrected + # Remove the first N candidates (simple FIFO-ish eviction) + # Could enhance this with LRU tracking later + for i in range(tiles_to_remove): + tile_key = candidates[i] + del index[tile_key] + + logger.info(f">>> Cache trimmed: {cache_size} → {len(index)} tiles") def _load_tiles_batch_single_band( self, tile_ids: List[int], mag_band_info: dict, mag_limit: float, - ) -> List[Tuple[float, float, float]]: + ) -> np.ndarray: """ Batch load multiple tiles for a SINGLE magnitude band (compact format only) Used by progressive loading to load one mag band at a time @@ -928,10 +1493,10 @@ def _load_tiles_batch_single_band( mag_limit: Maximum magnitude Returns: - List of (ra, dec, mag) tuples (already PM-corrected) + Numpy array of shape (N, 3) containing (ra, dec, mag) """ if not _HEALPY_AVAILABLE: - return [] + return np.empty((0, 3)) mag_min = mag_band_info["min"] mag_max = mag_band_info["max"] @@ -942,31 +1507,89 @@ def _load_tiles_batch_single_band( tiles_file = band_dir / "tiles.bin" if not tiles_file.exists(): - return [] + return np.empty((0, 3)) # Load index cache_key = f"index_{mag_min}_{mag_max}" if not hasattr(self, '_index_cache'): self._index_cache = {} + t_index_start = time.time() logger.info(f">>> Checking index cache for {cache_key}, in_cache={cache_key in self._index_cache}") if cache_key not in self._index_cache: if index_file_bin.exists(): logger.info(f">>> Reading binary index from {index_file_bin}") - # Pass needed tiles for optimization + # Pass needed tiles for optimization (only for initial load) + # This creates a PARTIAL index with only these tiles + t0 = time.time() self._index_cache[cache_key] = self._read_binary_index(index_file_bin, needed_tiles=set(tile_ids)) - logger.info(f">>> Binary index loaded, {len(self._index_cache[cache_key])} tiles in index") + t_read_index = (time.time() - t0) * 1000 + logger.info(f">>> Partial index loaded, {len(self._index_cache[cache_key])} tiles in cache in {t_read_index:.1f}ms") elif index_file_json.exists(): logger.info(f">>> Reading JSON index from {index_file_json}") with open(index_file_json, "r") as f: self._index_cache[cache_key] = json.load(f) - logger.info(f">>> JSON index loaded, {len(self._index_cache[cache_key])} tiles in index") + logger.info(f">>> JSON index loaded, {len(self._index_cache[cache_key])} tiles in cache") else: logger.warning(f">>> No index file found for {cache_key}") - return [] + return 
np.empty((0, 3)) + else: + # Check if cached index has all the tiles we need + # If missing tiles, load just those tiles and merge into cache + index = self._index_cache[cache_key] + missing_tile_ids = [tid for tid in tile_ids if str(tid) not in index] + if missing_tile_ids: + # Use bloom filter to pre-screen tiles (fast, space-efficient) + # Load bloom filter if not already cached + self._ensure_bloom_filter(cache_key, mag_min, mag_max) + bloom = self._bloom_filters[cache_key] + + # Filter missing tiles through bloom filter + t0 = time.time() + tiles_to_load = [tid for tid in missing_tile_ids if bloom.might_contain(tid)] + t_bloom = (time.time() - t0) * 1000 + tiles_filtered_out = len(missing_tile_ids) - len(tiles_to_load) + + if tiles_filtered_out > 0: + logger.debug( + f">>> Bloom filter: {len(missing_tile_ids)} missing → " + f"{len(tiles_to_load)} candidates (filtered {tiles_filtered_out}) " + f"in {t_bloom:.2f}ms" + ) + + if tiles_to_load: + t0 = time.time() + logger.info(f">>> Cached index missing {len(tiles_to_load)}/{len(tile_ids)} tiles, loading them...") + if index_file_bin.exists(): + # Load only the missing tiles that actually exist + missing_index = self._read_binary_index(index_file_bin, needed_tiles=set(tiles_to_load)) + # Merge into existing cache + self._index_cache[cache_key].update(missing_index) + t_missing = (time.time() - t0) * 1000 + logger.info(f">>> Added {len(missing_index)} tiles to cache in {t_missing:.1f}ms, now {len(self._index_cache[cache_key])} tiles total") + + # Trim cache if it exceeds limit + self._trim_index_cache(cache_key, tile_ids) + elif index_file_json.exists(): + # For JSON, we have to load the full file (no selective loading) + with open(index_file_json, "r") as f: + full_index = json.load(f) + # Merge only the missing tiles that we're loading + for tid in tiles_to_load: + tile_key = str(tid) + if tile_key in full_index: + self._index_cache[cache_key][tile_key] = full_index[tile_key] + logger.info(f">>> Added missing tiles to cache, now {len(self._index_cache[cache_key])} tiles total") + + # Trim cache if it exceeds limit + self._trim_index_cache(cache_key, tile_ids) index = self._index_cache[cache_key] - logger.info(f">>> Index ready, building read_ops for {len(tile_ids)} tiles...") + t_index_total = (time.time() - t_index_start) * 1000 + logger.debug(f">>> Index cache operations took {t_index_total:.1f}ms") + + t_readops_start = time.time() + logger.debug(f">>> Building read_ops for {len(tile_ids)} tiles...") # Collect all tile read operations read_ops = [] @@ -977,22 +1600,35 @@ def _load_tiles_batch_single_band( read_ops.append((tile_id, tile_info)) if not read_ops: - return [] + logger.debug(f">>> No tiles to load (all {len(tile_ids)} requested tiles are empty)") + return np.empty((0, 3)) # Sort by offset to minimize seeks read_ops.sort(key=lambda x: x[1]["offset"]) + t_readops = (time.time() - t_readops_start) * 1000 + logger.debug(f">>> Built {len(read_ops)} read_ops in {t_readops:.1f}ms") # Read data in larger sequential chunks when possible MAX_GAP = 100 * 1024 # 100KB gap tolerance - all_stars = [] - + + # Accumulate arrays + all_ras = [] + all_decs = [] + all_mags = [] + all_pmras = [] + all_pmdecs = [] + + t_io_start = time.time() + t_decompress_total = 0.0 + t_decode_total = 0.0 + bytes_read = 0 logger.info(f">>> Batch loading {len(read_ops)} tiles for mag {mag_min}-{mag_max}") with open(tiles_file, "rb") as f: i = 0 chunk_num = 0 while i < len(read_ops): chunk_num += 1 - logger.debug(f">>> Processing chunk {chunk_num}, tile 
{i+1}/{len(read_ops)}") + # logger.debug(f">>> Processing chunk {chunk_num}, tile {i+1}/{len(read_ops)}") tile_id, tile_info = read_ops[i] offset = tile_info["offset"] @@ -1019,67 +1655,76 @@ def _load_tiles_batch_single_band( # Read entire chunk chunk_size = chunk_end - offset - logger.debug(f">>> Reading chunk: {len(tiles_in_chunk)} tiles, size={chunk_size} bytes") + # logger.debug(f">>> Reading chunk: {len(tiles_in_chunk)} tiles, size={chunk_size} bytes") f.seek(offset) chunk_data = f.read(chunk_size) - logger.debug(f">>> Chunk read complete, processing tiles...") + bytes_read += chunk_size + # logger.debug(f">>> Chunk read complete, processing tiles...") # Process each tile in chunk for tile_idx, (tile_id, tile_info) in enumerate(tiles_in_chunk): - logger.debug(f">>> Processing tile {tile_idx+1}/{len(tiles_in_chunk)} (id={tile_id})") + # logger.debug(f">>> Processing tile {tile_idx+1}/{len(tiles_in_chunk)} (id={tile_id})") tile_offset = tile_info["offset"] - offset compressed_size = tile_info.get("compressed_size") size = tile_info["size"] if compressed_size: + t_decomp_start = time.time() import zlib compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] data = zlib.decompress(compressed_data) + t_decompress_total += (time.time() - t_decomp_start) else: data = chunk_data[tile_offset:tile_offset + size] - # VECTORIZED: Parse all star records at once - num_records = len(data) // STAR_RECORD_SIZE - records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - - # Mask healpix to 24 bits - healpix_pixels = records['healpix'] & 0xFFFFFF - - # VECTORIZED: Get all pixel centers at once - pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) - - # Calculate pixel size once - pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - - # VECTORIZED: Decode all offsets - ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - - # VECTORIZED: Calculate final positions - decs = pixel_decs + dec_offset_arcsec / 3600.0 - ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - - # VECTORIZED: Decode magnitudes and proper motions - mags = records['mag'] / 10.0 - pmras = records['pmra'] * 50 - pmdecs = records['pmdec'] * 50 - + # Parse records using shared helper + t_decode_start = time.time() + ras, decs, mags, pmras, pmdecs = self._parse_records(data) + t_decode_total += (time.time() - t_decode_start) + # Filter by magnitude - mag_mask = mags < mag_limit - - # Collect stars - for idx in np.where(mag_mask)[0]: - all_stars.append((ras[idx], decs[idx], mags[idx], pmras[idx], pmdecs[idx])) + mask = mags < mag_limit + + if np.any(mask): + all_ras.append(ras[mask]) + all_decs.append(decs[mask]) + all_mags.append(mags[mask]) + all_pmras.append(pmras[mask]) + all_pmdecs.append(pmdecs[mask]) i = j + if not all_ras: + return np.empty((0, 3)) + + # Concatenate all arrays + t_concat_start = time.time() + ras_final = np.concatenate(all_ras) + decs_final = np.concatenate(all_decs) + mags_final = np.concatenate(all_mags) + pmras_final = np.concatenate(all_pmras) + pmdecs_final = np.concatenate(all_pmdecs) + t_concat = (time.time() - t_concat_start) * 1000 + # Apply proper motion - return self._apply_proper_motion(all_stars) + t_pm_start = time.time() + result = self._apply_proper_motion((ras_final, decs_final, mags_final, pmras_final, pmdecs_final)) + t_pm = (time.time() - 
t_pm_start) * 1000 + + # Log performance breakdown + t_io_total = (time.time() - t_io_start) * 1000 + logger.info( + f">>> Tile I/O performance for mag {mag_min}-{mag_max}: " + f"total={t_io_total:.1f}ms, decompress={t_decompress_total*1000:.1f}ms, " + f"decode={t_decode_total*1000:.1f}ms, concat={t_concat:.1f}ms, pm={t_pm:.1f}ms, " + f"bytes={bytes_read/1024:.1f}KB, stars={len(result)}" + ) + + return result def _load_tiles_batch( self, tile_ids: List[int], mag_limit: float - ) -> List[Tuple[float, float, float]]: + ) -> np.ndarray: """ Batch load multiple tiles efficiently (compact format only) Much faster than loading tiles one-by-one due to reduced I/O overhead @@ -1089,14 +1734,18 @@ def _load_tiles_batch( mag_limit: Maximum magnitude Returns: - List of (ra, dec, mag) tuples (already PM-corrected) + Numpy array of shape (N, 3) containing (ra, dec, mag) """ assert self.metadata is not None, "metadata must be loaded before calling _load_tiles_batch" if not _HEALPY_AVAILABLE: - return [] + return np.empty((0, 3)) - all_stars = [] + all_ras = [] + all_decs = [] + all_mags = [] + all_pmras = [] + all_pmdecs = [] logger.info(f"_load_tiles_batch: Starting batch load of {len(tile_ids)} tiles") @@ -1197,52 +1846,40 @@ def _load_tiles_batch( if compressed_size: import zlib compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] - logger.info(f"_load_tiles_batch: Decompressing tile {tile_id}, {compressed_size} → {size} bytes") + # logger.info(f"_load_tiles_batch: Decompressing tile {tile_id}, {compressed_size} → {size} bytes") data = zlib.decompress(compressed_data) else: data = chunk_data[tile_offset:tile_offset + size] - # VECTORIZED: Parse all star records at once using numpy - num_records = len(data) // STAR_RECORD_SIZE - logger.info(f"_load_tiles_batch: Decoding {num_records} stars from tile {tile_id} (vectorized)") - - records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - - # Mask healpix to 24 bits - healpix_pixels = records['healpix'] & 0xFFFFFF - - # VECTORIZED: Get all pixel centers at once (healpy handles arrays efficiently) - pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) - - # Calculate pixel size once (not per star!) 
- pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 - - # VECTORIZED: Decode all offsets at once - ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec - dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - - # VECTORIZED: Calculate final positions - decs = pixel_decs + dec_offset_arcsec / 3600.0 - ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - - # VECTORIZED: Decode magnitudes and proper motions - mags = records['mag'] / 10.0 - pmras = records['pmra'] * 50 - pmdecs = records['pmdec'] * 50 - - # VECTORIZED: Filter by magnitude - mag_mask = mags <= mag_limit + # Parse records using shared helper + ras, decs, mags, pmras, pmdecs = self._parse_records(data) - # Collect stars that pass magnitude filter - for i in np.where(mag_mask)[0]: - all_stars.append((ras[i], decs[i], mags[i], pmras[i], pmdecs[i])) + # Filter by magnitude + mask = mags <= mag_limit + + if np.any(mask): + all_ras.append(ras[mask]) + all_decs.append(decs[mask]) + all_mags.append(mags[mask]) + all_pmras.append(pmras[mask]) + all_pmdecs.append(pmdecs[mask]) # Move to next chunk i = j - logger.info(f"_load_tiles_batch: Loaded {len(all_stars)} stars total, applying proper motion") + logger.info(f"_load_tiles_batch: Loaded {len(all_ras)} batches of stars, applying proper motion") + + if not all_ras: + return np.empty((0, 3)) + + # Concatenate all arrays + ras_final = np.concatenate(all_ras) + decs_final = np.concatenate(all_decs) + mags_final = np.concatenate(all_mags) + pmras_final = np.concatenate(all_pmras) + pmdecs_final = np.concatenate(all_pmdecs) + # Apply proper motion - result = self._apply_proper_motion(all_stars) + result = self._apply_proper_motion((ras_final, decs_final, mags_final, pmras_final, pmdecs_final)) logger.info(f"_load_tiles_batch: Complete, returning {len(result)} stars") return result diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 1e25e4d87..2a0138859 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -59,7 +59,7 @@ def __init__(self, *args, **kwargs): self._chart_generator = None # Active generator for progressive chart updates self._is_showing_loading_chart = False # Track if showing "Loading..." for deep chart self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists - self._is_deep_chart = False # Track if currently showing a deep chart (auto or forced) + self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists # Default Marking Menu self._default_marking_menu = MarkingMenu( @@ -169,7 +169,7 @@ def update_object_info(self): """ Generates object text and loads object images """ - logger.info(f">>> update_object_info() called for {self.object.display_name if self.object else 'None'}") + # logger.info(f">>> update_object_info() called for {self.object.display_name if self.object else 'None'}") # CRITICAL: Clear loading flag at START to prevent recursive update() calls # during generator consumption. 
If we don't do this, calling self.update() @@ -299,7 +299,12 @@ def update_object_info(self): # Detect if we're showing a deep chart (forced or automatic due to no POSS image) # Deep charts are identified by the is_loading_placeholder attribute (loading or False) - self._is_deep_chart = ( + # self._is_deep_chart is now a property + pass + + @property + def _is_deep_chart(self): + return ( self.object_image is not None and hasattr(self.object_image, 'is_loading_placeholder') and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] @@ -346,31 +351,52 @@ def _get_pulse_factor(self): def _draw_crosshair_simple(self, pulse=False): """ - Draw simple crosshair with 4 lines and center gap + Draw simple crosshair with 4 lines and center gap using inverted pixels Args: pulse: If True, apply pulsation effect """ + import numpy as np + width, height = self.display_class.resolution - cx, cy = width / 2.0, height / 2.0 + cx, cy = int(width / 2.0), int(height / 2.0) if pulse: - _, size_mult, color_intensity = self._get_pulse_factor() + _, size_mult, _ = self._get_pulse_factor() # Size pulsates from 6 down to 3 pixels (inverted - more steps) - outer = 6.0 - (3.0 * size_mult) # 6.0 down to 3.0 (more visible integer steps) - marker_color = self.colors.get(color_intensity) + outer = int(6.0 - (3.0 * size_mult)) # 6.0 down to 3.0 (more visible integer steps) else: - # Fixed size and brightness + # Fixed size outer = 4 - marker_color = self.colors.get(64) inner = 2 # Fixed gap (small center) - # Crosshair outline (4 short lines with gap in middle) - self.draw.line([cx - outer, cy, cx - inner, cy], fill=marker_color, width=1) # Left - self.draw.line([cx + inner, cy, cx + outer, cy], fill=marker_color, width=1) # Right - self.draw.line([cx, cy - outer, cx, cy - inner], fill=marker_color, width=1) # Top - self.draw.line([cx, cy + inner, cx, cy + outer], fill=marker_color, width=1) # Bottom + # Get screen buffer as numpy array for pixel inversion + pixels = np.array(self.screen) + + # Invert pixels to create crosshair (always visible on any background) + # Horizontal lines (left and right of center) + for x in range(max(0, cx - outer), max(0, cx - inner)): + if 0 <= x < width and 0 <= cy < height: + pixels[cy, x, 0] = 255 - pixels[cy, x, 0] # Invert red channel + for x in range(min(width, cx + inner), min(width, cx + outer + 1)): + if 0 <= x < width and 0 <= cy < height: + pixels[cy, x, 0] = 255 - pixels[cy, x, 0] # Invert red channel + + # Vertical lines (top and bottom of center) + for y in range(max(0, cy - outer), max(0, cy - inner)): + if 0 <= y < height and 0 <= cx < width: + pixels[y, cx, 0] = 255 - pixels[y, cx, 0] # Invert red channel + for y in range(min(height, cy + inner), min(height, cy + outer + 1)): + if 0 <= y < height and 0 <= cx < width: + pixels[y, cx, 0] = 255 - pixels[y, cx, 0] # Invert red channel + + # Update screen buffer with inverted pixels + from PIL import Image + self.screen = Image.fromarray(pixels, mode="RGB") + # Re-create draw object since we replaced the image + from PIL import ImageDraw + self.draw = ImageDraw.Draw(self.screen) def _draw_crosshair_circle(self, pulse=False): """ @@ -684,12 +710,20 @@ def update(self, force=True): if hasattr(self, '_chart_generator') and self._chart_generator is not None: try: next_image = next(self._chart_generator) - logger.info(f">>> update(): Consumed next chart yield: {type(next_image)}") + # logger.debug(f">>> update(): Consumed next chart yield: {type(next_image)}") self.object_image = next_image force = True # Force screen update for 
progressive chart except StopIteration: logger.info(">>> update(): Chart generator exhausted") self._chart_generator = None # Generator exhausted + + # Update loading flag based on current image + if self.object_image is not None: + self._is_showing_loading_chart = ( + hasattr(self.object_image, 'is_loading_placeholder') + and self.object_image.is_loading_placeholder + and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] + ) # Check if we're showing "Loading..." for a deep chart # and if catalog is now ready, regenerate the image @@ -700,11 +734,11 @@ def update(self, force=True): # Use cached chart generator to preserve catalog state chart_gen = self._get_chart_generator() state = chart_gen.get_catalog_state() - logger.info(f">>> Update check: catalog state = {state}") + # logger.debug(f">>> Update check: catalog state = {state}") if state == CatalogState.READY: # Catalog ready! Regenerate display - logger.info(">>> Catalog READY! Regenerating image...") + # logger.info(">>> Catalog READY! Regenerating image...") self._is_showing_loading_chart = False self.update_object_info() force = True # Force screen update @@ -715,13 +749,14 @@ def update(self, force=True): self.clear_screen() # paste image - logger.info(f">>> update(): object_display_mode={self.object_display_mode}, DM_POSS={DM_POSS}, DM_SDSS={DM_SDSS}, DM_CHART={DM_CHART}, will_paste={self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]}") - logger.info(f">>> update(): object_image type={type(self.object_image)}, size={self.object_image.size if self.object_image else None}") + # paste image + # logger.debug(f">>> update(): object_display_mode={self.object_display_mode}...") + # logger.debug(f">>> update(): object_image type={type(self.object_image)}...") # DEBUG: Check if object_image has the is_loading_placeholder attribute (indicates it's a chart) if self.object_image: is_chart = hasattr(self.object_image, 'is_loading_placeholder') - logger.info(f">>> update(): object_image has is_loading_placeholder={is_chart}, _force_deep_chart={self._force_deep_chart}") + # logger.debug(f">>> update(): object_image has is_loading_placeholder={is_chart}...") if self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]: # DEBUG: Check if image has any non-black pixels @@ -730,23 +765,13 @@ def update(self, force=True): img_array = np.array(self.object_image) non_zero = np.count_nonzero(img_array) max_val = np.max(img_array) - logger.info(f">>> CHART IMAGE DEBUG: non-zero pixels={non_zero}, max_value={max_val}, shape={img_array.shape}") + # logger.debug(f">>> CHART IMAGE DEBUG: non-zero pixels={non_zero}, max_value={max_val}, shape={img_array.shape}") self.screen.paste(self.object_image) - logger.info(f">>> Image pasted to screen") + # logger.debug(f">>> Image pasted to screen") # DEBUG: Save screen buffer to file for inspection - if self.object_display_mode == DM_CHART and self._force_deep_chart: - try: - import os - debug_path = "/tmp/pifinder_chart_debug.png" - self.object_image.save(debug_path) - logger.info(f">>> SAVED object_image to {debug_path}") - screen_path = "/tmp/pifinder_screen_debug.png" - self.screen.save(screen_path) - logger.info(f">>> SAVED screen buffer to {screen_path}") - except Exception as e: - logger.error(f">>> Failed to save debug images: {e}") + # (Removed per user request) # If showing deep chart, draw crosshair based on config if self._force_deep_chart and self.object_image is not None: diff --git a/python/tests/test_bloom_filter.py b/python/tests/test_bloom_filter.py new file mode 100644 index 
000000000..3f4be6b36 --- /dev/null +++ b/python/tests/test_bloom_filter.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python3 +""" +Unit tests for TileBloomFilter implementation. + +Tests cover: +- Basic functionality (add, might_contain) +- False positive rate validation +- Save/load persistence +- Edge cases (empty filter, large capacity) +- Integration with star catalog +""" + +import math +import tempfile +from pathlib import Path + +import pytest + +from PiFinder.star_catalog import TileBloomFilter + + +class TestTileBloomFilterBasics: + """Test basic bloom filter operations.""" + + def test_empty_filter(self): + """Test empty filter returns False for all queries.""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.01) + + # Empty filter should never contain anything + assert not bloom.might_contain(1) + assert not bloom.might_contain(100) + assert not bloom.might_contain(999999) + + def test_single_item(self): + """Test filter with single item.""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.01) + + bloom.add(42) + + # Should definitely contain the added item + assert bloom.might_contain(42) + + # Should not contain other items (with high probability) + # Note: Can't guarantee 100% due to false positives, but very unlikely for single item + assert not bloom.might_contain(1) + assert not bloom.might_contain(43) + + def test_multiple_items(self): + """Test filter with multiple items.""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.01) + + items = [10, 20, 30, 40, 50] + for item in items: + bloom.add(item) + + # All added items should be present + for item in items: + assert bloom.might_contain(item), f"Item {item} should be in filter" + + # Items not added should mostly not be present + # (allowing for false positives) + not_added = [11, 21, 31, 41, 51, 100, 200, 300] + false_positives = sum(1 for item in not_added if bloom.might_contain(item)) + + # With 1% FP rate and 8 queries, expect ~0.08 false positives + # Allow up to 3 for statistical variation + assert false_positives <= 3, f"Too many false positives: {false_positives}/8" + + def test_large_dataset(self): + """Test filter with many items.""" + capacity = 1000 + bloom = TileBloomFilter(capacity=capacity, fp_rate=0.01) + + # Add 1000 items + items = list(range(1000, 2000)) + for item in items: + bloom.add(item) + + # All added items should be present + for item in items: + assert bloom.might_contain(item) + + # Check false positive rate on items not added + not_added = list(range(3000, 4000)) # 1000 items + false_positives = sum(1 for item in not_added if bloom.might_contain(item)) + actual_fp_rate = false_positives / len(not_added) + + # Should be close to 1% (allow 0-3% due to statistical variation) + assert 0 <= actual_fp_rate <= 0.03, f"FP rate {actual_fp_rate:.2%} outside expected range" + + def test_duplicate_adds(self): + """Test that adding same item multiple times doesn't break filter.""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.01) + + # Add same item multiple times + for _ in range(10): + bloom.add(42) + + # Should still contain the item + assert bloom.might_contain(42) + + # Should not affect other items + assert not bloom.might_contain(43) + + +class TestTileBloomFilterMath: + """Test bloom filter mathematical properties.""" + + def test_optimal_bit_calculation(self): + """Test optimal bit array size calculation.""" + # Formula: m = -(n * ln(p)) / (ln(2)^2) + # For n=1000, p=0.01: m ≈ 9586 bits + + bits = TileBloomFilter._optimal_num_bits(1000, 0.01) + + expected = -(1000 * math.log(0.01)) / (math.log(2) 
** 2) + assert abs(bits - expected) < 1 # Allow for rounding + + def test_optimal_hash_calculation(self): + """Test optimal number of hash functions calculation.""" + # Formula: k = (m/n) * ln(2) + # For m=9586, n=1000: k ≈ 7 + + num_hashes = TileBloomFilter._optimal_num_hashes(9586, 1000) + + expected = (9586 / 1000) * math.log(2) + assert abs(num_hashes - expected) < 1 + + # Should always have at least 1 hash function + assert num_hashes >= 1 + + def test_actual_fp_rate_calculation(self): + """Test actual false positive rate calculation.""" + bloom = TileBloomFilter(capacity=1000, fp_rate=0.01) + + actual_fp = bloom.get_actual_fp_rate() + + # Should be close to configured 1% + assert actual_fp is not None + assert 0.005 <= actual_fp <= 0.015 # Within 0.5%-1.5% + + def test_zero_capacity(self): + """Test filter with zero capacity.""" + bloom = TileBloomFilter(capacity=0, fp_rate=0.01) + + # get_actual_fp_rate should return None for empty filter + assert bloom.get_actual_fp_rate() is None + + +class TestTileBloomFilterPersistence: + """Test bloom filter save/load functionality.""" + + def test_save_and_load(self): + """Test saving and loading bloom filter.""" + # Create and populate filter + bloom1 = TileBloomFilter(capacity=100, fp_rate=0.01) + items = [10, 20, 30, 40, 50] + for item in items: + bloom1.add(item) + + # Save to temporary file + with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f: + temp_path = Path(f.name) + + try: + bloom1.save(temp_path) + + # Load from file + bloom2 = TileBloomFilter.load(temp_path) + + # Check all properties match + assert bloom2.capacity == bloom1.capacity + assert bloom2.fp_rate == bloom1.fp_rate + assert bloom2.num_bits == bloom1.num_bits + assert bloom2.num_hashes == bloom1.num_hashes + assert bloom2.bit_array == bloom1.bit_array + + # Check functionality preserved + for item in items: + assert bloom2.might_contain(item), f"Item {item} should be in loaded filter" + + assert not bloom2.might_contain(99) + + finally: + temp_path.unlink(missing_ok=True) + + def test_load_nonexistent_file(self): + """Test loading from non-existent file.""" + with pytest.raises(FileNotFoundError): + TileBloomFilter.load(Path("/nonexistent/bloom.bin")) + + def test_load_corrupted_file(self): + """Test loading from corrupted file.""" + # Create corrupted file (too small) + with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f: + temp_path = Path(f.name) + f.write(b"corrupted") + + try: + with pytest.raises(ValueError, match="too small"): + TileBloomFilter.load(temp_path) + finally: + temp_path.unlink(missing_ok=True) + + def test_save_creates_valid_format(self): + """Test that saved file has correct binary format.""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.02) + bloom.add(42) + + with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f: + temp_path = Path(f.name) + + try: + bloom.save(temp_path) + + # Verify file structure + with open(temp_path, "rb") as f: + # Header should be 24 bytes + import struct + header = f.read(24) + assert len(header) == 24 + + version, capacity, fp_rate, num_bits, num_hashes = struct.unpack(' 3_000_000 # ~3 MB + + def test_high_fp_rate(self): + """Test filter with high false positive rate (10%).""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.10) + + items = list(range(100)) + for item in items: + bloom.add(item) + + # Should still contain all added items + for item in items: + assert bloom.might_contain(item) + + # FP rate should be close to 10% + actual_fp = bloom.get_actual_fp_rate() + 
assert 0.05 <= actual_fp <= 0.15 # Allow 5-15% range + + def test_low_fp_rate(self): + """Test filter with very low false positive rate (0.1%).""" + bloom = TileBloomFilter(capacity=100, fp_rate=0.001) + + items = list(range(100)) + for item in items: + bloom.add(item) + + # Should contain all added items + for item in items: + assert bloom.might_contain(item) + + # Bit array should be larger (lower FP rate = more bits) + bloom_high_fp = TileBloomFilter(capacity=100, fp_rate=0.01) + assert len(bloom.bit_array) > len(bloom_high_fp.bit_array) + + def test_tile_id_range(self): + """Test with realistic HEALPix tile IDs.""" + # HEALPix nside=512 has 3,145,728 tiles + # Tile IDs range from 0 to 3,145,727 + bloom = TileBloomFilter(capacity=10000, fp_rate=0.01) + + # Add some tiles from different parts of the sky + tiles = [0, 1, 100, 1000, 10000, 100000, 1000000, 3145727] + for tile_id in tiles: + bloom.add(tile_id) + + # All should be present + for tile_id in tiles: + assert bloom.might_contain(tile_id), f"Tile {tile_id} should be in filter" + + def test_hash_distribution(self): + """Test that hash function distributes items evenly.""" + bloom = TileBloomFilter(capacity=1000, fp_rate=0.01) + + # Add 1000 sequential tile IDs + for tile_id in range(1000): + bloom.add(tile_id) + + # Count set bits + set_bits = sum( + 1 for byte in bloom.bit_array + for bit in range(8) + if byte & (1 << bit) + ) + + # With good hash distribution, ~50-70% of bits should be set + # (depends on num_hashes and capacity) + bit_fill_ratio = set_bits / bloom.num_bits + assert 0.3 <= bit_fill_ratio <= 0.8, f"Bit fill ratio {bit_fill_ratio:.2%} suggests poor distribution" + + +class TestTileBloomFilterIntegration: + """Test integration with star catalog use cases.""" + + def test_sparse_sky_coverage(self): + """Test bloom filter behavior with sparse tile coverage (like mag 0-6).""" + # Mag 0-6 has only 6,465 tiles out of 3.1M possible + # Most queries will be for non-existent tiles + bloom = TileBloomFilter(capacity=6465, fp_rate=0.01) + + # Add actual tiles (scattered across sky) + actual_tiles = [i * 500 for i in range(6465)] # Sparse distribution + for tile_id in actual_tiles: + bloom.add(tile_id) + + # Query for tiles in a typical FOV (48 tiles) + query_tiles = list(range(1000, 1048)) # Probably no bright stars here + + # Most should be filtered out (not in bloom filter) + passed = [t for t in query_tiles if bloom.might_contain(t)] + + # Expect ~1% false positive rate: 48 * 0.01 = 0.48, so 0-2 tiles + assert len(passed) <= 3, f"Too many tiles passed filter: {len(passed)}/48" + + def test_dense_sky_coverage(self): + """Test bloom filter behavior with dense tile coverage (like mag 14-17).""" + # Mag 14-17 has 3.1M tiles (98% coverage) + # Most queries will find tiles + bloom = TileBloomFilter(capacity=3_100_000, fp_rate=0.01) + + # Add most tiles (simulating 98% coverage) + # For testing, add every tile except multiples of 50 + for tile_id in range(3_100_000): + if tile_id % 50 != 0: + bloom.add(tile_id) + + # Query for tiles in a typical FOV (48 tiles) + query_tiles = list(range(1000000, 1000048)) + + # Most should pass filter (they exist) + passed = [t for t in query_tiles if bloom.might_contain(t)] + + # Should pass most tiles (minus the 2% that don't exist + some FP) + assert len(passed) >= 44, f"Too few tiles passed: {len(passed)}/48" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/python/tests/test_star_catalog.py b/python/tests/test_star_catalog.py new file mode 100644 index 
000000000..883450da9 --- /dev/null +++ b/python/tests/test_star_catalog.py @@ -0,0 +1,138 @@ + +import unittest +import numpy as np +import struct +import os +import tempfile +import shutil +from pathlib import Path +from unittest.mock import MagicMock, patch +import sys + +from PiFinder.star_catalog import DeepStarCatalog, STAR_RECORD_DTYPE, STAR_RECORD_SIZE + +class TestDeepStarCatalog(unittest.TestCase): + def setUp(self): + self.test_dir = tempfile.mkdtemp() + self.catalog_path = Path(self.test_dir) + self.catalog = DeepStarCatalog(str(self.catalog_path)) + self.catalog.nside = 512 + + def tearDown(self): + shutil.rmtree(self.test_dir) + + def test_parse_records(self): + # Create a fake star record + # Format: Date: Mon, 24 Nov 2025 12:57:13 +0100 Subject: [PATCH 22/27] Compressed indexes and disabling bloom --- python/PiFinder/deep_chart.py | 181 +++++++++++++- python/PiFinder/gps_ubx_parser.py | 2 +- python/PiFinder/star_catalog.py | 356 ++++++++++++++++++--------- python/PiFinder/ui/object_details.py | 12 +- 4 files changed, 419 insertions(+), 132 deletions(-) diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index 9a21af256..864f67dda 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -202,14 +202,39 @@ def generate_chart( image_rotate += roll # Progressive rendering: Yield image after each magnitude band loads + # Use INCREMENTAL rendering to avoid re-rendering all stars on each band final_image = None + base_image = None # Accumulate star rendering incrementally + previous_star_count = 0 + for stars, is_complete in stars_generator: t_render_start = time.time() - # Render chart with rotation applied to star coordinates - image = self.render_chart( - stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query - ) + # Incremental rendering: only render NEW stars from this band + new_star_count = len(stars) - previous_star_count + + if new_star_count > 0 or base_image is None: + # Render incrementally: pass base_image and only new stars + # render_chart will draw new stars onto the base + new_stars = stars[previous_star_count:] if previous_star_count > 0 else stars + base_image = self.render_chart_incremental( + all_stars=stars, # All stars for intensity scaling + new_stars=new_stars, # Only new stars to draw + base_image=base_image, # Existing image or None + center_ra=catalog_object.ra, + center_dec=catalog_object.dec, + fov=fov, + resolution=resolution, + magnification=mag, + rotation=image_rotate, + mag_limit=mag_limit_query + ) + logger.info(f"PROGRESSIVE: Incremental render of {new_star_count} new stars (total {len(stars)})") + else: + logger.info(f"PROGRESSIVE: No new stars in this band (total {len(stars)})") + + previous_star_count = len(stars) + image = base_image.copy() # Work on a copy for overlays # Add FOV circle BEFORE text overlays so it appears behind them if burn_in and display_class is not None: @@ -237,8 +262,8 @@ def generate_chart( t_render_end = time.time() logger.info( - f"PROGRESSIVE: Rendered {len(stars)} stars in {(t_render_end-t_render_start)*1000:.1f}ms " - f"(complete={is_complete}, mag_limit={mag_limit_query:.1f})" + f"PROGRESSIVE: Total render time {(t_render_end-t_render_start)*1000:.1f}ms " + f"(complete={is_complete}, total_stars={len(stars)})" ) final_image = image @@ -459,6 +484,150 @@ def render_chart( return image + def render_chart_incremental( + self, + all_stars: np.ndarray, + new_stars: np.ndarray, + base_image: Optional[Image.Image], + center_ra: 
float, + center_dec: float, + fov: float, + resolution: Tuple[int, int], + magnification: float = 50.0, + rotation: float = 0.0, + mag_limit: float = 17.0, + ) -> Image.Image: + """ + Incrementally render new stars onto existing base image. + Uses intensity scaling from ALL stars to maintain consistent brightness. + + Args: + all_stars: All stars loaded so far (for intensity scaling) + new_stars: Only the new stars to render + base_image: Existing image to draw onto (None for first render) + center_ra: Center RA in degrees + center_dec: Center Dec in degrees + fov: Field of view in degrees + resolution: (width, height) tuple + magnification: Magnification factor + rotation: Rotation angle in degrees + mag_limit: Limiting magnitude + + Returns: + PIL Image with new stars added + """ + import time + t_start = time.time() + + width, height = resolution + + # Start with base image or create new blank one + if base_image is None: + image_array = np.zeros((height, width, 3), dtype=np.uint8) + else: + image_array = np.array(base_image) + + logger.info(f"Render Chart INCREMENTAL: {len(new_stars)} new stars, {len(all_stars)} total") + + if len(new_stars) == 0: + return Image.fromarray(image_array, mode="RGB") + + # Calculate intensity scaling from ALL stars (for consistency across bands) + all_mags = all_stars[:, 2] + brightest_mag = np.min(all_mags) + faintest_mag = np.max(all_mags) + + # Convert new stars to numpy arrays + ra_arr = new_stars[:, 0] + dec_arr = new_stars[:, 1] + mag_arr = new_stars[:, 2] + + # Projection (same as render_chart) + center_ra_rad = np.radians(center_ra) + center_dec_rad = np.radians(center_dec) + ra_rad = np.radians(ra_arr) + dec_rad = np.radians(dec_arr) + + cos_center_dec = np.cos(center_dec_rad) + + dra = ra_rad - center_ra_rad + dra = np.where(dra > np.pi, dra - 2*np.pi, dra) + dra = np.where(dra < -np.pi, dra + 2*np.pi, dra) + ddec = dec_rad - center_dec_rad + + x_proj = dra * cos_center_dec + y_proj = ddec + + pixel_scale = width / np.radians(fov) + + x_screen = width / 2.0 - x_proj * pixel_scale + y_screen = height / 2.0 - y_proj * pixel_scale + + # Apply rotation + if rotation != 0: + rot_rad = np.radians(rotation) + cos_rot = np.cos(rot_rad) + sin_rot = np.sin(rot_rad) + + center_x = width / 2.0 + center_y = height / 2.0 + x_rel = x_screen - center_x + y_rel = y_screen - center_y + + x_rotated = x_rel * cos_rot - y_rel * sin_rot + y_rotated = x_rel * sin_rot + y_rel * cos_rot + + x_screen = x_rotated + center_x + y_screen = y_rotated + center_y + + # Filter visible stars + mask = ( + (x_screen >= 0) + & (x_screen < width) + & (y_screen >= 0) + & (y_screen < height) + ) + + x_visible = x_screen[mask] + y_visible = y_screen[mask] + mag_visible = mag_arr[mask] + + logger.info(f"Render Chart INCREMENTAL: {len(x_visible)} of {len(new_stars)} new stars visible") + + # Calculate intensities using GLOBAL magnitude range (from all_stars) + if len(mag_visible) == 0: + intensities = np.array([]) + elif faintest_mag - brightest_mag < 0.1: + intensities = np.full_like(mag_visible, 255, dtype=int) + else: + # Use global magnitude range for consistent scaling + intensities = 255 - ((mag_visible - brightest_mag) / (faintest_mag - brightest_mag) * 205) + intensities = intensities.astype(int) + + # Draw new stars + ix = np.round(x_visible).astype(int) + iy = np.round(y_visible).astype(int) + + for i in range(len(ix)): + px = ix[i] + py = iy[i] + intensity = intensities[i] + + if 0 <= px < width and 0 <= py < height: + image_array[py, px, 0] = min(255, image_array[py, px, 0] + 
intensity) + + np.clip(image_array[:, :, 0], 0, 255, out=image_array[:, :, 0]) + + image = Image.fromarray(image_array, mode="RGB") + + # Tag as deep chart + image.is_loading_placeholder = False # type: ignore[attr-defined] + + t_end = time.time() + logger.debug(f" Incremental render time: {(t_end-t_start)*1000:.1f}ms") + + return image + def _draw_star_antialiased_fast(self, image_array, ix, iy, fx, fy, intensity): """ diff --git a/python/PiFinder/gps_ubx_parser.py b/python/PiFinder/gps_ubx_parser.py index 5627b1af6..7c14a760c 100644 --- a/python/PiFinder/gps_ubx_parser.py +++ b/python/PiFinder/gps_ubx_parser.py @@ -159,7 +159,7 @@ async def connect(cls, log_queue, host="127.0.0.1", port=2947, max_attempts=5): async def from_file(cls, file_path: str): """Create a UBXParser instance from a file.""" f = await aiofiles.open(file_path, "rb") - return cls(log_queue=None, reader=f, file_path=file_path) # type:ignore[arg-type] + return cls(log_queue=None, reader=f, file_path=file_path) # type: ignore[arg-type] async def close(self): """Clean up resources and close the connection.""" diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index f43d11cac..f0306c913 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -16,6 +16,7 @@ import json import logging import math +import mmap import struct import threading import time @@ -67,6 +68,106 @@ class CatalogState(Enum): READY = 2 +class CompressedIndex: + """ + Memory-efficient compressed index reader with mmap support. + + Uses run-length encoding format: + - Header: version(4), num_tiles(4), num_runs(4) + - Run directory: [start_tile_id(4), data_offset(8)] per run (in RAM) + - Run data: [length(2), offset_base(8), sizes...] (mmap'd) + """ + + def __init__(self, index_file: Path): + """Load compressed index with run directory in memory""" + self.index_file = index_file + self.run_directory: List[Tuple[int, int]] = [] # (start_tile_id, data_offset) + + # Open file for mmap + self._file = open(index_file, 'rb') + self._mm = mmap.mmap(self._file.fileno(), 0, access=mmap.ACCESS_READ) + + # Read header + version, self.num_tiles, num_runs = struct.unpack_from(" Optional[Tuple[int, int]]: + """ + Get (offset, size) for a tile ID. + + Returns None if tile doesn't exist. 
+ """ + # Binary search in run directory + left, right = 0, len(self.run_directory) - 1 + run_idx = -1 + + while left <= right: + mid = (left + right) // 2 + start_tile = self.run_directory[mid][0] + + # Check if tile is in this run + if mid < len(self.run_directory) - 1: + next_start = self.run_directory[mid + 1][0] + if start_tile <= tile_id < next_start: + run_idx = mid + break + else: + # Last run + if start_tile <= tile_id: + run_idx = mid + break + + if tile_id < start_tile: + right = mid - 1 + else: + left = mid + 1 + + if run_idx == -1: + return None + + # Read run data from mmap + start_tile, data_offset = self.run_directory[run_idx] + offset_in_run = tile_id - start_tile + + # Read run header + run_length, offset_base = struct.unpack_from("= run_length: + return None + + # Read sizes up to and including our tile + sizes_offset = data_offset + 10 # After length(2) + offset_base(8) + sizes_data = self._mm[sizes_offset:sizes_offset + (offset_in_run + 1) * 2] + sizes = struct.unpack(f"<{offset_in_run + 1}H", sizes_data) + + # Calculate tile offset and size + tile_offset = offset_base + sum(sizes[:-1]) + tile_size = sizes[-1] + + return (tile_offset, tile_size) + + def close(self): + """Close mmap and file""" + if self._mm: + self._mm.close() + if self._file: + self._file.close() + + def __del__(self): + """Cleanup on deletion""" + self.close() + + class TileBloomFilter: """ Bloom filter for tile existence checks. @@ -290,7 +391,6 @@ def __init__(self, catalog_path: str): self.observer_lat: Optional[float] = None self.limiting_magnitude: float = 12.0 self.visible_tiles: Optional[Set[int]] = None - self.spatial_index: Optional[Any] = None self.tile_cache: Dict[Tuple[int, float], np.ndarray] = {} self.cache_lock = threading.Lock() self.load_thread: Optional[threading.Thread] = None @@ -370,11 +470,14 @@ def _background_load_worker(self): logger.info(f">>> Catalog mag bands: {json.dumps(bands)}") # Preload all bloom filters into memory (~12 MB total) - # This eliminates on-demand loading delays during chart generation - self._preload_bloom_filters() + # DISABLED: Bloom filters not currently used (testing performance on Pi) + # self._preload_bloom_filters() + + # Preload all compressed indices (run directories) into memory (~2-12 MB total) + # This eliminates first-query delays (70ms per band → 420ms total stuttering) + self._preload_compressed_indices() # Initialize empty structures (no preloading) - self.spatial_index = {} self.visible_tiles = None # Load full sky on-demand # Mark ready @@ -389,46 +492,6 @@ def _background_load_worker(self): self.load_progress = f"Error: {str(e)}" self.state = CatalogState.NOT_LOADED - def _load_binary_spatial_index(self, bin_path: Path) -> dict: - """ - Load binary spatial index - - Binary format: - - Header: [version: 4][num_tiles: 4][nside: 4] - - Per tile: [tile_id: 4][num_bands: 1][bands: (mag_min:1, mag_max:1)*num_bands] - - Returns: - Dict mapping tile_id -> [(mag_min, mag_max), ...] 
- """ - with open(bin_path, "rb") as f: - # Read header - header = f.read(12) - version, num_tiles, nside = struct.unpack(" Optional[Set[int]]: """ @@ -522,10 +585,13 @@ def get_stars_for_fov_progressive( return # Calculate HEALPix tiles covering FOV + # fov_deg is the diagonal field width, query_disc expects radius + # For square FOV rotated arbitrarily, need circumscribed circle radius = diagonal/2 + # Add 10% margin to ensure edge tiles are fully covered vec = hp.ang2vec(ra_deg, dec_deg, lonlat=True) - radius_rad = np.radians(fov_deg * 0.85) + radius_rad = np.radians(fov_deg / 2 * 1.1) tiles = hp.query_disc(self.nside, vec, radius_rad) - logger.debug(f"HEALPix PROGRESSIVE: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") + logger.debug(f"HEALPix PROGRESSIVE: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° (radius={np.degrees(radius_rad):.3f}°) at nside={self.nside}") # Filter by visible hemisphere if self.visible_tiles: @@ -557,22 +623,35 @@ def get_stars_for_fov_progressive( ) # logger.info(f">>> _load_tiles_for_mag_band returned {len(band_stars)} stars") - t_band_end = time.time() - logger.debug(f"PROGRESSIVE: Mag band {mag_min}-{mag_max} loaded {len(band_stars)} stars in {(t_band_end-t_band_start)*1000:.1f}ms") + t_load = (time.time() - t_band_start) * 1000 # Add to cumulative list + t_append_start = time.time() if len(band_stars) > 0: all_stars_list.append(band_stars) + t_append = (time.time() - t_append_start) * 1000 # Yield current results (not complete yet unless this is the last band) is_last_band = mag_max >= mag_limit - + + t_concat_start = time.time() if all_stars_list: current_total = np.concatenate(all_stars_list) else: current_total = np.empty((0, 3)) - + t_concat = (time.time() - t_concat_start) * 1000 + + t_yield_start = time.time() yield (current_total, is_last_band) + t_yield = (time.time() - t_yield_start) * 1000 + + logger.info( + f">>> PROGRESSIVE TIMING: mag {mag_min}-{mag_max}: " + f"load={t_load:.1f}ms, append={t_append:.1f}ms, " + f"concat={t_concat:.1f}ms, yield={t_yield:.1f}ms, " + f"total={(t_load+t_append+t_concat+t_yield):.1f}ms, " + f"stars={len(band_stars)}, cumulative={len(current_total)}" + ) if is_last_band: break @@ -632,13 +711,13 @@ def get_stars_for_fov( return np.empty((0, 3)) # Calculate HEALPix tiles covering FOV - # Query larger area to account for rectangular screen and rotation - # Diagonal of square is sqrt(2) * side, with rotation could be any angle + # fov_deg is the diagonal field width, query_disc expects radius + # For square FOV rotated arbitrarily, need circumscribed circle radius = diagonal/2 + # Add 10% margin to ensure edge tiles are fully covered vec = hp.ang2vec(ra_deg, dec_deg, lonlat=True) - # Use full diagonal + margin to ensure corners are covered even when rotated - radius_rad = np.radians(fov_deg * 0.85) # sqrt(2)/2 ≈ 0.707, add extra for rotation + radius_rad = np.radians(fov_deg / 2 * 1.1) tiles = hp.query_disc(self.nside, vec, radius_rad) - logger.debug(f"HEALPix: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° at nside={self.nside}") + logger.debug(f"HEALPix: Querying {len(tiles)} tiles for FOV={fov_deg:.2f}° (radius={np.degrees(radius_rad):.3f}°) at nside={self.nside}") # Filter by visible hemisphere if self.visible_tiles: @@ -1169,6 +1248,61 @@ def _preload_bloom_filters(self) -> None: f"{total_bytes / 1024 / 1024:.1f} MB total in {t_total:.1f}ms" ) + def _preload_compressed_indices(self) -> None: + """ + Preload all compressed indices (run directories) into memory during startup. 
+ + Loads compressed index run directories (~2-12 MB total) to eliminate first-query + delays during chart generation. Each compressed index loads its run directory + into RAM for fast binary search, while keeping run data in mmap. + + This runs in background thread during catalog startup and trades a one-time + ~200ms startup cost for eliminating 6 × 70ms = 420ms of stuttering during + first chart generation. + """ + if not self.metadata or "mag_bands" not in self.metadata: + logger.warning(">>> No metadata available, skipping compressed index preload") + return + + t0_total = time.time() + bands_loaded = 0 + + logger.info(">>> Preloading compressed indices for all magnitude bands...") + + for band_info in self.metadata["mag_bands"]: + mag_min = int(band_info["min"]) + mag_max = int(band_info["max"]) + cache_key = f"index_{mag_min}_{mag_max}" + + # Try compressed index first (v3) + index_file_v3 = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "index_v3.bin" + + if not index_file_v3.exists(): + logger.debug( + f">>> Compressed index not found for {cache_key}: {index_file_v3} - " + f"Will fall back to v1/v2 index on first query" + ) + continue + + t0 = time.time() + self._index_cache[cache_key] = CompressedIndex(index_file_v3) + t_load = (time.time() - t0) * 1000 + + compressed_idx = self._index_cache[cache_key] + bands_loaded += 1 + + logger.info( + f">>> Loaded compressed index {cache_key}: " + f"{compressed_idx.num_tiles:,} tiles, {len(compressed_idx.run_directory):,} runs " + f"in {t_load:.1f}ms" + ) + + t_total = (time.time() - t0_total) * 1000 + logger.info( + f">>> Compressed index preload complete: {bands_loaded} indices " + f"in {t_total:.1f}ms" + ) + def _ensure_bloom_filter(self, cache_key: str, mag_min: int, mag_max: int) -> None: """ Ensure bloom filter is loaded for given magnitude band. 
@@ -1509,22 +1643,44 @@ def _load_tiles_batch_single_band( if not tiles_file.exists(): return np.empty((0, 3)) - # Load index cache_key = f"index_{mag_min}_{mag_max}" + + # Bloom filter pre-check: DISABLED for performance testing + # TODO: Re-enable after Pi performance comparison + # Saves ~4ms per query by checking bloom filter (0.24ms) vs compressed index (2.4ms) + # Trade-off: 12 MB RAM for bloom filters vs 4ms per query + # + # if cache_key in self._bloom_filters: + # bloom = self._bloom_filters[cache_key] + # has_any_tile = any(bloom.might_contain(tile_id) for tile_id in tile_ids) + # if not has_any_tile: + # logger.debug( + # f">>> Bloom filter: No tiles exist in {cache_key} for query region, " + # f"skipping band" + # ) + # return np.empty((0, 3)) + + # Load index - prefer compressed v3 format if not hasattr(self, '_index_cache'): self._index_cache = {} t_index_start = time.time() logger.info(f">>> Checking index cache for {cache_key}, in_cache={cache_key in self._index_cache}") if cache_key not in self._index_cache: - if index_file_bin.exists(): - logger.info(f">>> Reading binary index from {index_file_bin}") - # Pass needed tiles for optimization (only for initial load) - # This creates a PARTIAL index with only these tiles + # Try compressed index first (v3) + index_file_v3 = band_dir / "index_v3.bin" + if index_file_v3.exists(): + logger.info(f">>> Loading compressed index from {index_file_v3}") t0 = time.time() - self._index_cache[cache_key] = self._read_binary_index(index_file_bin, needed_tiles=set(tile_ids)) + self._index_cache[cache_key] = CompressedIndex(index_file_v3) t_read_index = (time.time() - t0) * 1000 - logger.info(f">>> Partial index loaded, {len(self._index_cache[cache_key])} tiles in cache in {t_read_index:.1f}ms") + logger.info(f">>> Compressed index loaded in {t_read_index:.1f}ms") + elif index_file_bin.exists(): + logger.info(f">>> Loading FULL index from {index_file_bin} (v1/v2 format)") + t0 = time.time() + self._index_cache[cache_key] = self._read_binary_index(index_file_bin, needed_tiles=None) + t_read_index = (time.time() - t0) * 1000 + logger.info(f">>> FULL index loaded, {len(self._index_cache[cache_key])} tiles in {t_read_index:.1f}ms") elif index_file_json.exists(): logger.info(f">>> Reading JSON index from {index_file_json}") with open(index_file_json, "r") as f: @@ -1534,55 +1690,7 @@ def _load_tiles_batch_single_band( logger.warning(f">>> No index file found for {cache_key}") return np.empty((0, 3)) else: - # Check if cached index has all the tiles we need - # If missing tiles, load just those tiles and merge into cache - index = self._index_cache[cache_key] - missing_tile_ids = [tid for tid in tile_ids if str(tid) not in index] - if missing_tile_ids: - # Use bloom filter to pre-screen tiles (fast, space-efficient) - # Load bloom filter if not already cached - self._ensure_bloom_filter(cache_key, mag_min, mag_max) - bloom = self._bloom_filters[cache_key] - - # Filter missing tiles through bloom filter - t0 = time.time() - tiles_to_load = [tid for tid in missing_tile_ids if bloom.might_contain(tid)] - t_bloom = (time.time() - t0) * 1000 - tiles_filtered_out = len(missing_tile_ids) - len(tiles_to_load) - - if tiles_filtered_out > 0: - logger.debug( - f">>> Bloom filter: {len(missing_tile_ids)} missing → " - f"{len(tiles_to_load)} candidates (filtered {tiles_filtered_out}) " - f"in {t_bloom:.2f}ms" - ) - - if tiles_to_load: - t0 = time.time() - logger.info(f">>> Cached index missing {len(tiles_to_load)}/{len(tile_ids)} tiles, loading them...") - if 
index_file_bin.exists(): - # Load only the missing tiles that actually exist - missing_index = self._read_binary_index(index_file_bin, needed_tiles=set(tiles_to_load)) - # Merge into existing cache - self._index_cache[cache_key].update(missing_index) - t_missing = (time.time() - t0) * 1000 - logger.info(f">>> Added {len(missing_index)} tiles to cache in {t_missing:.1f}ms, now {len(self._index_cache[cache_key])} tiles total") - - # Trim cache if it exceeds limit - self._trim_index_cache(cache_key, tile_ids) - elif index_file_json.exists(): - # For JSON, we have to load the full file (no selective loading) - with open(index_file_json, "r") as f: - full_index = json.load(f) - # Merge only the missing tiles that we're loading - for tid in tiles_to_load: - tile_key = str(tid) - if tile_key in full_index: - self._index_cache[cache_key][tile_key] = full_index[tile_key] - logger.info(f">>> Added missing tiles to cache, now {len(self._index_cache[cache_key])} tiles total") - - # Trim cache if it exceeds limit - self._trim_index_cache(cache_key, tile_ids) + logger.debug(f">>> Using cached index for {cache_key}") index = self._index_cache[cache_key] t_index_total = (time.time() - t_index_start) * 1000 @@ -1592,12 +1700,22 @@ def _load_tiles_batch_single_band( logger.debug(f">>> Building read_ops for {len(tile_ids)} tiles...") # Collect all tile read operations - read_ops = [] - for tile_id in tile_ids: - tile_key = str(tile_id) - if tile_key in index: - tile_info = index[tile_key] - read_ops.append((tile_id, tile_info)) + # Handle both CompressedIndex and dict formats + read_ops: List[Tuple[int, Dict[str, int]]] = [] + if isinstance(index, CompressedIndex): + # Compressed index: use .get() method + for tile_id in tile_ids: + tile_tuple = index.get(tile_id) + if tile_tuple: + offset, size = tile_tuple + read_ops.append((tile_id, {"offset": offset, "size": size})) + else: + # Dict-based index (v1/v2 or JSON) + for tile_id in tile_ids: + tile_key = str(tile_id) + if tile_key in index: + tile_info: Dict[str, int] = index[tile_key] + read_ops.append((tile_id, tile_info)) if not read_ops: logger.debug(f">>> No tiles to load (all {len(tile_ids)} requested tiles are empty)") @@ -1635,7 +1753,7 @@ def _load_tiles_batch_single_band( chunk_end = offset + tile_info.get("compressed_size", tile_info["size"]) # Find consecutive tiles for chunk reading - tiles_in_chunk = [(tile_id, tile_info)] + tiles_in_chunk: List[Tuple[int, Dict[str, int]]] = [(tile_id, tile_info)] j = i + 1 inner_iterations = 0 while j < len(read_ops): @@ -1683,7 +1801,7 @@ def _load_tiles_batch_single_band( t_decode_total += (time.time() - t_decode_start) # Filter by magnitude - mask = mags < mag_limit + mask = mags <= mag_limit if np.any(mask): all_ras.append(ras[mask]) diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 2a0138859..d1f711335 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -371,25 +371,25 @@ def _draw_crosshair_simple(self, pulse=False): inner = 2 # Fixed gap (small center) - # Get screen buffer as numpy array for pixel inversion + # Get screen buffer as numpy array for pixel manipulation pixels = np.array(self.screen) - # Invert pixels to create crosshair (always visible on any background) + # Invert crosshair pixels (red channel only) for visibility # Horizontal lines (left and right of center) for x in range(max(0, cx - outer), max(0, cx - inner)): if 0 <= x < width and 0 <= cy < height: - pixels[cy, x, 0] = 255 - pixels[cy, x, 0] # 
Invert red channel + pixels[cy, x, 0] = 255 - pixels[cy, x, 0] for x in range(min(width, cx + inner), min(width, cx + outer + 1)): if 0 <= x < width and 0 <= cy < height: - pixels[cy, x, 0] = 255 - pixels[cy, x, 0] # Invert red channel + pixels[cy, x, 0] = 255 - pixels[cy, x, 0] # Vertical lines (top and bottom of center) for y in range(max(0, cy - outer), max(0, cy - inner)): if 0 <= y < height and 0 <= cx < width: - pixels[y, cx, 0] = 255 - pixels[y, cx, 0] # Invert red channel + pixels[y, cx, 0] = 255 - pixels[y, cx, 0] for y in range(min(height, cy + inner), min(height, cy + outer + 1)): if 0 <= y < height and 0 <= cx < width: - pixels[y, cx, 0] = 255 - pixels[y, cx, 0] # Invert red channel + pixels[y, cx, 0] = 255 - pixels[y, cx, 0] # Update screen buffer with inverted pixels from PIL import Image From adec3f80f6e9075af05b106b7a3df8a679972de9 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Tue, 25 Nov 2025 12:32:59 +0100 Subject: [PATCH 23/27] fixed healpix boundaries bug --- python/PiFinder/deep_chart.py | 103 ++- python/PiFinder/star_catalog.py | 980 ++++++--------------------- python/PiFinder/ui/object_details.py | 5 +- python/tests/test_bloom_filter.py | 382 ----------- 4 files changed, 246 insertions(+), 1224 deletions(-) delete mode 100644 python/tests/test_bloom_filter.py diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index 864f67dda..0a768726b 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -202,39 +202,16 @@ def generate_chart( image_rotate += roll # Progressive rendering: Yield image after each magnitude band loads - # Use INCREMENTAL rendering to avoid re-rendering all stars on each band + # Re-render all stars each time (simple, correct, fast enough) final_image = None - base_image = None # Accumulate star rendering incrementally - previous_star_count = 0 for stars, is_complete in stars_generator: t_render_start = time.time() - # Incremental rendering: only render NEW stars from this band - new_star_count = len(stars) - previous_star_count - - if new_star_count > 0 or base_image is None: - # Render incrementally: pass base_image and only new stars - # render_chart will draw new stars onto the base - new_stars = stars[previous_star_count:] if previous_star_count > 0 else stars - base_image = self.render_chart_incremental( - all_stars=stars, # All stars for intensity scaling - new_stars=new_stars, # Only new stars to draw - base_image=base_image, # Existing image or None - center_ra=catalog_object.ra, - center_dec=catalog_object.dec, - fov=fov, - resolution=resolution, - magnification=mag, - rotation=image_rotate, - mag_limit=mag_limit_query - ) - logger.info(f"PROGRESSIVE: Incremental render of {new_star_count} new stars (total {len(stars)})") - else: - logger.info(f"PROGRESSIVE: No new stars in this band (total {len(stars)})") - - previous_star_count = len(stars) - image = base_image.copy() # Work on a copy for overlays + # Render ALL stars from scratch + image = self.render_chart( + stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query + ) # Add FOV circle BEFORE text overlays so it appears behind them if burn_in and display_class is not None: @@ -327,17 +304,6 @@ def render_chart( logger.info(f"Render Chart: {len(stars)} stars input, center=({center_ra:.4f}, {center_dec:.4f}), fov={fov:.4f}, res={resolution}") - if len(stars) == 0: - # Still draw crosshair even if no stars - cx, cy = width // 2, height // 2 - marker_color = (128, 0, 0) - size = 5 - draw.line([cx - 
size, cy, cx + size, cy], fill=marker_color, width=1) - draw.line([cx, cy - size, cx, cy + size], fill=marker_color, width=1) - return image - - # Convert to numpy arrays for vectorized operations - t1 = time.time() # stars is already a numpy array (N, 3) stars_array = stars ra_arr = stars_array[:, 0] @@ -423,24 +389,25 @@ def render_chart( logger.info(f"Render Chart: {len(x_visible)} stars visible on screen (of {len(stars)} total)") - # Scale brightness based on magnitude range in current field - # Brightest star in field → 255, faintest → 50 - # This auto-adjusts contrast for any FOV + # Scale brightness based on FIXED magnitude range + # Use brightest visible star and LIMITING MAGNITUDE (not faintest loaded star) + # This ensures consistent intensity scaling across progressive renders if len(mag_visible) == 0: intensities = np.array([]) else: brightest_mag = np.min(mag_visible) - faintest_mag = np.max(mag_visible) - - if faintest_mag - brightest_mag < 0.1: - # All stars same magnitude - use full brightness - intensities = np.full_like(mag_visible, 255, dtype=int) - else: - # Linear scaling from brightest (255) to faintest (50) - # Note: Lower magnitude = brighter star - intensities = 255 - ((mag_visible - brightest_mag) / (faintest_mag - brightest_mag) * 205) - intensities = intensities.astype(int) + faintest_mag = mag_limit # Use limiting magnitude, not max(mag_visible) + + # Always use proper magnitude scaling + # Linear scaling from brightest (255) to limiting magnitude (50) + # Note: Lower magnitude = brighter star + mag_range = faintest_mag - brightest_mag + if mag_range < 0.01: + mag_range = 0.01 # Avoid division by zero + + intensities = 255 - ((mag_visible - brightest_mag) / mag_range * 205) + intensities = np.clip(intensities, 50, 255).astype(int) # Render stars: crosses for bright ones, single pixels for faint t3 = time.time() @@ -456,7 +423,8 @@ def render_chart( # Draw all stars as single pixels (no crosses) if 0 <= px < width and 0 <= py < height: - image_array[py, px, 0] = min(255, image_array[py, px, 0] + intensity) + # Use max to avoid bright blobs from overlapping stars + image_array[py, px, 0] = max(image_array[py, px, 0], intensity) np.clip(image_array[:, :, 0], 0, 255, out=image_array[:, :, 0]) t5 = time.time() @@ -486,7 +454,6 @@ def render_chart( def render_chart_incremental( self, - all_stars: np.ndarray, new_stars: np.ndarray, base_image: Optional[Image.Image], center_ra: float, @@ -496,13 +463,14 @@ def render_chart_incremental( magnification: float = 50.0, rotation: float = 0.0, mag_limit: float = 17.0, + fixed_brightest_mag: Optional[float] = None, + fixed_faintest_mag: Optional[float] = None, ) -> Image.Image: """ Incrementally render new stars onto existing base image. - Uses intensity scaling from ALL stars to maintain consistent brightness. + Uses FIXED intensity scaling to maintain consistent brightness across bands. 
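The fixed scaling referred to here is the same linear magnitude-to-intensity mapping used in render_chart above: 255 at the brightest magnitude, 50 at the limiting magnitude, clipped to [50, 255]. A standalone sketch for reference; the function name and example values are illustrative, but the formula mirrors the one in the patch.

import numpy as np

def star_intensity(mags: np.ndarray, brightest_mag: float, limiting_mag: float) -> np.ndarray:
    # Linear map: brightest magnitude -> 255, limiting magnitude -> 50.
    # Lower magnitude means a brighter star.
    mag_range = max(limiting_mag - brightest_mag, 0.01)  # avoid divide-by-zero
    intensities = 255 - ((mags - brightest_mag) / mag_range * 205)
    return np.clip(intensities, 50, 255).astype(int)

# With brightest_mag=2.0 and limiting_mag=14.0:
#   mag  2.0 -> 255, mag 8.0 -> 152, mag 14.0 -> 50
print(star_intensity(np.array([2.0, 8.0, 14.0]), 2.0, 14.0))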
Args: - all_stars: All stars loaded so far (for intensity scaling) new_stars: Only the new stars to render base_image: Existing image to draw onto (None for first render) center_ra: Center RA in degrees @@ -512,6 +480,8 @@ def render_chart_incremental( magnification: Magnification factor rotation: Rotation angle in degrees mag_limit: Limiting magnitude + fixed_brightest_mag: Brightest magnitude for intensity scaling (from first band) + fixed_faintest_mag: Faintest magnitude for intensity scaling (limiting mag) Returns: PIL Image with new stars added @@ -527,15 +497,21 @@ def render_chart_incremental( else: image_array = np.array(base_image) - logger.info(f"Render Chart INCREMENTAL: {len(new_stars)} new stars, {len(all_stars)} total") + logger.info(f"Render Chart INCREMENTAL: {len(new_stars)} new stars") if len(new_stars) == 0: return Image.fromarray(image_array, mode="RGB") - # Calculate intensity scaling from ALL stars (for consistency across bands) - all_mags = all_stars[:, 2] - brightest_mag = np.min(all_mags) - faintest_mag = np.max(all_mags) + # Use FIXED intensity scaling (established from first band + limiting mag) + if fixed_brightest_mag is None or fixed_faintest_mag is None: + # Fallback: calculate from new stars only + new_mags = new_stars[:, 2] + brightest_mag = np.min(new_mags) + faintest_mag = np.max(new_mags) + logger.warning(f"INCREMENTAL: No fixed scale provided, using fallback: {brightest_mag:.2f} to {faintest_mag:.2f}") + else: + brightest_mag = fixed_brightest_mag + faintest_mag = fixed_faintest_mag # Convert new stars to numpy arrays ra_arr = new_stars[:, 0] @@ -614,7 +590,8 @@ def render_chart_incremental( intensity = intensities[i] if 0 <= px < width and 0 <= py < height: - image_array[py, px, 0] = min(255, image_array[py, px, 0] + intensity) + # Use max instead of add to avoid bright blobs from overlapping stars + image_array[py, px, 0] = max(image_array[py, px, 0], intensity) np.clip(image_array[:, :, 0], 0, 255, out=image_array[:, :, 0]) @@ -897,4 +874,4 @@ def get_cache_key(self, catalog_object) -> str: def invalidate_cache(self): """Clear chart cache (call when equipment changes)""" self.chart_cache.clear() - logger.debug("Chart cache invalidated") + logger.debug("Chart cache invalidated") \ No newline at end of file diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index f0306c913..95108baf3 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -12,7 +12,6 @@ - Proper motion corrections """ -import hashlib import json import logging import math @@ -38,19 +37,19 @@ logger = logging.getLogger("PiFinder.StarCatalog") -# Star record format (must match healpix_builder.py) -STAR_RECORD_FORMAT = " int: - """ - Calculate optimal bit array size. - - Formula: m = -(n * ln(p)) / (ln(2)^2) - - Args: - n: Number of elements (capacity) - p: Target false positive rate - - Returns: - Optimal number of bits - """ - m = -(n * math.log(p)) / (math.log(2) ** 2) - return int(m) - - @staticmethod - def _optimal_num_hashes(m: int, n: int) -> int: - """ - Calculate optimal number of hash functions. - - Formula: k = (m/n) * ln(2) - - Args: - m: Number of bits in array - n: Number of elements (capacity) - - Returns: - Optimal number of hash functions - """ - if n == 0: - return 1 # Avoid division by zero for empty filter - k = (m / n) * math.log(2) - return max(1, int(k)) - - def _hash(self, item: int, seed: int) -> int: - """ - Hash function using MD5 with seed mixing. 
- - Args: - item: Tile ID to hash - seed: Seed for this hash function (0 to k-1) - - Returns: - Bit position in range [0, num_bits) - """ - h = hashlib.md5(f"{item}:{seed}".encode()).digest() - return int.from_bytes(h[:4], 'little') % self.num_bits - - def add(self, tile_id: int) -> None: - """ - Add tile_id to bloom filter. - - Args: - tile_id: HEALPix tile ID to add - """ - for i in range(self.num_hashes): - bit_pos = self._hash(tile_id, i) - byte_pos = bit_pos // 8 - bit_offset = bit_pos % 8 - self.bit_array[byte_pos] |= (1 << bit_offset) - - def might_contain(self, tile_id: int) -> bool: - """ - Check if tile_id might exist in the set. - - Args: - tile_id: HEALPix tile ID to check - - Returns: - True: tile might exist (or false positive) - False: tile definitely does not exist - """ - for i in range(self.num_hashes): - bit_pos = self._hash(tile_id, i) - byte_pos = bit_pos // 8 - bit_offset = bit_pos % 8 - if not (self.bit_array[byte_pos] & (1 << bit_offset)): - return False # Definitely not in set - return True # Probably in set - - def save(self, path: Path) -> None: - """ - Save bloom filter to binary file. - - Format: - Header (24 bytes): - [version:4][capacity:4][fp_rate:8][num_bits:4][num_hashes:4] - Body (variable): - [bit_array:N] where N = (num_bits + 7) / 8 bytes - - Args: - path: Path to save bloom filter - """ - with open(path, 'wb') as f: - # Header: version, capacity, fp_rate, num_bits, num_hashes - f.write(struct.pack(' 'TileBloomFilter': - """ - Load bloom filter from binary file. - - Args: - path: Path to bloom filter file - - Returns: - Loaded TileBloomFilter instance - - Raises: - ValueError: If file format is unsupported or corrupted - FileNotFoundError: If file doesn't exist - """ - with open(path, 'rb') as f: - # Read header (24 bytes) - header = f.read(24) - if len(header) < 24: - raise ValueError(f"Bloom filter file too small: {len(header)} bytes") - - version, capacity, fp_rate, num_bits, num_hashes = struct.unpack(' Optional[float]: - """ - Calculate actual false positive rate based on stored parameters. 
- - Formula: FP = (1 - e^(-k*n/m))^k - - Returns: - Estimated false positive rate, or None if capacity is 0 - """ - if self.capacity == 0: - return None - - # FP rate = (1 - e^(-k*n/m))^k - exponent = -self.num_hashes * self.capacity / self.num_bits - fp = (1 - math.exp(exponent)) ** self.num_hashes - return fp - - class DeepStarCatalog: """ HEALPix-indexed star catalog with background loading @@ -383,7 +186,6 @@ def __init__(self, catalog_path: str): catalog_path: Path to deep_stars directory containing metadata.json """ logger.info(f">>> DeepStarCatalog.__init__() called with path: {catalog_path}") - t0 = time.time() self.catalog_path = Path(catalog_path) self.state = CatalogState.NOT_LOADED self.metadata: Optional[Dict[str, Any]] = None @@ -399,10 +201,7 @@ def __init__(self, catalog_path: str): self._index_cache: Dict[str, Any] = {} # Cache of existing tile IDs per magnitude band to avoid scanning for non-existent tiles self._existing_tiles_cache: Dict[str, Set[int]] = {} - # Bloom filters for fast tile existence checks (space-efficient) - self._bloom_filters: Dict[str, TileBloomFilter] = {} - t_init = (time.time() - t0) * 1000 - logger.info(f">>> DeepStarCatalog.__init__() completed in {t_init:.1f}ms") + logger.info(f">>> DeepStarCatalog.__init__() completed") def start_background_load( self, observer_lat: Optional[float] = None, limiting_mag: float = 12.0 @@ -436,7 +235,6 @@ def start_background_load( def _background_load_worker(self): """Background worker - just loads metadata""" logger.info(">>> _background_load_worker() started") - t_worker_start = time.time() try: # Load metadata self.load_progress = "Loading..." @@ -452,11 +250,9 @@ def _background_load_worker(self): self.state = CatalogState.NOT_LOADED return - t0 = time.time() with open(metadata_file, "r") as f: self.metadata = json.load(f) - t_json = (time.time() - t0) * 1000 - logger.info(f">>> metadata.json loaded in {t_json:.1f}ms") + logger.info(f">>> metadata.json loaded") self.nside = self.metadata.get("nside", 512) star_count = self.metadata.get('star_count', 0) @@ -469,10 +265,6 @@ def _background_load_worker(self): bands = self.metadata.get("mag_bands", []) logger.info(f">>> Catalog mag bands: {json.dumps(bands)}") - # Preload all bloom filters into memory (~12 MB total) - # DISABLED: Bloom filters not currently used (testing performance on Pi) - # self._preload_bloom_filters() - # Preload all compressed indices (run directories) into memory (~2-12 MB total) # This eliminates first-query delays (70ms per band → 420ms total stuttering) self._preload_compressed_indices() @@ -484,8 +276,7 @@ def _background_load_worker(self): self.load_progress = "Ready" self.load_percent = 100 self.state = CatalogState.READY - t_worker_total = (time.time() - t_worker_start) * 1000 - logger.info(f">>> _background_load_worker() completed in {t_worker_total:.1f}ms, state: {self.state}") + logger.info(f">>> _background_load_worker() completed, state: {self.state}") except Exception as e: logger.error(f">>> Catalog loading failed: {e}", exc_info=True) @@ -552,6 +343,10 @@ def get_stars_for_fov_progressive( magnitude band is loaded. This allows the UI to display bright stars immediately while continuing to load fainter stars in the background. + Uses background thread to load magnitude bands asynchronously, eliminating + UI event loop blocking. The UI consumes results at its own pace (~10 FPS) + while catalog loading continues uninterrupted. 
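A minimal sketch of this producer-consumer shape, with illustrative names and stand-in data; the actual implementation below uses a bounded queue.Queue(maxsize=6) for back-pressure and a 10-second consumer timeout.

import queue
import threading

def produce_bands(result_queue: queue.Queue, bands: list) -> None:
    # Producer: load each magnitude band and hand results to the consumer.
    # put() blocks when the queue is full, giving natural back-pressure.
    for i, band in enumerate(bands):
        stars = f"stars for mag band {band}"  # stand-in for the real tile load
        result_queue.put((stars, i == len(bands) - 1))

def consume_bands(result_queue: queue.Queue):
    # Consumer (UI side): drain results at its own pace; the timeout
    # guards against a crashed producer thread.
    while True:
        stars, is_last = result_queue.get(timeout=10.0)
        yield stars, is_last
        if is_last:
            break

q: queue.Queue = queue.Queue(maxsize=6)
threading.Thread(target=produce_bands, args=(q, [6, 9, 12]), daemon=True).start()
for stars, done in consume_bands(q):
    print(stars, "(complete)" if done else "(partial)")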
+ Blocks if state == LOADING (waits for load to complete) Returns empty array if state == NOT_LOADED @@ -597,71 +392,100 @@ def get_stars_for_fov_progressive( if self.visible_tiles: tiles = [t for t in tiles if t in self.visible_tiles] - # Load stars progressively by magnitude band (bright to faint) - all_stars_list = [] - if not self.metadata: yield (np.empty((0, 3)), True) return - for mag_band_info in self.metadata.get("mag_bands", []): - mag_min = mag_band_info["min"] - mag_max = mag_band_info["max"] + # Background loading using producer-consumer pattern + import queue + import threading + import time - # Skip bands fainter than limit - if mag_min >= mag_limit: - break + # Queue to pass star arrays from background thread to generator + result_queue: queue.Queue = queue.Queue(maxsize=6) # Buffer up to 6 magnitude bands - logger.debug(f">>> PROGRESSIVE: Loading mag band {mag_min}-{mag_max}, tiles={len(tiles)}, mag_limit={mag_limit}") - import time - t_band_start = time.time() + def load_bands_background(): + """Background thread that loads magnitude bands continuously""" + try: + all_stars_list = [] + mag_bands = self.metadata.get("mag_bands", []) - # Load stars from this magnitude band only - # logger.info(f">>> Calling _load_tiles_for_mag_band...") - band_stars = self._load_tiles_for_mag_band( - tiles, mag_band_info, mag_limit, ra_deg, dec_deg, fov_deg - ) - # logger.info(f">>> _load_tiles_for_mag_band returned {len(band_stars)} stars") + for i, mag_band_info in enumerate(mag_bands): + mag_min = mag_band_info["min"] + mag_max = mag_band_info["max"] - t_load = (time.time() - t_band_start) * 1000 + # Skip bands fainter than limit + if mag_min >= mag_limit: + break - # Add to cumulative list - t_append_start = time.time() - if len(band_stars) > 0: - all_stars_list.append(band_stars) - t_append = (time.time() - t_append_start) * 1000 + logger.debug(f">>> BACKGROUND: Loading mag band {mag_min}-{mag_max}, tiles={len(tiles)}") - # Yield current results (not complete yet unless this is the last band) - is_last_band = mag_max >= mag_limit + # Load stars from this magnitude band only + band_stars = self._load_tiles_for_mag_band( + tiles, mag_band_info, mag_limit, ra_deg, dec_deg, fov_deg + ) - t_concat_start = time.time() - if all_stars_list: - current_total = np.concatenate(all_stars_list) - else: - current_total = np.empty((0, 3)) - t_concat = (time.time() - t_concat_start) * 1000 + # Add to cumulative list + if len(band_stars) > 0: + all_stars_list.append(band_stars) - t_yield_start = time.time() - yield (current_total, is_last_band) - t_yield = (time.time() - t_yield_start) * 1000 + # Concatenate for this yield + if all_stars_list: + current_total = np.concatenate(all_stars_list) + else: + current_total = np.empty((0, 3)) - logger.info( - f">>> PROGRESSIVE TIMING: mag {mag_min}-{mag_max}: " - f"load={t_load:.1f}ms, append={t_append:.1f}ms, " - f"concat={t_concat:.1f}ms, yield={t_yield:.1f}ms, " - f"total={(t_load+t_append+t_concat+t_yield):.1f}ms, " - f"stars={len(band_stars)}, cumulative={len(current_total)}" - ) + is_last_band = mag_max >= mag_limit - if is_last_band: - break + # Push to queue (blocks if queue is full - back-pressure) + result_queue.put((current_total, is_last_band, len(band_stars))) - # Final yield (should already be done above, but just in case) - if all_stars_list: - final_total = np.concatenate(all_stars_list) - else: - final_total = np.empty((0, 3)) - logger.info(f"PROGRESSIVE: Complete! 
Total {len(final_total)} stars loaded") + logger.info( + f">>> BACKGROUND: mag {mag_min}-{mag_max}: " + f"stars={len(band_stars)}, cumulative={len(current_total)}" + ) + + if is_last_band: + break + + except Exception as e: + logger.error(f">>> BACKGROUND: Error loading bands: {e}", exc_info=True) + # Push error marker + result_queue.put((None, True, 0)) + + # Start background loading thread + loader_thread = threading.Thread(target=load_bands_background, daemon=True, name="StarCatalogLoader") + loader_thread.start() + logger.info(">>> PROGRESSIVE: Background loading thread started") + + # Yield results as they become available + while True: + try: + # Get next result from queue + # Use timeout to avoid blocking forever if thread crashes + current_total, is_last_band, band_star_count = result_queue.get(timeout=10.0) + + if current_total is None: + # Error in background thread + logger.error(">>> PROGRESSIVE: Background thread encountered error") + yield (np.empty((0, 3)), True) + break + + # Yield to consumer (UI) + yield (current_total, is_last_band) + + logger.info( + f">>> PROGRESSIVE: stars_in_band={band_star_count}, cumulative={len(current_total)}" + ) + + if is_last_band: + logger.info(f"PROGRESSIVE: Complete! Total {len(current_total)} stars loaded") + break + + except queue.Empty: + logger.error(">>> PROGRESSIVE: Timeout waiting for background thread") + yield (np.empty((0, 3)), True) + break def get_stars_for_fov( self, @@ -734,17 +558,12 @@ def get_stars_for_fov( # Batch load is much faster for many tiles # Note: batch loading returns PM-corrected (ra, dec, mag) tuples logger.info(f"Using BATCH loading for {len(tiles)} tiles") - import time - t_batch_start = time.time() stars = self._load_tiles_batch(tiles, mag_limit) - t_batch_end = time.time() - logger.info(f"Batch load complete: {len(stars)} stars in {(t_batch_end-t_batch_start)*1000:.1f}ms") + logger.info(f"Batch load complete: {len(stars)} stars") tile_star_counts = {t: 0 for t in tiles} # Don't track individual counts for batch else: # Load one by one (better for small queries or legacy format) logger.info(f"Using SINGLE-TILE loading for {len(tiles)} tiles (compact={is_compact})") - import time - t_single_start = time.time() stars_raw_list = [] # To prevent UI blocking, limit the number of tiles loaded at once @@ -776,19 +595,11 @@ def get_stars_for_fov( else: cache_misses += 1 - # Log progress every 25 tiles - if (i + 1) % 25 == 0: - elapsed = (time.time() - t_single_start) * 1000 - logger.debug(f"Progress: {i+1}/{len(tiles)} tiles loaded ({elapsed:.0f}ms elapsed)") - - t_single_end = time.time() - elapsed_ms = (t_single_end - t_single_start) * 1000 - # Log cache performance logger.debug(f"Tile cache: {cache_hits} hits, {cache_misses} misses ({cache_hits/(cache_hits+cache_misses)*100:.1f}% hit rate)") - + total_raw = sum(len(x) for x in stars_raw_list) - logger.debug(f"Single-tile loading complete: {total_raw} stars in {elapsed_ms:.1f}ms ({elapsed_ms/len(tiles):.2f}ms/tile)") + logger.debug(f"Single-tile loading complete: {total_raw} stars") # Log tile loading stats if tile_star_counts: @@ -987,7 +798,7 @@ def _load_tile_compact( self, band_dir: Path, tile_id: int, mag_min: float, mag_max: float ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ - Load stars from compact format (consolidated tiles.bin + index.json) + Load stars from compact format (consolidated tiles.bin + v3 compressed index) Args: band_dir: Magnitude band directory @@ -1001,95 +812,88 @@ def _load_tile_compact( if not 
_HEALPY_AVAILABLE: return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) - # Try binary index first, fall back to JSON for backward compat - index_file_bin = band_dir / "index.bin" - index_file_json = band_dir / "index.json" + index_file = band_dir / "index.bin" tiles_file = band_dir / "tiles.bin" if not tiles_file.exists(): return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) - # Determine index format - if index_file_bin.exists(): - index_file = index_file_bin - is_binary = True - elif index_file_json.exists(): - index_file = index_file_json - is_binary = False - else: - return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) + if not index_file.exists(): + raise FileNotFoundError( + f"Compressed index not found: {index_file}\n" + f"This catalog requires v3 format. Please rebuild using healpix_builder_compact.py" + ) # Load index (cached per band) - tile_key = str(tile_id) - cache_key = f"index_{mag_min}_{mag_max}" if cache_key not in self._index_cache: - if is_binary: - self._index_cache[cache_key] = self._read_binary_index(index_file) - else: - with open(index_file, "r") as f: - self._index_cache[cache_key] = json.load(f) + self._index_cache[cache_key] = CompressedIndex(index_file) index = self._index_cache[cache_key] - if tile_key not in index: + # Get tile offset and size from compressed index + result = index.get(tile_id) + if result is None: return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) - - # Get tile offset and size - tile_info = index[tile_key] - offset = tile_info["offset"] - size = tile_info["size"] - compressed_size = tile_info.get("compressed_size") + offset, size = result # Read tile data with open(tiles_file, "rb") as f: f.seek(offset) - - if compressed_size: - # Compressed tile - decompress in memory - import zlib - compressed_data = f.read(compressed_size) - data = zlib.decompress(compressed_data) - else: - # Uncompressed tile - data = f.read(size) - + data = f.read(size) return self._parse_records(data) def _parse_records(self, data: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]: """ - Parse binary star records into numpy arrays (VECTORIZED) + Parse binary tile data into numpy arrays (VECTORIZED) + + New format: [Tile Header: 6 bytes][Star Records: 5 bytes each] Args: - data: Binary data containing star records + data: Binary tile data (header + star records) Returns: Tuple of (ras, decs, mags, pmras, pmdecs) as numpy arrays """ - if len(data) == 0: + if len(data) < TILE_HEADER_SIZE: return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) - # Parse all records using numpy - num_records = len(data) // STAR_RECORD_SIZE - records = np.frombuffer(data, dtype=STAR_RECORD_DTYPE, count=num_records) - - # Mask healpix to 24 bits - healpix_pixels = records['healpix'] & 0xFFFFFF + # Parse tile header + healpix_pixel, num_stars = struct.unpack(TILE_HEADER_FORMAT, data[:TILE_HEADER_SIZE]) + + # Extract star records + star_data = data[TILE_HEADER_SIZE:] + + if len(star_data) == 0: + return (np.array([]), np.array([]), np.array([]), np.array([]), np.array([])) + + # Verify data size matches expected + expected_size = num_stars * STAR_RECORD_SIZE + if len(star_data) != expected_size: + logger.warning( + f"Tile {healpix_pixel}: size mismatch. 
Expected {expected_size} bytes " + f"for {num_stars} stars, got {len(star_data)} bytes" + ) + # Truncate to valid records + num_stars = len(star_data) // STAR_RECORD_SIZE + + # Parse all star records using numpy + records = np.frombuffer(star_data, dtype=STAR_RECORD_DTYPE, count=num_stars) - # Get all pixel centers at once - pixel_ras, pixel_decs = hp.pix2ang(self.nside, healpix_pixels, lonlat=True) + # Get pixel center (same for all stars in this tile) + pixel_ra, pixel_dec = hp.pix2ang(self.nside, healpix_pixel, lonlat=True) # Calculate pixel size once pixel_size_deg = np.sqrt(hp.nside2pixarea(self.nside, degrees=True)) - max_offset_arcsec = pixel_size_deg * 3600.0 / 2.0 + max_offset_arcsec = pixel_size_deg * 3600.0 * 0.75 # Decode all offsets ra_offset_arcsec = (records['ra_offset'] / 127.5 - 1.0) * max_offset_arcsec dec_offset_arcsec = (records['dec_offset'] / 127.5 - 1.0) * max_offset_arcsec - # Calculate final positions - decs = pixel_decs + dec_offset_arcsec / 3600.0 - ras = pixel_ras + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) + # Calculate final positions (broadcast pixel center to all stars) + decs = pixel_dec + dec_offset_arcsec / 3600.0 + ras = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) # Decode magnitudes and proper motions mags = records['mag'] / 10.0 @@ -1098,159 +902,9 @@ def _parse_records(self, data: bytes) -> Tuple[np.ndarray, np.ndarray, np.ndarra return ras, decs, mags, pmras, pmdecs - def _read_binary_index(self, index_file: Path, needed_tiles: Optional[set] = None) -> dict: - """ - Read binary index file - optimized to only load needed tiles for large indices - - Format v1 (uncompressed): - Header: [version:4][num_tiles:4] - Per tile: [tile_id:4][offset:8][size:4] - - Format v2 (compressed): - Header: [version:4][num_tiles:4] - Per tile: [tile_id:4][offset:8][compressed_size:4][uncompressed_size:4] - - Args: - index_file: Path to the index file - needed_tiles: Set of tile IDs we actually need. If provided and index is large (>100K tiles), - only load these specific tiles instead of the whole index. - - Returns: - Dict mapping tile_id (as string) -> {"offset": int, "size": int, "compressed_size": int (optional)} - """ - index = {} - - if not index_file.exists(): - return {} - - with open(index_file, "rb") as f: - # Read header - header = f.read(8) - if len(header) < 8: - return {} - - version, num_tiles = struct.unpack(">> Selective loading: {len(needed_list)} tiles out of {num_tiles:,} total") - - # Use range query for spatially localized tiles - # Range query does binary search + sequential read, much faster than chunked scan - index = self._load_tile_range(index_file, needed_list, version, entry_size, num_tiles) - - logger.info(f">>> Loaded {len(index)} entries using range query") - return index - - # For small indices or when we need everything, load all entries at once - data = f.read() - records = np.frombuffer(data, dtype=dtype) - - # Convert to dictionary (this part is still Python loop but unavoidable for dict creation) - # However, iterating over numpy array is faster than struct.unpack loop - - # Pre-allocate dict for speed? 
Not easily possible in Python - # But we can use a comprehension which is slightly faster - - if version == 1: - for record in records: - index[str(record['tile_id'])] = { - "offset": int(record['offset']), - "size": int(record['size']) - } - else: - for record in records: - index[str(record['tile_id'])] = { - "offset": int(record['offset']), - "size": int(record['uncompressed_size']), - "compressed_size": int(record['compressed_size']) - } - - return index - - def _preload_bloom_filters(self) -> None: - """ - Preload all bloom filters into memory during catalog initialization. - - Loads all bloom filters (~12 MB total) to eliminate on-demand loading - delays during chart generation. Bloom filters provide fast tile existence - checks with minimal memory overhead. - - This runs in background thread during catalog startup. - """ - if not self.metadata or "mag_bands" not in self.metadata: - logger.warning(">>> No metadata available, skipping bloom filter preload") - return - - t0_total = time.time() - total_bytes = 0 - bands_loaded = 0 - - logger.info(">>> Preloading bloom filters for all magnitude bands...") - - for band_info in self.metadata["mag_bands"]: - mag_min = int(band_info["min"]) - mag_max = int(band_info["max"]) - cache_key = f"index_{mag_min}_{mag_max}" - - bloom_file = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "bloom.bin" - - if not bloom_file.exists(): - logger.warning( - f">>> Bloom filter missing for {cache_key}: {bloom_file} - " - f"Run catalog_tools/generate_bloom_filters.py" - ) - continue - - t0 = time.time() - self._bloom_filters[cache_key] = TileBloomFilter.load(bloom_file) - t_load = (time.time() - t0) * 1000 - - bloom = self._bloom_filters[cache_key] - bloom_bytes = len(bloom.bit_array) - total_bytes += bloom_bytes - bands_loaded += 1 - - logger.info( - f">>> Loaded bloom filter {cache_key}: " - f"{bloom.capacity:,} tiles, {bloom_bytes / 1024:.1f} KB, " - f"FP={bloom.get_actual_fp_rate():.2%} in {t_load:.1f}ms" - ) - - t_total = (time.time() - t0_total) * 1000 - logger.info( - f">>> Bloom filter preload complete: {bands_loaded} filters, " - f"{total_bytes / 1024 / 1024:.1f} MB total in {t_total:.1f}ms" - ) - def _preload_compressed_indices(self) -> None: """ - Preload all compressed indices (run directories) into memory during startup. + Preload all v3 compressed indices (run directories) into memory during startup. Loads compressed index run directories (~2-12 MB total) to eliminate first-query delays during chart generation. Each compressed index loads its run directory @@ -1267,25 +921,26 @@ def _preload_compressed_indices(self) -> None: t0_total = time.time() bands_loaded = 0 - logger.info(">>> Preloading compressed indices for all magnitude bands...") + logger.info(">>> Preloading v3 compressed indices for all magnitude bands...") for band_info in self.metadata["mag_bands"]: mag_min = int(band_info["min"]) mag_max = int(band_info["max"]) cache_key = f"index_{mag_min}_{mag_max}" - # Try compressed index first (v3) - index_file_v3 = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "index_v3.bin" + # Load compressed index (v3 format stored as index.bin) + index_file = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "index.bin" - if not index_file_v3.exists(): - logger.debug( - f">>> Compressed index not found for {cache_key}: {index_file_v3} - " - f"Will fall back to v1/v2 index on first query" + if not index_file.exists(): + raise FileNotFoundError( + f"Compressed index not found: {index_file}\n" + f"This catalog requires v3 format. 
Please rebuild using healpix_builder_compact.py" ) - continue t0 = time.time() - self._index_cache[cache_key] = CompressedIndex(index_file_v3) + + # Load compressed index (v3 only) + self._index_cache[cache_key] = CompressedIndex(index_file) t_load = (time.time() - t0) * 1000 compressed_idx = self._index_cache[cache_key] @@ -1303,186 +958,6 @@ def _preload_compressed_indices(self) -> None: f"in {t_total:.1f}ms" ) - def _ensure_bloom_filter(self, cache_key: str, mag_min: int, mag_max: int) -> None: - """ - Ensure bloom filter is loaded for given magnitude band. - - This is a fallback in case preloading failed for a specific band. - Normally all bloom filters are preloaded during catalog initialization. - - Args: - cache_key: Cache key for this magnitude band (e.g., "index_12_14") - mag_min: Minimum magnitude for this band - mag_max: Maximum magnitude for this band - - Raises: - FileNotFoundError: If bloom filter file is missing (catalog corruption) - """ - if cache_key in self._bloom_filters: - return # Already loaded (normal case - preloaded at startup) - - # Fallback: load on-demand if preloading missed this band - logger.warning(f">>> Bloom filter {cache_key} not preloaded, loading on-demand...") - - bloom_file = self.catalog_path / f"mag_{mag_min:02d}_{mag_max:02d}" / "bloom.bin" - - if not bloom_file.exists(): - raise FileNotFoundError( - f"Bloom filter missing for {cache_key}: {bloom_file}\n" - f"Catalog may be corrupted or incomplete. " - f"Run catalog_tools/generate_bloom_filters.py to create missing bloom filters." - ) - - t0 = time.time() - self._bloom_filters[cache_key] = TileBloomFilter.load(bloom_file) - t_load = (time.time() - t0) * 1000 - - bloom = self._bloom_filters[cache_key] - actual_fp = bloom.get_actual_fp_rate() - logger.info( - f">>> Loaded bloom filter for {cache_key}: {bloom.capacity} tiles, " - f"{len(bloom.bit_array)} bytes, FP rate={actual_fp:.2%}, load_time={t_load:.1f}ms" - ) - - def _binary_search_tile_position( - self, - f, - target_tile_id: int, - num_tiles: int, - entry_size: int, - find_first: bool = True - ) -> Optional[int]: - """ - Binary search for tile position in sorted binary index file. - - Args: - f: Open file handle positioned after header - target_tile_id: Tile ID to search for - num_tiles: Total number of tiles in index - entry_size: Size of each entry in bytes (16 or 20) - find_first: If True, find first tile >= target. If False, find last tile <= target. - - Returns: - File position (offset from file start) of matching entry, or None if not found - """ - left, right = 0, num_tiles - 1 - result_pos = None - - while left <= right: - mid = (left + right) // 2 - pos = 8 + mid * entry_size # 8-byte header + entry offset - - f.seek(pos) - tile_id_bytes = f.read(4) - if len(tile_id_bytes) < 4: - break - - tile_id = struct.unpack(" target_tile_id - if find_first: - result_pos = pos # Keep track of smallest tile > target - right = mid - 1 - - return result_pos - - def _load_tile_range( - self, - index_file: Path, - tile_ids: List[int], - version: int, - entry_size: int, - num_tiles: int - ) -> Dict[str, Any]: - """ - Load a contiguous range of tiles using binary search + sequential read. - - This is much faster than seeking to each individual tile, especially on SD cards - where random seeks are expensive. 
- - Args: - index_file: Path to binary index file - tile_ids: List of tile IDs to load (must be sorted) - version: Index file version (1 or 2) - entry_size: Size of each entry (16 for v1, 20 for v2) - num_tiles: Total number of tiles in index - - Returns: - Dictionary mapping tile_id (as string) to tile metadata - """ - if not tile_ids: - return {} - - # Determine range to load - min_tile = min(tile_ids) - max_tile = max(tile_ids) - tile_set = set(tile_ids) - - index = {} - - with open(index_file, "rb") as f: - # Find starting position (first tile >= min_tile) - start_pos = self._binary_search_tile_position( - f, min_tile, num_tiles, entry_size, find_first=True - ) - - if start_pos is None: - logger.debug(f">>> No tiles found in range [{min_tile}, {max_tile}]") - return {} - - # Sequential read from start_pos until we pass max_tile - f.seek(start_pos) - tiles_read = 0 - tiles_matched = 0 - - while True: - entry_data = f.read(entry_size) - if len(entry_data) < entry_size: - break # End of file - - tiles_read += 1 - - if version == 1: - tile_id, offset, size = struct.unpack(" max_tile: - break # Passed our range - - if tile_id in tile_set: - index[str(tile_id)] = { - "offset": int(offset), - "size": int(size) - } - tiles_matched += 1 - else: # version == 2 - tile_id, offset, compressed_size, uncompressed_size = struct.unpack(" max_tile: - break # Passed our range - - if tile_id in tile_set: - index[str(tile_id)] = { - "offset": int(offset), - "size": int(uncompressed_size), - "compressed_size": int(compressed_size) - } - tiles_matched += 1 - - # Early exit if we've found all requested tiles - if tiles_matched >= len(tile_set): - break - - logger.debug( - f">>> Range query: read {tiles_read} entries, " - f"matched {tiles_matched}/{len(tile_ids)} requested tiles" - ) - - return index - def _load_existing_tiles_set(self, index_file: Path) -> Set[int]: """ Quickly load the set of all existing tile IDs from an index file. @@ -1636,59 +1111,32 @@ def _load_tiles_batch_single_band( mag_max = mag_band_info["max"] band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" - index_file_bin = band_dir / "index.bin" - index_file_json = band_dir / "index.json" + index_file = band_dir / "index.bin" tiles_file = band_dir / "tiles.bin" if not tiles_file.exists(): return np.empty((0, 3)) + if not index_file.exists(): + raise FileNotFoundError( + f"Compressed index not found: {index_file}\n" + f"This catalog requires v3 format. 
Please rebuild using healpix_builder_compact.py" + ) + cache_key = f"index_{mag_min}_{mag_max}" - # Bloom filter pre-check: DISABLED for performance testing - # TODO: Re-enable after Pi performance comparison - # Saves ~4ms per query by checking bloom filter (0.24ms) vs compressed index (2.4ms) - # Trade-off: 12 MB RAM for bloom filters vs 4ms per query - # - # if cache_key in self._bloom_filters: - # bloom = self._bloom_filters[cache_key] - # has_any_tile = any(bloom.might_contain(tile_id) for tile_id in tile_ids) - # if not has_any_tile: - # logger.debug( - # f">>> Bloom filter: No tiles exist in {cache_key} for query region, " - # f"skipping band" - # ) - # return np.empty((0, 3)) - - # Load index - prefer compressed v3 format + # Load v3 compressed index (cached) if not hasattr(self, '_index_cache'): self._index_cache = {} t_index_start = time.time() logger.info(f">>> Checking index cache for {cache_key}, in_cache={cache_key in self._index_cache}") if cache_key not in self._index_cache: - # Try compressed index first (v3) - index_file_v3 = band_dir / "index_v3.bin" - if index_file_v3.exists(): - logger.info(f">>> Loading compressed index from {index_file_v3}") - t0 = time.time() - self._index_cache[cache_key] = CompressedIndex(index_file_v3) - t_read_index = (time.time() - t0) * 1000 - logger.info(f">>> Compressed index loaded in {t_read_index:.1f}ms") - elif index_file_bin.exists(): - logger.info(f">>> Loading FULL index from {index_file_bin} (v1/v2 format)") - t0 = time.time() - self._index_cache[cache_key] = self._read_binary_index(index_file_bin, needed_tiles=None) - t_read_index = (time.time() - t0) * 1000 - logger.info(f">>> FULL index loaded, {len(self._index_cache[cache_key])} tiles in {t_read_index:.1f}ms") - elif index_file_json.exists(): - logger.info(f">>> Reading JSON index from {index_file_json}") - with open(index_file_json, "r") as f: - self._index_cache[cache_key] = json.load(f) - logger.info(f">>> JSON index loaded, {len(self._index_cache[cache_key])} tiles in cache") - else: - logger.warning(f">>> No index file found for {cache_key}") - return np.empty((0, 3)) + logger.info(f">>> Loading v3 compressed index from {index_file}") + t0 = time.time() + self._index_cache[cache_key] = CompressedIndex(index_file) + t_read_index = (time.time() - t0) * 1000 + logger.info(f">>> Compressed index loaded in {t_read_index:.1f}ms") else: logger.debug(f">>> Using cached index for {cache_key}") @@ -1699,23 +1147,19 @@ def _load_tiles_batch_single_band( t_readops_start = time.time() logger.debug(f">>> Building read_ops for {len(tile_ids)} tiles...") - # Collect all tile read operations - # Handle both CompressedIndex and dict formats + # Collect all tile read operations from v3 compressed index read_ops: List[Tuple[int, Dict[str, int]]] = [] - if isinstance(index, CompressedIndex): - # Compressed index: use .get() method - for tile_id in tile_ids: - tile_tuple = index.get(tile_id) - if tile_tuple: - offset, size = tile_tuple - read_ops.append((tile_id, {"offset": offset, "size": size})) - else: - # Dict-based index (v1/v2 or JSON) - for tile_id in tile_ids: - tile_key = str(tile_id) - if tile_key in index: - tile_info: Dict[str, int] = index[tile_key] - read_ops.append((tile_id, tile_info)) + missing_tiles = 0 + for tile_id in tile_ids: + tile_tuple = index.get(tile_id) + if tile_tuple: + offset, size = tile_tuple + read_ops.append((tile_id, {"offset": offset, "size": size})) + else: + missing_tiles += 1 + + if missing_tiles > 0: + logger.warning(f">>> {missing_tiles} of 
{len(tile_ids)} tiles missing from index for mag {mag_min}-{mag_max}") if not read_ops: logger.debug(f">>> No tiles to load (all {len(tile_ids)} requested tiles are empty)") @@ -1737,7 +1181,6 @@ def _load_tiles_batch_single_band( all_pmdecs = [] t_io_start = time.time() - t_decompress_total = 0.0 t_decode_total = 0.0 bytes_read = 0 logger.info(f">>> Batch loading {len(read_ops)} tiles for mag {mag_min}-{mag_max}") @@ -1750,7 +1193,7 @@ def _load_tiles_batch_single_band( tile_id, tile_info = read_ops[i] offset = tile_info["offset"] - chunk_end = offset + tile_info.get("compressed_size", tile_info["size"]) + chunk_end = offset + tile_info["size"] # Find consecutive tiles for chunk reading tiles_in_chunk: List[Tuple[int, Dict[str, int]]] = [(tile_id, tile_info)] @@ -1765,7 +1208,7 @@ def _load_tiles_batch_single_band( next_tile_id, next_tile_info = read_ops[j] next_offset = next_tile_info["offset"] if next_offset - chunk_end <= MAX_GAP: - chunk_end = next_offset + next_tile_info.get("compressed_size", next_tile_info["size"]) + chunk_end = next_offset + next_tile_info["size"] tiles_in_chunk.append((next_tile_id, next_tile_info)) j += 1 else: @@ -1783,17 +1226,8 @@ def _load_tiles_batch_single_band( for tile_idx, (tile_id, tile_info) in enumerate(tiles_in_chunk): # logger.debug(f">>> Processing tile {tile_idx+1}/{len(tiles_in_chunk)} (id={tile_id})") tile_offset = tile_info["offset"] - offset - compressed_size = tile_info.get("compressed_size") size = tile_info["size"] - - if compressed_size: - t_decomp_start = time.time() - import zlib - compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] - data = zlib.decompress(compressed_data) - t_decompress_total += (time.time() - t_decomp_start) - else: - data = chunk_data[tile_offset:tile_offset + size] + data = chunk_data[tile_offset:tile_offset + size] # Parse records using shared helper t_decode_start = time.time() @@ -1833,7 +1267,7 @@ def _load_tiles_batch_single_band( t_io_total = (time.time() - t_io_start) * 1000 logger.info( f">>> Tile I/O performance for mag {mag_min}-{mag_max}: " - f"total={t_io_total:.1f}ms, decompress={t_decompress_total*1000:.1f}ms, " + f"total={t_io_total:.1f}ms, " f"decode={t_decode_total*1000:.1f}ms, concat={t_concat:.1f}ms, pm={t_pm:.1f}ms, " f"bytes={bytes_read/1024:.1f}KB, stars={len(result)}" ) @@ -1877,36 +1311,35 @@ def _load_tiles_batch( logger.info(f"_load_tiles_batch: Processing mag band {mag_min}-{mag_max}") band_dir = self.catalog_path / f"mag_{mag_min:02.0f}_{mag_max:02.0f}" - index_file_bin = band_dir / "index.bin" - index_file_json = band_dir / "index.json" + index_file = band_dir / "index.bin" tiles_file = band_dir / "tiles.bin" if not tiles_file.exists(): continue - # Load index + if not index_file.exists(): + raise FileNotFoundError( + f"Compressed index not found: {index_file}\n" + f"This catalog requires v3 format. 
Please rebuild using healpix_builder_compact.py" + ) + + # Load v3 compressed index cache_key = f"index_{mag_min}_{mag_max}" if not hasattr(self, '_index_cache'): self._index_cache = {} if cache_key not in self._index_cache: - if index_file_bin.exists(): - self._index_cache[cache_key] = self._read_binary_index(index_file_bin) - elif index_file_json.exists(): - with open(index_file_json, "r") as f: - self._index_cache[cache_key] = json.load(f) - else: - continue + self._index_cache[cache_key] = CompressedIndex(index_file) index = self._index_cache[cache_key] - # Collect all tile read operations + # Collect all tile read operations from v3 compressed index read_ops = [] for tile_id in tile_ids: - tile_key = str(tile_id) - if tile_key in index: - tile_info = index[tile_key] - read_ops.append((tile_id, tile_info)) + tile_tuple = index.get(tile_id) + if tile_tuple: + offset, size = tile_tuple + read_ops.append((tile_id, {"offset": offset, "size": size})) if not read_ops: continue @@ -1927,11 +1360,10 @@ def _load_tiles_batch( while i < len(read_ops): tile_id, tile_info = read_ops[i] offset = tile_info["offset"] - compressed_size = tile_info.get("compressed_size") size = tile_info["size"] # Check if next tiles are sequential (within gap tolerance) - chunk_end = offset + (compressed_size or size) + chunk_end = offset + size tiles_in_chunk = [(tile_id, tile_info)] j = i + 1 @@ -1942,7 +1374,7 @@ def _load_tiles_batch( # If next tile is within gap tolerance, include in chunk if next_offset - chunk_end <= MAX_GAP: tiles_in_chunk.append((next_tile_id, next_tile_info)) - next_size = next_tile_info.get("compressed_size") or next_tile_info["size"] + next_size = next_tile_info["size"] chunk_end = next_offset + next_size j += 1 else: @@ -1958,16 +1390,8 @@ def _load_tiles_batch( # Process each tile in the chunk using vectorized operations for tile_id, tile_info in tiles_in_chunk: tile_offset = tile_info["offset"] - offset # Relative offset in chunk - compressed_size = tile_info.get("compressed_size") size = tile_info["size"] - - if compressed_size: - import zlib - compressed_data = chunk_data[tile_offset:tile_offset + compressed_size] - # logger.info(f"_load_tiles_batch: Decompressing tile {tile_id}, {compressed_size} → {size} bytes") - data = zlib.decompress(compressed_data) - else: - data = chunk_data[tile_offset:tile_offset + size] + data = chunk_data[tile_offset:tile_offset + size] # Parse records using shared helper ras, decs, mags, pmras, pmdecs = self._parse_records(data) diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index d1f711335..9f10d4df9 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -704,6 +704,7 @@ def _get_chart_generator(self): def update(self, force=True): import logging + import time logger = logging.getLogger("ObjectDetails") # If we have a chart generator, consume one yield to get the next progressive update @@ -712,6 +713,7 @@ def update(self, force=True): next_image = next(self._chart_generator) # logger.debug(f">>> update(): Consumed next chart yield: {type(next_image)}") self.object_image = next_image + force = True # Force screen update for progressive chart except StopIteration: logger.info(">>> update(): Chart generator exhausted") @@ -845,7 +847,8 @@ def update(self, force=True): desc.set_available_lines(desc_available_lines) desc.draw((0, posy)) - return self.screen_update() + result = self.screen_update() + return result def cycle_display_mode(self): """ diff --git 
a/python/tests/test_bloom_filter.py b/python/tests/test_bloom_filter.py deleted file mode 100644 index 3f4be6b36..000000000 --- a/python/tests/test_bloom_filter.py +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env python3 -""" -Unit tests for TileBloomFilter implementation. - -Tests cover: -- Basic functionality (add, might_contain) -- False positive rate validation -- Save/load persistence -- Edge cases (empty filter, large capacity) -- Integration with star catalog -""" - -import math -import tempfile -from pathlib import Path - -import pytest - -from PiFinder.star_catalog import TileBloomFilter - - -class TestTileBloomFilterBasics: - """Test basic bloom filter operations.""" - - def test_empty_filter(self): - """Test empty filter returns False for all queries.""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.01) - - # Empty filter should never contain anything - assert not bloom.might_contain(1) - assert not bloom.might_contain(100) - assert not bloom.might_contain(999999) - - def test_single_item(self): - """Test filter with single item.""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.01) - - bloom.add(42) - - # Should definitely contain the added item - assert bloom.might_contain(42) - - # Should not contain other items (with high probability) - # Note: Can't guarantee 100% due to false positives, but very unlikely for single item - assert not bloom.might_contain(1) - assert not bloom.might_contain(43) - - def test_multiple_items(self): - """Test filter with multiple items.""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.01) - - items = [10, 20, 30, 40, 50] - for item in items: - bloom.add(item) - - # All added items should be present - for item in items: - assert bloom.might_contain(item), f"Item {item} should be in filter" - - # Items not added should mostly not be present - # (allowing for false positives) - not_added = [11, 21, 31, 41, 51, 100, 200, 300] - false_positives = sum(1 for item in not_added if bloom.might_contain(item)) - - # With 1% FP rate and 8 queries, expect ~0.08 false positives - # Allow up to 3 for statistical variation - assert false_positives <= 3, f"Too many false positives: {false_positives}/8" - - def test_large_dataset(self): - """Test filter with many items.""" - capacity = 1000 - bloom = TileBloomFilter(capacity=capacity, fp_rate=0.01) - - # Add 1000 items - items = list(range(1000, 2000)) - for item in items: - bloom.add(item) - - # All added items should be present - for item in items: - assert bloom.might_contain(item) - - # Check false positive rate on items not added - not_added = list(range(3000, 4000)) # 1000 items - false_positives = sum(1 for item in not_added if bloom.might_contain(item)) - actual_fp_rate = false_positives / len(not_added) - - # Should be close to 1% (allow 0-3% due to statistical variation) - assert 0 <= actual_fp_rate <= 0.03, f"FP rate {actual_fp_rate:.2%} outside expected range" - - def test_duplicate_adds(self): - """Test that adding same item multiple times doesn't break filter.""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.01) - - # Add same item multiple times - for _ in range(10): - bloom.add(42) - - # Should still contain the item - assert bloom.might_contain(42) - - # Should not affect other items - assert not bloom.might_contain(43) - - -class TestTileBloomFilterMath: - """Test bloom filter mathematical properties.""" - - def test_optimal_bit_calculation(self): - """Test optimal bit array size calculation.""" - # Formula: m = -(n * ln(p)) / (ln(2)^2) - # For n=1000, p=0.01: m ≈ 9586 bits - - bits 
= TileBloomFilter._optimal_num_bits(1000, 0.01) - - expected = -(1000 * math.log(0.01)) / (math.log(2) ** 2) - assert abs(bits - expected) < 1 # Allow for rounding - - def test_optimal_hash_calculation(self): - """Test optimal number of hash functions calculation.""" - # Formula: k = (m/n) * ln(2) - # For m=9586, n=1000: k ≈ 7 - - num_hashes = TileBloomFilter._optimal_num_hashes(9586, 1000) - - expected = (9586 / 1000) * math.log(2) - assert abs(num_hashes - expected) < 1 - - # Should always have at least 1 hash function - assert num_hashes >= 1 - - def test_actual_fp_rate_calculation(self): - """Test actual false positive rate calculation.""" - bloom = TileBloomFilter(capacity=1000, fp_rate=0.01) - - actual_fp = bloom.get_actual_fp_rate() - - # Should be close to configured 1% - assert actual_fp is not None - assert 0.005 <= actual_fp <= 0.015 # Within 0.5%-1.5% - - def test_zero_capacity(self): - """Test filter with zero capacity.""" - bloom = TileBloomFilter(capacity=0, fp_rate=0.01) - - # get_actual_fp_rate should return None for empty filter - assert bloom.get_actual_fp_rate() is None - - -class TestTileBloomFilterPersistence: - """Test bloom filter save/load functionality.""" - - def test_save_and_load(self): - """Test saving and loading bloom filter.""" - # Create and populate filter - bloom1 = TileBloomFilter(capacity=100, fp_rate=0.01) - items = [10, 20, 30, 40, 50] - for item in items: - bloom1.add(item) - - # Save to temporary file - with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f: - temp_path = Path(f.name) - - try: - bloom1.save(temp_path) - - # Load from file - bloom2 = TileBloomFilter.load(temp_path) - - # Check all properties match - assert bloom2.capacity == bloom1.capacity - assert bloom2.fp_rate == bloom1.fp_rate - assert bloom2.num_bits == bloom1.num_bits - assert bloom2.num_hashes == bloom1.num_hashes - assert bloom2.bit_array == bloom1.bit_array - - # Check functionality preserved - for item in items: - assert bloom2.might_contain(item), f"Item {item} should be in loaded filter" - - assert not bloom2.might_contain(99) - - finally: - temp_path.unlink(missing_ok=True) - - def test_load_nonexistent_file(self): - """Test loading from non-existent file.""" - with pytest.raises(FileNotFoundError): - TileBloomFilter.load(Path("/nonexistent/bloom.bin")) - - def test_load_corrupted_file(self): - """Test loading from corrupted file.""" - # Create corrupted file (too small) - with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f: - temp_path = Path(f.name) - f.write(b"corrupted") - - try: - with pytest.raises(ValueError, match="too small"): - TileBloomFilter.load(temp_path) - finally: - temp_path.unlink(missing_ok=True) - - def test_save_creates_valid_format(self): - """Test that saved file has correct binary format.""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.02) - bloom.add(42) - - with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f: - temp_path = Path(f.name) - - try: - bloom.save(temp_path) - - # Verify file structure - with open(temp_path, "rb") as f: - # Header should be 24 bytes - import struct - header = f.read(24) - assert len(header) == 24 - - version, capacity, fp_rate, num_bits, num_hashes = struct.unpack(' 3_000_000 # ~3 MB - - def test_high_fp_rate(self): - """Test filter with high false positive rate (10%).""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.10) - - items = list(range(100)) - for item in items: - bloom.add(item) - - # Should still contain all added items - for item in items: - assert 
bloom.might_contain(item) - - # FP rate should be close to 10% - actual_fp = bloom.get_actual_fp_rate() - assert 0.05 <= actual_fp <= 0.15 # Allow 5-15% range - - def test_low_fp_rate(self): - """Test filter with very low false positive rate (0.1%).""" - bloom = TileBloomFilter(capacity=100, fp_rate=0.001) - - items = list(range(100)) - for item in items: - bloom.add(item) - - # Should contain all added items - for item in items: - assert bloom.might_contain(item) - - # Bit array should be larger (lower FP rate = more bits) - bloom_high_fp = TileBloomFilter(capacity=100, fp_rate=0.01) - assert len(bloom.bit_array) > len(bloom_high_fp.bit_array) - - def test_tile_id_range(self): - """Test with realistic HEALPix tile IDs.""" - # HEALPix nside=512 has 3,145,728 tiles - # Tile IDs range from 0 to 3,145,727 - bloom = TileBloomFilter(capacity=10000, fp_rate=0.01) - - # Add some tiles from different parts of the sky - tiles = [0, 1, 100, 1000, 10000, 100000, 1000000, 3145727] - for tile_id in tiles: - bloom.add(tile_id) - - # All should be present - for tile_id in tiles: - assert bloom.might_contain(tile_id), f"Tile {tile_id} should be in filter" - - def test_hash_distribution(self): - """Test that hash function distributes items evenly.""" - bloom = TileBloomFilter(capacity=1000, fp_rate=0.01) - - # Add 1000 sequential tile IDs - for tile_id in range(1000): - bloom.add(tile_id) - - # Count set bits - set_bits = sum( - 1 for byte in bloom.bit_array - for bit in range(8) - if byte & (1 << bit) - ) - - # With good hash distribution, ~50-70% of bits should be set - # (depends on num_hashes and capacity) - bit_fill_ratio = set_bits / bloom.num_bits - assert 0.3 <= bit_fill_ratio <= 0.8, f"Bit fill ratio {bit_fill_ratio:.2%} suggests poor distribution" - - -class TestTileBloomFilterIntegration: - """Test integration with star catalog use cases.""" - - def test_sparse_sky_coverage(self): - """Test bloom filter behavior with sparse tile coverage (like mag 0-6).""" - # Mag 0-6 has only 6,465 tiles out of 3.1M possible - # Most queries will be for non-existent tiles - bloom = TileBloomFilter(capacity=6465, fp_rate=0.01) - - # Add actual tiles (scattered across sky) - actual_tiles = [i * 500 for i in range(6465)] # Sparse distribution - for tile_id in actual_tiles: - bloom.add(tile_id) - - # Query for tiles in a typical FOV (48 tiles) - query_tiles = list(range(1000, 1048)) # Probably no bright stars here - - # Most should be filtered out (not in bloom filter) - passed = [t for t in query_tiles if bloom.might_contain(t)] - - # Expect ~1% false positive rate: 48 * 0.01 = 0.48, so 0-2 tiles - assert len(passed) <= 3, f"Too many tiles passed filter: {len(passed)}/48" - - def test_dense_sky_coverage(self): - """Test bloom filter behavior with dense tile coverage (like mag 14-17).""" - # Mag 14-17 has 3.1M tiles (98% coverage) - # Most queries will find tiles - bloom = TileBloomFilter(capacity=3_100_000, fp_rate=0.01) - - # Add most tiles (simulating 98% coverage) - # For testing, add every tile except multiples of 50 - for tile_id in range(3_100_000): - if tile_id % 50 != 0: - bloom.add(tile_id) - - # Query for tiles in a typical FOV (48 tiles) - query_tiles = list(range(1000000, 1000048)) - - # Most should pass filter (they exist) - passed = [t for t in query_tiles if bloom.might_contain(t)] - - # Should pass most tiles (minus the 2% that don't exist + some FP) - assert len(passed) >= 44, f"Too few tiles passed: {len(passed)}/48" - - -if __name__ == "__main__": - pytest.main([__file__, "-v"]) From 
db71507c67f5cc9b6ca0b702918b4a62a7b1afde Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Wed, 26 Nov 2025 23:56:18 +0100 Subject: [PATCH 24/27] Working, pre-refactor commit --- default_config.json | 1 + python/PiFinder/catalog_base.py | 2 +- python/PiFinder/deep_chart.py | 24 +- python/PiFinder/displays.py | 16 + python/PiFinder/star_catalog.py | 51 +-- python/PiFinder/state.py | 2 +- python/PiFinder/ui/menu_structure.py | 29 ++ python/PiFinder/ui/object_details.py | 449 +++++++++++++++++++++----- python/PiFinder/ui/sqm_calibration.py | 2 +- 9 files changed, 465 insertions(+), 111 deletions(-) diff --git a/default_config.json b/default_config.json index 0b14bd1a4..d444017a0 100644 --- a/default_config.json +++ b/default_config.json @@ -17,6 +17,7 @@ "chart_constellations": 64, "obj_chart_crosshair": "pulse", "obj_chart_crosshair_style": "simple", + "obj_chart_crosshair_speed": "2.0", "obj_chart_lm_mode": "auto", "obj_chart_lm_fixed": 14.0, "solve_pixel": [256, 256], diff --git a/python/PiFinder/catalog_base.py b/python/PiFinder/catalog_base.py index 6f07f3f47..12ad7d2ea 100644 --- a/python/PiFinder/catalog_base.py +++ b/python/PiFinder/catalog_base.py @@ -171,7 +171,7 @@ def assign_virtual_object_ids(catalog, low_id: int) -> int: class TimerMixin: """Provides timer functionality via composition""" - def __init__(self): + def __init__(self) -> None: self.timer: Optional[threading.Timer] = None self.is_running: bool = False self.time_delay_seconds: Union[int, Callable[[], int]] = ( diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/deep_chart.py index 0a768726b..d15d46973 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/deep_chart.py @@ -138,15 +138,19 @@ def generate_chart( Returns: PIL Image in RGB (red colorspace), or None if catalog not ready """ + logger.info(f">>> generate_chart() ENTRY: object={catalog_object.display_name}") + # Ensure catalog is loading self.ensure_catalog_loading() # Check state if self.catalog.state != CatalogState.READY: - logger.info(f"Chart generation skipped: catalog state = {self.catalog.state}") + logger.info(f">>> Chart generation skipped: catalog state = {self.catalog.state}") yield None return + logger.info(f">>> Catalog state is READY, proceeding...") + # Check cache cache_key = self.get_cache_key(catalog_object) if cache_key in self.chart_cache: @@ -166,6 +170,8 @@ def generate_chart( if mag <= 0: mag = 50.0 # Default fallback + logger.info(f">>> Chart Generation: object={catalog_object.display_name}, center=({catalog_object.ra:.4f}, {catalog_object.dec:.4f}), fov={fov:.4f}°, mag={mag:.1f}x, eyepiece={equipment.active_eyepiece}") + sqm = self.shared_state.sqm() mag_limit_calculated = self.get_limiting_magnitude(sqm) # For query, cap at catalog max @@ -204,8 +210,12 @@ def generate_chart( # Progressive rendering: Yield image after each magnitude band loads # Re-render all stars each time (simple, correct, fast enough) final_image = None + iteration_count = 0 + logger.info(f">>> Starting star generator loop...") for stars, is_complete in stars_generator: + iteration_count += 1 + logger.info(f">>> Star generator iteration {iteration_count}: got {len(stars)} stars, complete={is_complete}") t_render_start = time.time() # Render ALL stars from scratch @@ -252,7 +262,10 @@ def generate_chart( # Final yield with complete image t1 = time.time() - logger.info(f"Chart complete: {(t1-t0)*1000:.1f}ms total") + logger.info(f">>> Star generator loop complete: {iteration_count} iterations, {(t1-t0)*1000:.1f}ms total") + + if iteration_count 
== 0: + logger.warning(f">>> WARNING: Star generator yielded NO results! FOV={fov:.4f}°, center=({catalog_object.ra:.4f}, {catalog_object.dec:.4f})") # Cache result (limit cache size to 10 charts) if final_image is not None: @@ -346,6 +359,13 @@ def render_chart( # fov degrees should map to width pixels pixel_scale = width / np.radians(fov) + if fov < 0.2: # Debug small FOVs + logger.info(f">>> SMALL FOV DEBUG: fov={fov:.4f}°, pixel_scale={pixel_scale:.1f} px/rad") + if len(stars) > 0: + logger.info(f">>> Star RA range: [{np.min(ra_arr):.4f}, {np.max(ra_arr):.4f}]") + logger.info(f">>> Star Dec range: [{np.min(dec_arr):.4f}, {np.max(dec_arr):.4f}]") + logger.info(f">>> Center: RA={center_ra:.4f}, Dec={center_dec:.4f}") + # Convert to screen coordinates FIRST # Center of field should always be at width/2, height/2 # IMPORTANT: Flip X-axis to match POSS image orientation diff --git a/python/PiFinder/displays.py b/python/PiFinder/displays.py index 9b53551b4..7b1055bca 100644 --- a/python/PiFinder/displays.py +++ b/python/PiFinder/displays.py @@ -70,6 +70,14 @@ class DisplayPygame_128(DisplayBase): def __init__(self): from luma.emulator.device import pygame + import pygame as pg + from pathlib import Path + + # Set window icon to welcome splash screen before creating display + icon_path = Path(__file__).parent.parent.parent / "images" / "welcome.png" + if icon_path.exists(): + icon = pg.image.load(str(icon_path)) + pg.display.set_icon(icon) # init display (SPI hardware) pygame = pygame( @@ -90,6 +98,14 @@ class DisplayPygame_320(DisplayBase): def __init__(self): from luma.emulator.device import pygame + import pygame as pg + from pathlib import Path + + # Set window icon to welcome splash screen before creating display + icon_path = Path(__file__).parent.parent.parent / "images" / "welcome.png" + if icon_path.exists(): + icon = pg.image.load(str(icon_path)) + pg.display.set_icon(icon) # init display (SPI hardware) pygame = pygame( diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/star_catalog.py index 95108baf3..8e139f587 100644 --- a/python/PiFinder/star_catalog.py +++ b/python/PiFinder/star_catalog.py @@ -38,18 +38,17 @@ logger = logging.getLogger("PiFinder.StarCatalog") # Optimized tile format: header + star records (no redundant HEALPix per star) -TILE_HEADER_FORMAT = " Tuple[np.ndarray, np.ndarray, np.ndarra decs = pixel_dec + dec_offset_arcsec / 3600.0 ras = pixel_ra + ra_offset_arcsec / 3600.0 / np.cos(np.radians(decs)) - # Decode magnitudes and proper motions + # Decode magnitudes mags = records['mag'] / 10.0 - pmras = records['pmra'] * 50 - pmdecs = records['pmdec'] * 50 + + # v2.1: Proper motion has been pre-applied at build time + # Return empty arrays for backward compatibility + pmras = np.zeros(len(records)) + pmdecs = np.zeros(len(records)) return ras, decs, mags, pmras, pmdecs @@ -1130,7 +1133,7 @@ def _load_tiles_batch_single_band( self._index_cache = {} t_index_start = time.time() - logger.info(f">>> Checking index cache for {cache_key}, in_cache={cache_key in self._index_cache}") + logger.debug(f"Checking index cache for {cache_key}") if cache_key not in self._index_cache: logger.info(f">>> Loading v3 compressed index from {index_file}") t0 = time.time() @@ -1145,30 +1148,32 @@ def _load_tiles_batch_single_band( logger.debug(f">>> Index cache operations took {t_index_total:.1f}ms") t_readops_start = time.time() - logger.debug(f">>> Building read_ops for {len(tile_ids)} tiles...") + logger.debug(f"Building read_ops for {len(tile_ids)} tiles...") # Collect all 
tile read operations from v3 compressed index read_ops: List[Tuple[int, Dict[str, int]]] = [] missing_tiles = 0 for tile_id in tile_ids: - tile_tuple = index.get(tile_id) + # Ensure tile_id is a Python int (not numpy.int64) + tile_id_int = int(tile_id) + tile_tuple = index.get(tile_id_int) if tile_tuple: offset, size = tile_tuple - read_ops.append((tile_id, {"offset": offset, "size": size})) + read_ops.append((tile_id_int, {"offset": offset, "size": size})) else: missing_tiles += 1 if missing_tiles > 0: - logger.warning(f">>> {missing_tiles} of {len(tile_ids)} tiles missing from index for mag {mag_min}-{mag_max}") + logger.debug(f"{missing_tiles} of {len(tile_ids)} tiles missing from index for mag {mag_min}-{mag_max}") if not read_ops: - logger.debug(f">>> No tiles to load (all {len(tile_ids)} requested tiles are empty)") + logger.debug(f"No tiles to load (all {len(tile_ids)} requested tiles are empty)") return np.empty((0, 3)) # Sort by offset to minimize seeks read_ops.sort(key=lambda x: x[1]["offset"]) t_readops = (time.time() - t_readops_start) * 1000 - logger.debug(f">>> Built {len(read_ops)} read_ops in {t_readops:.1f}ms") + logger.debug(f"Built {len(read_ops)} read_ops in {t_readops:.1f}ms") # Read data in larger sequential chunks when possible MAX_GAP = 100 * 1024 # 100KB gap tolerance @@ -1183,7 +1188,7 @@ def _load_tiles_batch_single_band( t_io_start = time.time() t_decode_total = 0.0 bytes_read = 0 - logger.info(f">>> Batch loading {len(read_ops)} tiles for mag {mag_min}-{mag_max}") + logger.debug(f"Batch loading {len(read_ops)} tiles for mag {mag_min}-{mag_max}") with open(tiles_file, "rb") as f: i = 0 chunk_num = 0 @@ -1265,11 +1270,9 @@ def _load_tiles_batch_single_band( # Log performance breakdown t_io_total = (time.time() - t_io_start) * 1000 - logger.info( - f">>> Tile I/O performance for mag {mag_min}-{mag_max}: " - f"total={t_io_total:.1f}ms, " - f"decode={t_decode_total*1000:.1f}ms, concat={t_concat:.1f}ms, pm={t_pm:.1f}ms, " - f"bytes={bytes_read/1024:.1f}KB, stars={len(result)}" + logger.debug( + f"Tile I/O for mag {mag_min}-{mag_max}: " + f"{t_io_total:.1f}ms, {len(result)} stars, {bytes_read/1024:.1f}KB" ) return result diff --git a/python/PiFinder/state.py b/python/PiFinder/state.py index 6fde5cef3..956153334 100644 --- a/python/PiFinder/state.py +++ b/python/PiFinder/state.py @@ -244,7 +244,7 @@ def from_json(cls, json_str): class SharedStateObj: - def __init__(self): + def __init__(self) -> None: self.__power_state = 1 self.__solve_state = None self.__ui_state = None diff --git a/python/PiFinder/ui/menu_structure.py b/python/PiFinder/ui/menu_structure.py index b10fa85b5..eef91a284 100644 --- a/python/PiFinder/ui/menu_structure.py +++ b/python/PiFinder/ui/menu_structure.py @@ -849,6 +849,10 @@ def _(key: str) -> Any: "name": _("Pulse"), "value": "pulse", }, + { + "name": _("Fade"), + "value": "fade", + }, ], }, { @@ -884,6 +888,31 @@ def _(key: str) -> Any: }, ], }, + { + "name": _("Speed"), + "class": UITextMenu, + "select": "single", + "label": "obj_chart_speed", + "config_option": "obj_chart_crosshair_speed", + "items": [ + { + "name": _("Fast (1s)"), + "value": "1.0", + }, + { + "name": _("Medium (2s)"), + "value": "2.0", + }, + { + "name": _("Slow (3s)"), + "value": "3.0", + }, + { + "name": _("Very Slow (4s)"), + "value": "4.0", + }, + ], + }, { "name": _("Set LM"), "class": UITextMenu, diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 9f10d4df9..11ebf2987 100644 --- a/python/PiFinder/ui/object_details.py +++ 
b/python/PiFinder/ui/object_details.py @@ -23,6 +23,7 @@ import functools from PiFinder.db.observations_db import ObservationsDatabase +from PIL import Image, ImageDraw, ImageChops import logging import numpy as np import time @@ -37,6 +38,64 @@ DM_CHART = 4 # Display mode for deep chart +class EyepieceInput: + """ + Handles custom eyepiece focal length input (1-99mm) + """ + + def __init__(self): + self.focal_length_mm = 0 + self.digits = [] + self.last_input_time = 0 + + def append_digit(self, digit: int) -> bool: + """ + Append a digit to the input. + Returns True if input is complete (2 digits or auto-timeout) + """ + import time + + self.digits.append(digit) + self.last_input_time = time.time() + + # Update focal length + if len(self.digits) == 1: + self.focal_length_mm = digit + else: + self.focal_length_mm = self.digits[0] * 10 + self.digits[1] + + # Auto-complete after 2 digits + return len(self.digits) >= 2 + + def is_complete(self) -> bool: + """Check if input has timed out (1.5 seconds)""" + import time + if len(self.digits) == 0: + return False + if len(self.digits) >= 2: + return True + return time.time() - self.last_input_time > 1.5 + + def reset(self): + """Clear the input""" + self.digits = [] + self.focal_length_mm = 0 + self.last_input_time = 0 + + def has_input(self) -> bool: + """Check if any digits have been entered""" + return len(self.digits) > 0 + + def __str__(self): + """Return display string for popup""" + if len(self.digits) == 0: + return "__" + elif len(self.digits) == 1: + return f"{self.digits[0]}_" + else: + return f"{self.digits[0]}{self.digits[1]}" + + class UIObjectDetails(UIModule): """ Shows details about an object @@ -59,7 +118,9 @@ def __init__(self, *args, **kwargs): self._chart_generator = None # Active generator for progressive chart updates self._is_showing_loading_chart = False # Track if showing "Loading..." 
for deep chart self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists - self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists + self.eyepiece_input = EyepieceInput() # Custom eyepiece input handler + self.eyepiece_input_display = False # Show eyepiece input popup + self._custom_eyepiece = None # Reference to custom eyepiece object in equipment list (None = not active) # Default Marking Menu self._default_marking_menu = MarkingMenu( @@ -249,7 +310,16 @@ def update_object_info(self): if solution: roll = solution["Roll"] + # Calculate magnification and TFOV using current active eyepiece (custom or configured) magnification = self.config_object.equipment.calc_magnification() + tfov = self.config_object.equipment.calc_tfov() + eyepiece_text = str(self.config_object.equipment.active_eyepiece) + + if self._custom_eyepiece is not None: + logger.info(f">>> Using custom eyepiece: {eyepiece_text}, tfov={tfov}, mag={magnification}") + else: + logger.info(f">>> Using configured eyepiece: {eyepiece_text}, tfov={tfov}, mag={magnification}") + prev_object_image = self.object_image # Get or create chart generator (owned by UI layer, not cat_images) @@ -262,8 +332,8 @@ def update_object_info(self): # get_display_image returns either an image directly (POSS) or a generator (deep chart) result = cat_images.get_display_image( self.object, - str(self.config_object.equipment.active_eyepiece), - self.config_object.equipment.calc_tfov(), + eyepiece_text, + tfov, roll, self.display_class, burn_in=self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART], @@ -335,8 +405,9 @@ def _get_pulse_factor(self): import time import numpy as np - # Pulsate: full cycle every 2 seconds - pulse_period = 2.0 # seconds + # Get pulse period from config (default 2.0 seconds) + pulse_period = float(self.config_object.get_option("obj_chart_crosshair_speed", "2.0")) + t = time.time() % pulse_period # Sine wave for smooth pulsation (0.0 to 1.0 range) pulse_factor = 0.5 + 0.5 * np.sin(2 * np.pi * t / pulse_period) @@ -349,27 +420,50 @@ def _get_pulse_factor(self): return pulse_factor, size_multiplier, color_intensity - def _draw_crosshair_simple(self, pulse=False): + def _get_fade_factor(self): + """ + Calculate current fade factor for animations + Returns color_intensity that fades from 0 to 128 + - Crosshair stays at minimum size + - Only brightness changes + """ + import time + import numpy as np + + # Get fade period from config (default 2.0 seconds) + fade_period = float(self.config_object.get_option("obj_chart_crosshair_speed", "2.0")) + + t = time.time() % fade_period + # Sine wave for smooth fading (0.0 to 1.0 range) + fade_factor = 0.5 + 0.5 * np.sin(2 * np.pi * t / fade_period) + + # Color intensity: 0 to 128 (fade from invisible to half brightness) + # Use round instead of int for better distribution + color_intensity = round(128 * fade_factor) + + return color_intensity + + def _draw_crosshair_simple(self, mode="off"): """ Draw simple crosshair with 4 lines and center gap using inverted pixels Args: - pulse: If True, apply pulsation effect + mode: Animation mode - "off", "pulse", or "fade" (fade not supported for inverted pixels) """ import numpy as np width, height = self.display_class.resolution cx, cy = int(width / 2.0), int(height / 2.0) - if pulse: - _, size_mult, _ = self._get_pulse_factor() - # Size pulsates from 6 down to 3 pixels (inverted - more steps) - outer = int(6.0 - (3.0 * size_mult)) # 6.0 down to 3.0 (more visible integer steps) + if mode == "pulse": + 
pulse_factor, _, _ = self._get_pulse_factor() + # Size pulsates from 7 down to 4 pixels (inverted - more steps) + outer = int(7.0 - (3.0 * pulse_factor)) # 7.0 down to 4.0 (smooth animation) else: - # Fixed size - outer = 4 + # Fixed size (fade mode not supported for inverted pixels) + outer = 5 - inner = 2 # Fixed gap (small center) + inner = 3 # Fixed gap (slightly larger center hole) # Get screen buffer as numpy array for pixel manipulation pixels = np.array(self.screen) @@ -379,7 +473,7 @@ def _draw_crosshair_simple(self, pulse=False): for x in range(max(0, cx - outer), max(0, cx - inner)): if 0 <= x < width and 0 <= cy < height: pixels[cy, x, 0] = 255 - pixels[cy, x, 0] - for x in range(min(width, cx + inner), min(width, cx + outer + 1)): + for x in range(min(width, cx + inner), min(width, cx + outer)): if 0 <= x < width and 0 <= cy < height: pixels[cy, x, 0] = 255 - pixels[cy, x, 0] @@ -387,121 +481,161 @@ def _draw_crosshair_simple(self, pulse=False): for y in range(max(0, cy - outer), max(0, cy - inner)): if 0 <= y < height and 0 <= cx < width: pixels[y, cx, 0] = 255 - pixels[y, cx, 0] - for y in range(min(height, cy + inner), min(height, cy + outer + 1)): + for y in range(min(height, cy + inner), min(height, cy + outer)): if 0 <= y < height and 0 <= cx < width: pixels[y, cx, 0] = 255 - pixels[y, cx, 0] # Update screen buffer with inverted pixels - from PIL import Image self.screen = Image.fromarray(pixels, mode="RGB") # Re-create draw object since we replaced the image - from PIL import ImageDraw self.draw = ImageDraw.Draw(self.screen) - def _draw_crosshair_circle(self, pulse=False): + def _draw_crosshair_circle(self, mode="off"): """ Draw circle reticle Args: - pulse: If True, apply pulsation effect + mode: Animation mode - "off", "pulse", or "fade" """ width, height = self.display_class.resolution cx, cy = width / 2.0, height / 2.0 - if pulse: - _, size_mult, color_intensity = self._get_pulse_factor() - radius = 8.0 - (4.0 * size_mult) # 8.0 down to 4.0 (more steps) - marker_color = self.colors.get(color_intensity) + if mode == "pulse": + pulse_factor, _, color_intensity = self._get_pulse_factor() + radius = 8.0 - (4.0 * pulse_factor) # 8.0 down to 4.0 (smooth animation) + elif mode == "fade": + color_intensity = self._get_fade_factor() + radius = 4 # Fixed minimum size else: + color_intensity = 64 radius = 4 # Smaller fixed size - marker_color = self.colors.get(64) - # Draw circle + # Create a separate layer for the crosshair + crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) + crosshair_draw = ImageDraw.Draw(crosshair_layer) + + # Draw circle on the layer + marker_color = (color_intensity, 0, 0) bbox = [cx - radius, cy - radius, cx + radius, cy + radius] - self.draw.ellipse(bbox, outline=marker_color, width=1) + crosshair_draw.ellipse(bbox, outline=marker_color, width=1) - # Small center dot - self.draw.ellipse([cx - 1, cy - 1, cx + 1, cy + 1], fill=marker_color) + # Use lighten blend: take the lighter of the two values for each pixel + self.screen = ImageChops.lighter(self.screen, crosshair_layer) + self.draw = ImageDraw.Draw(self.screen) - def _draw_crosshair_bullseye(self, pulse=False): + def _draw_crosshair_bullseye(self, mode="off"): """ Draw concentric circles (bullseye) Args: - pulse: If True, apply pulsation effect + mode: Animation mode - "off", "pulse", or "fade" """ width, height = self.display_class.resolution cx, cy = width / 2.0, height / 2.0 - if pulse: - _, size_mult, color_intensity = self._get_pulse_factor() - marker_color = 
self.colors.get(color_intensity) - # Pulsate from larger to smaller (more visible steps) - radii = [4.0 - (2.0 * size_mult), 8.0 - (4.0 * size_mult), 12.0 - (6.0 * size_mult)] # 4→2, 8→4, 12→6 + if mode == "pulse": + pulse_factor, _, color_intensity = self._get_pulse_factor() + # Pulsate from larger to smaller (smooth animation) + radii = [4.0 - (2.0 * pulse_factor), 8.0 - (4.0 * pulse_factor), 12.0 - (6.0 * pulse_factor)] # 4→2, 8→4, 12→6 + elif mode == "fade": + color_intensity = self._get_fade_factor() + radii = [2, 4, 6] # Fixed minimum radii else: - marker_color = self.colors.get(64) + color_intensity = 64 radii = [2, 4, 6] # Smaller fixed radii - # Draw concentric circles + # Create a separate layer for the crosshair + crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) + crosshair_draw = ImageDraw.Draw(crosshair_layer) + + # Draw concentric circles on the layer + marker_color = (color_intensity, 0, 0) for radius in radii: bbox = [cx - radius, cy - radius, cx + radius, cy + radius] - self.draw.ellipse(bbox, outline=marker_color, width=1) + crosshair_draw.ellipse(bbox, outline=marker_color, width=1) + + # Use lighten blend + self.screen = ImageChops.lighter(self.screen, crosshair_layer) + self.draw = ImageDraw.Draw(self.screen) - def _draw_crosshair_brackets(self, pulse=False): + def _draw_crosshair_brackets(self, mode="off"): """ Draw corner brackets (frame corners) Args: - pulse: If True, apply pulsation effect + mode: Animation mode - "off", "pulse", or "fade" """ width, height = self.display_class.resolution - cx, cy = width / 2.0, height / 2.0 + cx, cy = int(width / 2.0), int(height / 2.0) - if pulse: - _, size_mult, color_intensity = self._get_pulse_factor() - size = 8.0 - (4.0 * size_mult) # 8.0 down to 4.0 (more steps) - length = 5.0 - (2.0 * size_mult) # 5.0 down to 3.0 (more steps) - marker_color = self.colors.get(color_intensity) + if mode == "pulse": + pulse_factor, _, color_intensity = self._get_pulse_factor() + size = int(8.0 - (4.0 * pulse_factor)) # 8.0 down to 4.0 (smooth animation) + length = int(5.0 - (2.0 * pulse_factor)) # 5.0 down to 3.0 (smooth animation) + elif mode == "fade": + color_intensity = self._get_fade_factor() + size = 4 # Fixed minimum size + length = 3 # Fixed minimum length else: + color_intensity = 64 size = 4 # Smaller distance from center to bracket corner length = 3 # Shorter bracket arms - marker_color = self.colors.get(64) + # Create a separate layer for the crosshair + crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) + crosshair_draw = ImageDraw.Draw(crosshair_layer) + + marker_color = (color_intensity, 0, 0) + + # Draw brackets on the layer # Top-left bracket - self.draw.line([cx - size, cy - size, cx - size + length, cy - size], fill=marker_color, width=1) - self.draw.line([cx - size, cy - size, cx - size, cy - size + length], fill=marker_color, width=1) + crosshair_draw.line([cx - size, cy - size, cx - size + length, cy - size], fill=marker_color, width=1) + crosshair_draw.line([cx - size, cy - size, cx - size, cy - size + length], fill=marker_color, width=1) # Top-right bracket - self.draw.line([cx + size, cy - size, cx + size - length, cy - size], fill=marker_color, width=1) - self.draw.line([cx + size, cy - size, cx + size, cy - size + length], fill=marker_color, width=1) + crosshair_draw.line([cx + size - length, cy - size, cx + size, cy - size], fill=marker_color, width=1) + crosshair_draw.line([cx + size, cy - size, cx + size, cy - size + length], fill=marker_color, width=1) # Bottom-left bracket - 
self.draw.line([cx - size, cy + size, cx - size + length, cy + size], fill=marker_color, width=1) - self.draw.line([cx - size, cy + size, cx - size, cy + size - length], fill=marker_color, width=1) + crosshair_draw.line([cx - size, cy + size, cx - size + length, cy + size], fill=marker_color, width=1) + crosshair_draw.line([cx - size, cy + size - length, cx - size, cy + size], fill=marker_color, width=1) # Bottom-right bracket - self.draw.line([cx + size, cy + size, cx + size - length, cy + size], fill=marker_color, width=1) - self.draw.line([cx + size, cy + size, cx + size, cy + size - length], fill=marker_color, width=1) + crosshair_draw.line([cx + size - length, cy + size, cx + size, cy + size], fill=marker_color, width=1) + crosshair_draw.line([cx + size, cy + size - length, cx + size, cy + size], fill=marker_color, width=1) + + # Use lighten blend + self.screen = ImageChops.lighter(self.screen, crosshair_layer) + self.draw = ImageDraw.Draw(self.screen) - def _draw_crosshair_dots(self, pulse=False): + def _draw_crosshair_dots(self, mode="off"): """ Draw four corner dots Args: - pulse: If True, apply pulsation effect + mode: Animation mode - "off", "pulse", or "fade" """ width, height = self.display_class.resolution cx, cy = width / 2.0, height / 2.0 - if pulse: - _, size_mult, color_intensity = self._get_pulse_factor() - distance = 8.0 - (4.0 * size_mult) # 8.0 down to 4.0 (more steps) - dot_size = 3.0 - (1.5 * size_mult) # 3.0 down to 1.5 (more steps) - marker_color = self.colors.get(color_intensity) + if mode == "pulse": + pulse_factor, _, color_intensity = self._get_pulse_factor() + distance = 8.0 - (4.0 * pulse_factor) # 8 down to 4 (smooth animation) + dot_size = 3.0 - (1.5 * pulse_factor) # 3 down to 1 (smooth animation) + elif mode == "fade": + color_intensity = self._get_fade_factor() + distance = 4 # Fixed minimum distance + dot_size = 1 # Fixed minimum size else: + color_intensity = 64 distance = 4 # Smaller distance from center to dots dot_size = 1 # Smaller dot radius - marker_color = self.colors.get(64) + + # Create a separate layer for the crosshair + crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) + crosshair_draw = ImageDraw.Draw(crosshair_layer) + + marker_color = (color_intensity, 0, 0) # Four corner dots positions = [ @@ -513,28 +647,43 @@ def _draw_crosshair_dots(self, pulse=False): for x, y in positions: bbox = [x - dot_size, y - dot_size, x + dot_size, y + dot_size] - self.draw.ellipse(bbox, fill=marker_color) + crosshair_draw.ellipse(bbox, fill=marker_color) - def _draw_crosshair_cross(self, pulse=False): + # Use lighten blend + self.screen = ImageChops.lighter(self.screen, crosshair_layer) + self.draw = ImageDraw.Draw(self.screen) + + def _draw_crosshair_cross(self, mode="off"): """ Draw full cross (lines extend across entire screen) Args: - pulse: If True, apply pulsation effect + mode: Animation mode - "off", "pulse", or "fade" """ width, height = self.display_class.resolution cx, cy = width / 2.0, height / 2.0 - if pulse: - _, size_mult, color_intensity = self._get_pulse_factor() - marker_color = self.colors.get(color_intensity) + if mode == "pulse": + pulse_factor, _, color_intensity = self._get_pulse_factor() + elif mode == "fade": + color_intensity = self._get_fade_factor() else: - marker_color = self.colors.get(64) + color_intensity = 64 + + # Create a separate layer for the crosshair + crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) + crosshair_draw = ImageDraw.Draw(crosshair_layer) + + marker_color = (color_intensity, 0, 0) 
# Horizontal line - self.draw.line([0, cy, width, cy], fill=marker_color, width=1) + crosshair_draw.line([0, cy, width, cy], fill=marker_color, width=1) # Vertical line - self.draw.line([cx, 0, cx, height], fill=marker_color, width=1) + crosshair_draw.line([cx, 0, cx, height], fill=marker_color, width=1) + + # Use lighten blend + self.screen = ImageChops.lighter(self.screen, crosshair_layer) + self.draw = ImageDraw.Draw(self.screen) def _draw_fov_circle(self): """ @@ -702,11 +851,53 @@ def _get_chart_generator(self): logger.info(f">>> _get_chart_generator returning: {chart_gen}") return chart_gen + def _apply_custom_eyepiece(self): + """Apply the custom eyepiece focal length and update display""" + from PiFinder.equipment import Eyepiece + + # Capture the focal length before resetting + focal_length = self.eyepiece_input.focal_length_mm + + # Reset input state FIRST to prevent recursion in update() + self.eyepiece_input.reset() + self.eyepiece_input_display = False + + # Apply the custom eyepiece + if focal_length > 0: + logger.info(f">>> Applying custom eyepiece: {focal_length}mm") + + # Remove old custom eyepiece if it exists + if self._custom_eyepiece is not None and self._custom_eyepiece in self.config_object.equipment.eyepieces: + logger.info(f">>> Removing old custom eyepiece: {self._custom_eyepiece}") + self.config_object.equipment.eyepieces.remove(self._custom_eyepiece) + + # Create and add new custom eyepiece + self._custom_eyepiece = Eyepiece( + make="Custom", + name=f"{focal_length}mm", + focal_length_mm=focal_length, + afov=50, # Default AFOV for custom eyepiece + field_stop=0 + ) + self.config_object.equipment.eyepieces.append(self._custom_eyepiece) + self.config_object.equipment.active_eyepiece_index = len(self.config_object.equipment.eyepieces) - 1 + logger.info(f">>> Added custom eyepiece to equipment list: {self._custom_eyepiece}") + + self.update_object_info() + self.update() + else: + logger.warning(f">>> Invalid focal length: {focal_length}mm, not applying") + def update(self, force=True): import logging import time logger = logging.getLogger("ObjectDetails") + # Check for eyepiece input timeout + if self.eyepiece_input_display and self.eyepiece_input.is_complete(): + # Auto-complete the input + self._apply_custom_eyepiece() + # If we have a chart generator, consume one yield to get the next progressive update if hasattr(self, '_chart_generator') and self._chart_generator is not None: try: @@ -781,9 +972,6 @@ def update(self, force=True): crosshair_style = self.config_object.get_option("obj_chart_crosshair_style") if crosshair_mode != "off": - # Determine if we should pulse - pulse = (crosshair_mode == "pulse") - # Call the appropriate drawing method based on style style_methods = { "simple": self._draw_crosshair_simple, @@ -795,7 +983,15 @@ def update(self, force=True): } draw_method = style_methods.get(crosshair_style, self._draw_crosshair_simple) - draw_method(pulse=pulse) + draw_method(mode=crosshair_mode) + + # Force continuous updates for animated crosshairs + if crosshair_mode in ["pulse", "fade"]: + force = True + else: + # Clear screen when not in image modes (DESC, LOCATE) + self.screen = Image.new("RGB", self.display_class.resolution) + self.draw = ImageDraw.Draw(self.screen, mode="RGBA") if self.object_display_mode == DM_DESC or self.object_display_mode == DM_LOCATE: # catalog and entry field i.e. 
NGC-311 @@ -847,6 +1043,14 @@ def update(self, force=True): desc.set_available_lines(desc_available_lines) desc.draw((0, posy)) + # Display eyepiece input popup if active + if self.eyepiece_input_display: + self.message( + f"{str(self.eyepiece_input)}mm", + 0.1, + [30, 10, 93, 40], + ) + result = self.screen_update() return result @@ -956,9 +1160,14 @@ def key_left(self): def key_right(self): """ - When right is pressed, move to - logging screen + When right is pressed, move to logging screen + Or, if eyepiece input is active, complete the input """ + # If eyepiece input is active, complete it + if self.eyepiece_input_display: + self._apply_custom_eyepiece() + return True + self.maybe_add_to_recents() if self.shared_state.solution() is None: return @@ -970,7 +1179,56 @@ def key_right(self): self.add_to_stack(object_item_definition) def change_fov(self, direction): - self.config_object.equipment.cycle_eyepieces(direction) + """ + Change field of view by cycling eyepieces. + If a custom eyepiece is active, jump to the nearest configured eyepiece and remove custom. + """ + if self._custom_eyepiece is not None: + # Custom eyepiece is active - remove it and find nearest configured eyepiece + logger.info(f">>> Custom eyepiece active, switching to configured eyepieces") + custom_focal_length = self._custom_eyepiece.focal_length_mm + + # Remove custom eyepiece from equipment list + if self._custom_eyepiece in self.config_object.equipment.eyepieces: + self.config_object.equipment.eyepieces.remove(self._custom_eyepiece) + self._custom_eyepiece = None + + # Get configured eyepieces (now that custom is removed) + eyepieces = self.config_object.equipment.eyepieces + if not eyepieces: + return + + # Sort eyepieces by focal length + sorted_eyepieces = sorted(eyepieces, key=lambda e: e.focal_length_mm) + + if direction > 0: + # Find next larger eyepiece (smaller magnification) + for ep in sorted_eyepieces: + if ep.focal_length_mm > custom_focal_length: + self.config_object.equipment.active_eyepiece_index = eyepieces.index(ep) + logger.info(f">>> Jumped to next larger: {ep}") + break + else: + # No larger eyepiece found, wrap to smallest + self.config_object.equipment.active_eyepiece_index = eyepieces.index(sorted_eyepieces[0]) + logger.info(f">>> Wrapped to smallest: {sorted_eyepieces[0]}") + else: + # Find next smaller eyepiece (larger magnification) + for i in range(len(sorted_eyepieces) - 1, -1, -1): + ep = sorted_eyepieces[i] + if ep.focal_length_mm < custom_focal_length: + self.config_object.equipment.active_eyepiece_index = eyepieces.index(ep) + logger.info(f">>> Jumped to next smaller: {ep}") + break + else: + # No smaller eyepiece found, wrap to largest + self.config_object.equipment.active_eyepiece_index = eyepieces.index(sorted_eyepieces[-1]) + logger.info(f">>> Wrapped to largest: {sorted_eyepieces[-1]}") + else: + # Normal eyepiece cycling + self.config_object.equipment.cycle_eyepieces(direction) + logger.info(f">>> Normal cycle to: {self.config_object.equipment.active_eyepiece}") + self.update_object_info() self.update() @@ -995,10 +1253,19 @@ def key_minus(self): def key_number(self, number): """ Handle number key presses - 0: Toggle between POSS image and deep chart (when both are available) + When viewing image (DM_POSS/DM_CHART): + - 0: Toggle between POSS image and deep chart (only if no input active) + - 1-9: Start custom eyepiece input + - After first digit, 0-9 adds second digit or completes input """ logger.info(f">>> key_number({number}) called") - if number == 0: + + # Only handle 
custom eyepiece input in image display modes + if self.object_display_mode not in [DM_POSS, DM_SDSS, DM_CHART]: + return + + # Special case: 0 when no input is active toggles POSS/chart + if number == 0 and not self.eyepiece_input_display: logger.info(f">>> Toggling _force_deep_chart (was: {self._force_deep_chart})") # Toggle the flag self._force_deep_chart = not self._force_deep_chart @@ -1021,3 +1288,21 @@ def key_number(self, number): logger.info(f">>> update() returned: {type(update_result)}") logger.info(">>> key_number(0) complete") return True + + # Handle custom eyepiece input (1-9 to start, 0-9 for second digit) + if number >= 1 or (number == 0 and self.eyepiece_input_display): + logger.info(f">>> Adding digit {number} to eyepiece input") + is_complete = self.eyepiece_input.append_digit(number) + self.eyepiece_input_display = True + logger.info(f">>> After adding digit: focal_length={self.eyepiece_input.focal_length_mm}mm, complete={is_complete}, display='{self.eyepiece_input}'") + + if is_complete: + # Two digits entered, apply immediately + logger.info(f">>> Input complete, applying {self.eyepiece_input.focal_length_mm}mm") + self._apply_custom_eyepiece() + else: + # Show popup with current input + logger.info(f">>> Input incomplete, showing popup") + self.update() + + return True diff --git a/python/PiFinder/ui/sqm_calibration.py b/python/PiFinder/ui/sqm_calibration.py index 2fb92e92f..6f72351e9 100644 --- a/python/PiFinder/ui/sqm_calibration.py +++ b/python/PiFinder/ui/sqm_calibration.py @@ -49,7 +49,7 @@ class UISQMCalibration(UIModule): __title__ = "SQM CAL" __help_name__ = "" - def __init__(self, *args, **kwargs): + def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) # Wizard state machine From 3f51295b20cea478f568f0bb99933e4984d0647a Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Thu, 27 Nov 2025 21:55:18 +0100 Subject: [PATCH 25/27] Refactoring --- python/PiFinder/audit_images.py | 5 +- python/PiFinder/cat_images.py | 315 ------------------ python/PiFinder/get_images.py | 12 +- python/PiFinder/main.py | 6 +- python/PiFinder/object_images/__init__.py | 65 ++++ .../PiFinder/object_images/chart_provider.py | 134 ++++++++ .../gaia_chart.py} | 151 +++++++-- python/PiFinder/object_images/image_base.py | 72 ++++ .../{ => object_images}/image_utils.py | 138 +++++++- .../PiFinder/object_images/poss_provider.py | 175 ++++++++++ .../{ => object_images}/star_catalog.py | 0 python/PiFinder/ui/log.py | 8 +- python/PiFinder/ui/object_details.py | 117 +++---- python/tests/test_limiting_magnitude.py | 30 +- python/tests/test_star_catalog.py | 2 +- 15 files changed, 772 insertions(+), 458 deletions(-) delete mode 100644 python/PiFinder/cat_images.py create mode 100644 python/PiFinder/object_images/__init__.py create mode 100644 python/PiFinder/object_images/chart_provider.py rename python/PiFinder/{deep_chart.py => object_images/gaia_chart.py} (86%) create mode 100644 python/PiFinder/object_images/image_base.py rename python/PiFinder/{ => object_images}/image_utils.py (58%) create mode 100644 python/PiFinder/object_images/poss_provider.py rename python/PiFinder/{ => object_images}/star_catalog.py (100%) diff --git a/python/PiFinder/audit_images.py b/python/PiFinder/audit_images.py index ef37fdb70..4c59f66f1 100644 --- a/python/PiFinder/audit_images.py +++ b/python/PiFinder/audit_images.py @@ -1,4 +1,5 @@ #!/usr/bin/python +from PiFinder.object_images.poss_provider import POSSImageProvider # -*- coding:utf-8 -*- # mypy: ignore-errors """ @@ -10,7 +11,7 @@ 
import sqlite3 from tqdm import tqdm -from PiFinder import cat_images +from PiFinder.object_images import get_display_image def get_catalog_objects(): @@ -59,7 +60,7 @@ def check_object_image(catalog_object): if aka_sequence: catalog_object = {"catalog": "NGC", "sequence": aka_sequence} - object_image_path = cat_images.resolve_image_name(catalog_object, "POSS") + object_image_path = POSSImageProvider()._resolve_image_name(catalog_object, "POSS") # POSS image_name = object_image_path.split("/")[-1] seq_ones = image_name.split("_")[0][-1] diff --git a/python/PiFinder/cat_images.py b/python/PiFinder/cat_images.py deleted file mode 100644 index a4693cc6c..000000000 --- a/python/PiFinder/cat_images.py +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/python -# -*- coding:utf-8 -*- -""" -This module is used at runtime -to handle catalog image loading -""" - -import os -from PIL import Image, ImageChops, ImageDraw -from PiFinder import image_util -from PiFinder import utils -import PiFinder.ui.ui_utils as ui_utils -import logging - -BASE_IMAGE_PATH = f"{utils.data_dir}/catalog_images" -CATALOG_PATH = f"{utils.astro_data_dir}/pifinder_objects.db" - - -logger = logging.getLogger("Catalog.Images") - - -def get_display_image( - catalog_object, - eyepiece_text, - fov, - roll, - display_class, - burn_in=True, - magnification=None, - config_object=None, - shared_state=None, - chart_generator=None, # Pass in from UI layer instead of creating here - force_deep_chart=False, # Toggle: force deep chart even if POSS image exists -): - """ - Returns a 128x128 image buffer for - the catalog object/source - Resizing/cropping as needed to achieve FOV - in degrees - fov: 1-.125 - roll: - degrees - config_object: - Required for deep chart generation - shared_state: - Required for deep chart generation - """ - - logger.debug(f">>> get_display_image() called for {catalog_object.display_name if catalog_object else 'None'}") - logger.debug(f">>> force_deep_chart={force_deep_chart}, chart_generator={chart_generator is not None}") - - object_image_path = resolve_image_name(catalog_object, source="POSS") - logger.debug(f">>> POSS image path: {object_image_path}, exists: {os.path.exists(object_image_path)}") - - # If force_deep_chart is True, skip POSS image even if it exists - if force_deep_chart or not os.path.exists(object_image_path): - logger.debug(f">>> Will use deep chart (force={force_deep_chart}, poss_missing={not os.path.exists(object_image_path)})") - # Try to generate deep chart if catalog available - return_image = None - - if config_object and shared_state: - from pathlib import Path - from PiFinder import utils - - deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") - - logger.debug(f">>> Deep chart request: chart_generator={chart_generator is not None}, catalog_exists={deep_catalog_path.exists()}, path={deep_catalog_path}") - - # Try to generate deep chart if chart_generator was passed in - if chart_generator is not None and deep_catalog_path.exists(): - logger.debug(">>> chart_generator and deep catalog available, generating chart...") - try: - from PiFinder.image_utils import create_loading_image - - # Ensure catalog loading started - logger.debug(">>> Calling chart_generator.ensure_catalog_loading()...") - chart_generator.ensure_catalog_loading() - logger.debug(f">>> Catalog state: {chart_generator.get_catalog_state()}") - - # RETURN THE GENERATOR ITSELF - don't consume it here! 
- # The UI will consume yields and update display for each one - logger.debug(">>> Returning chart generator (not consuming yields here)") - - # Create generator that yields converted images - def chart_image_generator(): - for image in chart_generator.generate_chart( - catalog_object, - (display_class.fov_res, display_class.fov_res), - burn_in=burn_in, - display_class=display_class, - roll=roll - ): - if image is None: - # Catalog not ready yet, show "Loading..." with progress - if chart_generator.catalog: - progress_text = chart_generator.catalog.load_progress - progress_percent = chart_generator.catalog.load_percent - else: - progress_text = "Initializing..." - progress_percent = 0 - - loading_image = create_loading_image( - display_class, - message="Loading Chart...", - progress_text=progress_text, - progress_percent=progress_percent - ) - loading_image.is_loading_placeholder = True - yield loading_image - else: - # Convert chart to red and yield it - red_image = ImageChops.multiply( - image.convert("RGB"), - display_class.colors.red_image - ) - red_image.is_loading_placeholder = False - yield red_image - - return chart_image_generator() - - # OLD CODE BELOW - never reached - chart_image = None - if chart_image is None: - logger.info(">>> Chart is None, creating loading placeholder...") - # Catalog not ready yet, show "Loading..." with progress - if chart_generator.catalog: - progress_text = chart_generator.catalog.load_progress - progress_percent = chart_generator.catalog.load_percent - else: - progress_text = "Initializing..." - progress_percent = 0 - - return_image = create_loading_image( - display_class, - message="Loading Chart...", - progress_text=progress_text, - progress_percent=progress_percent - ) - # Mark image as "loading" so UI knows to refresh - return_image.is_loading_placeholder = True - logger.info(f">>> Returning loading placeholder: {type(return_image)}") - else: - logger.info(">>> Chart ready, converting to red...") - # Chart ready, convert to red - return_image = ImageChops.multiply( - chart_image.convert("RGB"), - display_class.colors.red_image - ) - return_image.is_loading_placeholder = False - logger.info(f">>> Returning final chart image: {type(return_image)}") - except Exception as e: - logger.error(f">>> Chart generation failed: {e}", exc_info=True) - return_image = None - else: - if chart_generator is None: - logger.warning(">>> Deep chart requested but chart_generator is None") - if not deep_catalog_path.exists(): - logger.warning(f">>> Deep star catalog not found at {deep_catalog_path}") - - # Fallback: "No Image" placeholder - if return_image is None: - logger.debug(">>> No chart generated, creating 'No Image' placeholder") - return_image = Image.new("RGB", display_class.resolution) - ri_draw = ImageDraw.Draw(return_image) - if burn_in: - ri_draw.text( - (30, 50), - _("No Image"), - font=display_class.fonts.large.font, - fill=display_class.colors.get(128), - ) - else: - logger.debug(">>> Using POSS image") - return_image = Image.open(object_image_path) - - # rotate for roll / newtonian orientation - image_rotate = 180 - if roll is not None: - image_rotate += roll - - return_image = return_image.rotate(image_rotate) - - # FOV - fov_size = int(1024 * fov / 2) - return_image = return_image.crop( - ( - 512 - fov_size, - 512 - fov_size, - 512 + fov_size, - 512 + fov_size, - ) - ) - return_image = return_image.resize( - (display_class.fov_res, display_class.fov_res), Image.LANCZOS - ) - - # RED - return_image = image_util.make_red(return_image, 
display_class.colors) - - if burn_in: - # circle - _circle_dim = Image.new( - "RGB", - (display_class.fov_res, display_class.fov_res), - display_class.colors.get(127), - ) - _circle_draw = ImageDraw.Draw(_circle_dim) - _circle_draw.ellipse( - [2, 2, display_class.fov_res - 2, display_class.fov_res - 2], - fill=display_class.colors.get(255), - ) - return_image = ImageChops.multiply(return_image, _circle_dim) - - ri_draw = ImageDraw.Draw(return_image) - ri_draw.ellipse( - [2, 2, display_class.fov_res - 2, display_class.fov_res - 2], - outline=display_class.colors.get(64), - width=1, - ) - - # Pad out image if needed - if display_class.fov_res != display_class.resX: - pad_image = Image.new("RGB", display_class.resolution) - pad_image.paste( - return_image, - ( - int((display_class.resX - display_class.fov_res) / 2), - 0, - ), - ) - return_image = pad_image - ri_draw = ImageDraw.Draw(return_image) - if display_class.fov_res != display_class.resY: - pad_image = Image.new("RGB", display_class.resolution) - pad_image.paste( - return_image, - ( - 0, - int((display_class.resY - display_class.fov_res) / 2), - ), - ) - return_image = pad_image - ri_draw = ImageDraw.Draw(return_image) - - if burn_in: - # Use shared overlay utility for consistency with generated charts - # Create fake eyepiece object from text if needed - from PiFinder.image_utils import add_image_overlays - - # Parse eyepiece text to get eyepiece object - # If we have config_object, use actual eyepiece - if config_object and hasattr(config_object, 'equipment'): - eyepiece_obj = config_object.equipment.active_eyepiece - else: - # Create minimal eyepiece object from text for overlay - class FakeEyepiece: - def __init__(self, text): - self.focal_length_mm = 0 - self.name = text - eyepiece_obj = FakeEyepiece(eyepiece_text) - - return_image = add_image_overlays( - return_image, - display_class, - fov, - magnification, - eyepiece_obj, - burn_in=True - ) - - logger.debug(f">>> get_display_image() RETURNING: {type(return_image)}, size={return_image.size if return_image else None}, has_is_loading={hasattr(return_image, 'is_loading_placeholder') if return_image else False}") - return return_image - - -def resolve_image_name(catalog_object, source): - """ - returns the image path for this object - """ - - def create_image_path(image_name): - last_char = str(image_name)[-1] - image = f"{BASE_IMAGE_PATH}/{last_char}/{image_name}_{source}.jpg" - exists = os.path.exists(image) - return exists, image - - # Try primary name - image_name = f"{catalog_object.catalog_code}{catalog_object.sequence}" - ok, image = create_image_path(image_name) - - if ok: - catalog_object.image_name = image - return image - - # Try alternatives - for name in catalog_object.names: - alt_image_name = f"{''.join(name.split())}" - ok, image = create_image_path(alt_image_name) - if ok: - catalog_object.image_name = image - return image - - return "" - - -def create_catalog_image_dirs(): - """ - Checks for and creates catalog_image dirs - """ - if not os.path.exists(BASE_IMAGE_PATH): - os.makedirs(BASE_IMAGE_PATH) - - for i in range(0, 10): - _image_dir = f"{BASE_IMAGE_PATH}/{i}" - if not os.path.exists(_image_dir): - os.makedirs(_image_dir) diff --git a/python/PiFinder/get_images.py b/python/PiFinder/get_images.py index 8bedfec7b..628f49033 100644 --- a/python/PiFinder/get_images.py +++ b/python/PiFinder/get_images.py @@ -1,3 +1,5 @@ +from PiFinder.object_images.poss_provider import BASE_IMAGE_PATH +from PiFinder.object_images.poss_provider import create_catalog_image_dirs 
#!/usr/bin/python # -*- coding:utf-8 -*- """ @@ -11,7 +13,7 @@ from concurrent.futures import ThreadPoolExecutor, as_completed from typing import List, Tuple -from PiFinder import cat_images +from PiFinder.object_images import get_display_image from PiFinder.db.objects_db import ObjectsDatabase @@ -35,7 +37,7 @@ def check_missing_images() -> List[str]: for image_name in tqdm(image_names, desc="Checking existing images"): # Check if POSS image exists (primary check) poss_path = ( - f"{cat_images.BASE_IMAGE_PATH}/{image_name[-1]}/{image_name}_POSS.jpg" + f"{BASE_IMAGE_PATH}/{image_name[-1]}/{image_name}_POSS.jpg" ) if not os.path.exists(poss_path): missing_images.append(image_name) @@ -79,7 +81,7 @@ def fetch_images_for_object( # Download POSS image poss_filename = f"{image_name}_POSS.jpg" - poss_path = f"{cat_images.BASE_IMAGE_PATH}/{seq_ones}/{poss_filename}" + poss_path = f"{BASE_IMAGE_PATH}/{seq_ones}/{poss_filename}" poss_url = f"https://ddbeeedxfpnp0.cloudfront.net/catalog_images/{seq_ones}/{poss_filename}" poss_success, poss_error = download_image_from_url(session, poss_url, poss_path) @@ -88,7 +90,7 @@ def fetch_images_for_object( # Download SDSS image sdss_filename = f"{image_name}_SDSS.jpg" - sdss_path = f"{cat_images.BASE_IMAGE_PATH}/{seq_ones}/{sdss_filename}" + sdss_path = f"{BASE_IMAGE_PATH}/{seq_ones}/{sdss_filename}" sdss_url = f"https://ddbeeedxfpnp0.cloudfront.net/catalog_images/{seq_ones}/{sdss_filename}" sdss_success, sdss_error = download_image_from_url(session, sdss_url, sdss_path) @@ -154,7 +156,7 @@ def main(): """ Main function to check for and download missing catalog images. """ - cat_images.create_catalog_image_dirs() + create_catalog_image_dirs() print("Checking for missing images...") missing_images = check_missing_images() diff --git a/python/PiFinder/main.py b/python/PiFinder/main.py index 90b1bad2e..8680f1683 100644 --- a/python/PiFinder/main.py +++ b/python/PiFinder/main.py @@ -525,8 +525,8 @@ def main( console.write(" Deep Charts") console.update() logger.info(" Initializing deep chart generator...") - from PiFinder.deep_chart import get_chart_generator - chart_gen = get_chart_generator(cfg, shared_state) + from PiFinder.object_images.gaia_chart import get_gaia_chart_generator + chart_gen = get_gaia_chart_generator(cfg, shared_state) # Trigger background loading so catalog is ready when needed chart_gen.ensure_catalog_loading() logger.info(" Deep chart background loading started") @@ -684,7 +684,7 @@ def main( pass # Deep catalog loading removed - now lazy-loads on first chart view - # (cat_images.py triggers loading when needed) + # (object_images triggers loading when needed) # ui queue try: diff --git a/python/PiFinder/object_images/__init__.py b/python/PiFinder/object_images/__init__.py new file mode 100644 index 000000000..7c9546b2c --- /dev/null +++ b/python/PiFinder/object_images/__init__.py @@ -0,0 +1,65 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +Object image providers for catalog objects + +Provides POSS survey images and generated Gaia star charts +""" + +from typing import Union, Generator +from PIL import Image +from .poss_provider import POSSImageProvider +from .chart_provider import ChartImageProvider +from .image_base import ImageProvider + + +def get_display_image( + catalog_object, + eyepiece_text, + fov, + roll, + display_class, + burn_in=True, + force_chart=False, + **kwargs +) -> Union[Image.Image, Generator]: + """ + Get display image for catalog object + + Returns POSS image if available, otherwise generated Gaia chart. 
+ Use force_chart=True to prefer chart even if POSS exists. + + Args: + catalog_object: The astronomical object to image + eyepiece_text: Eyepiece description for overlay + fov: Field of view in degrees + roll: Rotation angle in degrees + display_class: Display configuration object + burn_in: Whether to add overlays (FOV, mag, etc.) + force_chart: Force Gaia chart even if POSS exists + **kwargs: Additional provider-specific parameters + + Returns: + PIL.Image for POSS images + Generator yielding progressive images for Gaia charts + """ + provider: ImageProvider + if force_chart: + provider = ChartImageProvider( + kwargs.get("config_object"), kwargs.get("shared_state") + ) + else: + poss = POSSImageProvider() + if poss.can_provide(catalog_object): + provider = poss + else: + provider = ChartImageProvider( + kwargs.get("config_object"), kwargs.get("shared_state") + ) + + return provider.get_image( + catalog_object, eyepiece_text, fov, roll, display_class, burn_in=burn_in, **kwargs + ) + + +__all__ = ["get_display_image", "POSSImageProvider", "ChartImageProvider"] diff --git a/python/PiFinder/object_images/chart_provider.py b/python/PiFinder/object_images/chart_provider.py new file mode 100644 index 000000000..616ce2b46 --- /dev/null +++ b/python/PiFinder/object_images/chart_provider.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +Gaia chart provider - generates star charts from Gaia catalog +""" + +from pathlib import Path +from typing import Generator +from PIL import Image, ImageChops, ImageDraw +from PiFinder import utils +from .image_base import ImageProvider, ImageType +import logging + +logger = logging.getLogger("PiFinder.ChartProvider") + + +class ChartImageProvider(ImageProvider): + """ + Provides dynamically generated Gaia star charts + + Uses the GaiaChartGenerator to create on-demand star charts + from the HEALPix-indexed deep star catalog. Returns a generator + that yields progressive updates as magnitude bands load. + """ + + def __init__(self, config_object, shared_state): + """ + Initialize chart provider + + Args: + config_object: PiFinder config object + shared_state: Shared state object + """ + self.config_object = config_object + self.shared_state = shared_state + self._chart_generator = None + + def can_provide(self, catalog_object, **kwargs) -> bool: + """ + Check if Gaia chart can be generated + + Returns True if deep star catalog exists + """ + deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") + return deep_catalog_path.exists() + + def get_image( + self, + catalog_object, + eyepiece_text, + fov, + roll, + display_class, + burn_in=True, + magnification=None, + config_object=None, + shared_state=None, + **kwargs + ) -> Generator: + """ + Generate Gaia star chart + + Yields progressive chart updates as magnitude bands load. + Each yielded image has an `is_loading_placeholder` attribute + indicating whether it's a loading screen or actual chart. 
+ + Returns: + Generator yielding PIL.Image objects + """ + from .image_utils import create_loading_image, create_no_image_placeholder + + # Get chart generator (singleton) + if self._chart_generator is None: + from .gaia_chart import get_gaia_chart_generator + + self._chart_generator = get_gaia_chart_generator( + self.config_object, self.shared_state + ) + + deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") + + if not deep_catalog_path.exists(): + logger.warning(f"Gaia star catalog not found at {deep_catalog_path}") + placeholder = create_no_image_placeholder(display_class, burn_in=burn_in) + yield placeholder + return + + try: + # Ensure catalog loading started + logger.debug("Calling chart_generator.ensure_catalog_loading()...") + self._chart_generator.ensure_catalog_loading() + logger.debug( + f"Catalog state: {self._chart_generator.get_catalog_state()}" + ) + + # Create generator that yields converted images + for image in self._chart_generator.generate_chart( + catalog_object, + (display_class.fov_res, display_class.fov_res), + burn_in=burn_in, + display_class=display_class, + roll=roll, + ): + if image is None: + # Catalog not ready yet, show "Loading..." with progress + if self._chart_generator.catalog: + progress_text = self._chart_generator.catalog.load_progress + progress_percent = self._chart_generator.catalog.load_percent + else: + progress_text = "Initializing..." + progress_percent = 0 + + loading_image = create_loading_image( + display_class, + message="Loading...", + progress_text=progress_text, + progress_percent=progress_percent, + ) + loading_image.image_type = ImageType.LOADING + yield loading_image + else: + # Convert chart to red and yield it + red_image = ImageChops.multiply( + image.convert("RGB"), display_class.colors.red_image + ) + # Mark as Gaia chart image + red_image.image_type = ImageType.GAIA_CHART # type: ignore[attr-defined] + yield red_image + + except Exception as e: + logger.error(f"Gaia chart generation failed: {e}", exc_info=True) + placeholder = create_no_image_placeholder(display_class, burn_in=burn_in) + placeholder.image_type = ImageType.ERROR + yield placeholder diff --git a/python/PiFinder/deep_chart.py b/python/PiFinder/object_images/gaia_chart.py similarity index 86% rename from python/PiFinder/deep_chart.py rename to python/PiFinder/object_images/gaia_chart.py index d15d46973..e522b7afe 100644 --- a/python/PiFinder/deep_chart.py +++ b/python/PiFinder/object_images/gaia_chart.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding:utf-8 -*- """ -Deep star chart generator for objects without DSS/POSS images +Gaia star chart generator for objects without DSS/POSS images Generates on-demand star charts using HEALPix-indexed deep star catalog. 
Features: @@ -20,33 +20,34 @@ from PIL import Image, ImageDraw, ImageFont from PiFinder import utils -from PiFinder.star_catalog import CatalogState, DeepStarCatalog +from PiFinder.object_images.star_catalog import CatalogState, DeepStarCatalog +from PiFinder.object_images.image_utils import pad_to_display_resolution, add_image_overlays -logger = logging.getLogger("PiFinder.DeepChart") +logger = logging.getLogger("PiFinder.GaiaChart") # Global singleton instance to ensure same catalog across all uses -_chart_generator_instance = None +_gaia_chart_generator_instance = None -def get_chart_generator(config, shared_state): +def get_gaia_chart_generator(config, shared_state): """Get or create the global chart generator singleton""" - global _chart_generator_instance - logger.debug(f">>> get_chart_generator() called, instance exists: {_chart_generator_instance is not None}") - if _chart_generator_instance is None: - logger.info(">>> Creating new DeepChartGenerator instance...") - _chart_generator_instance = DeepChartGenerator(config, shared_state) - logger.info(f">>> DeepChartGenerator created, state: {_chart_generator_instance.get_catalog_state()}") + global _gaia_chart_generator_instance + logger.debug(f">>> get_gaia_chart_generator() called, instance exists: {_gaia_chart_generator_instance is not None}") + if _gaia_chart_generator_instance is None: + logger.info(">>> Creating new GaiaChartGenerator instance...") + _gaia_chart_generator_instance = GaiaChartGenerator(config, shared_state) + logger.info(f">>> GaiaChartGenerator created, state: {_gaia_chart_generator_instance.get_catalog_state()}") else: - logger.debug(f">>> Returning existing instance, state: {_chart_generator_instance.get_catalog_state()}") - return _chart_generator_instance + logger.debug(f">>> Returning existing instance, state: {_gaia_chart_generator_instance.get_catalog_state()}") + return _gaia_chart_generator_instance -class DeepChartGenerator: +class GaiaChartGenerator: """ Generate on-demand star charts with equipment-aware settings Usage: - gen = DeepChartGenerator(config, shared_state) + gen = GaiaChartGenerator(config, shared_state) image = gen.generate_chart(catalog_object, (128, 128), burn_in=True) """ @@ -58,7 +59,7 @@ def __init__(self, config, shared_state): config: PiFinder config object shared_state: Shared state object """ - logger.info(">>> DeepChartGenerator.__init__() called") + logger.info(">>> GaiaChartGenerator.__init__() called") self.config = config self.shared_state = shared_state self.catalog = None @@ -85,6 +86,7 @@ def ensure_catalog_loading(self): Triggers background load if needed """ logger.debug(f">>> ensure_catalog_loading() called, catalog is None: {self.catalog is None}") + if self.catalog is None: logger.info(">>> Calling initialize_catalog()...") self.initialize_catalog() @@ -154,10 +156,47 @@ def generate_chart( # Check cache cache_key = self.get_cache_key(catalog_object) if cache_key in self.chart_cache: - # Return cached base image (without crosshair) + # Return cached base image, adding overlays if needed # Crosshair will be added by add_pulsating_crosshair() each frame logger.debug(f"Chart cache HIT for {cache_key}") - yield self.chart_cache[cache_key] + cached_image = self.chart_cache[cache_key] + + # Make a copy to avoid modifying cached image + image = cached_image.copy() + + # ALWAYS pad to display resolution when display_class is provided + if display_class is not None: + image = pad_to_display_resolution(image, display_class) + + # Add overlays if burn_in requested + if burn_in 
and display_class is not None: + # Add FOV circle + draw = ImageDraw.Draw(image) + width, height = display_class.resolution + cx, cy = width / 2.0, height / 2.0 + radius = min(width, height) / 2.0 - 2 + marker_color = display_class.colors.get(64) + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + draw.ellipse(bbox, outline=marker_color, width=1) + + # Add text overlays + sqm = self.shared_state.sqm() + mag_limit_calculated = self.get_limiting_magnitude(sqm) + equipment = self.config.equipment + fov = equipment.calc_tfov() + mag = equipment.calc_magnification() + + image = add_image_overlays( + image, + display_class, + fov, + mag, + equipment.active_eyepiece, + burn_in=True, + limiting_magnitude=mag_limit_calculated, + ) + + yield image return # Get equipment settings @@ -218,14 +257,25 @@ def generate_chart( logger.info(f">>> Star generator iteration {iteration_count}: got {len(stars)} stars, complete={is_complete}") t_render_start = time.time() - # Render ALL stars from scratch - image = self.render_chart( + # Render ALL stars from scratch (base image without overlays) + base_image = self.render_chart( stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query ) - # Add FOV circle BEFORE text overlays so it appears behind them + # Store base image for caching (without overlays) + final_base_image = base_image + + # Make a copy for display (don't modify the base image) + display_image = base_image.copy() + + # ALWAYS pad to display resolution when display_class is provided + if display_class is not None: + display_image = pad_to_display_resolution(display_image, display_class) + + # Add overlays if burn_in requested if burn_in and display_class is not None: - draw = ImageDraw.Draw(image) + # Add FOV circle BEFORE text overlays so it appears behind them + draw = ImageDraw.Draw(display_image) width, height = display_class.resolution cx, cy = width / 2.0, height / 2.0 radius = min(width, height) / 2.0 - 2 # Leave 2 pixel margin @@ -233,12 +283,9 @@ def generate_chart( bbox = [cx - radius, cy - radius, cx + radius, cy + radius] draw.ellipse(bbox, outline=marker_color, width=1) - # Add overlays (using shared utility) - if burn_in and display_class is not None: - from PiFinder.image_utils import add_image_overlays - - image = add_image_overlays( - image, + # Add text overlays (using shared utility) + display_image = add_image_overlays( + display_image, display_class, fov, mag, @@ -253,11 +300,9 @@ def generate_chart( f"(complete={is_complete}, total_stars={len(stars)})" ) - final_image = image - - # Yield intermediate result (allows UI to update) + # Yield display image (with or without overlays) if not is_complete: - yield image + yield display_image # If complete, will yield final image after loop # Final yield with complete image @@ -266,16 +311,50 @@ def generate_chart( if iteration_count == 0: logger.warning(f">>> WARNING: Star generator yielded NO results! 
FOV={fov:.4f}°, center=({catalog_object.ra:.4f}, {catalog_object.dec:.4f})") + # Generate blank chart (no stars) - this is the base image + final_base_image = self.render_chart( + np.array([]).reshape(0, 3), # Empty star array + catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query + ) - # Cache result (limit cache size to 10 charts) - if final_image is not None: - self.chart_cache[cache_key] = final_image + # Cache base image (without overlays) so it can be reused + if 'final_base_image' in locals() and final_base_image is not None: + self.chart_cache[cache_key] = final_base_image if len(self.chart_cache) > 10: # Remove oldest oldest = next(iter(self.chart_cache)) del self.chart_cache[oldest] - yield final_image + # Create final display image + final_display_image = final_base_image.copy() + + # ALWAYS pad to display resolution when display_class is provided + if display_class is not None: + final_display_image = pad_to_display_resolution(final_display_image, display_class) + + # Add overlays if burn_in requested + if burn_in and display_class is not None: + # Add FOV circle + draw = ImageDraw.Draw(final_display_image) + width, height = display_class.resolution + cx, cy = width / 2.0, height / 2.0 + radius = min(width, height) / 2.0 - 2 + marker_color = display_class.colors.get(64) + bbox = [cx - radius, cy - radius, cx + radius, cy + radius] + draw.ellipse(bbox, outline=marker_color, width=1) + + # Add overlays + final_display_image = add_image_overlays( + final_display_image, + display_class, + fov, + mag, + equipment.active_eyepiece, + burn_in=True, + limiting_magnitude=mag_limit_calculated, + ) + + yield final_display_image else: yield None diff --git a/python/PiFinder/object_images/image_base.py b/python/PiFinder/object_images/image_base.py new file mode 100644 index 000000000..7acb1790a --- /dev/null +++ b/python/PiFinder/object_images/image_base.py @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +Abstract base class for object image providers +""" + +from abc import ABC, abstractmethod +from enum import Enum +from typing import Union, Generator +from PIL import Image + + +class ImageType(Enum): + """Image type enumeration for object images""" + POSS = "poss" # Survey image from disk + GAIA_CHART = "gaia_chart" # Generated star chart + LOADING = "loading" # Loading placeholder + ERROR = "error" # Error placeholder + + +class ImageProvider(ABC): + """ + Base class for object image providers + + Provides a common interface for different image sources: + - POSS/survey images from disk + - Generated Gaia star charts + - Future: SDSS, online images, etc. + """ + + @abstractmethod + def can_provide(self, catalog_object, **kwargs) -> bool: + """ + Check if this provider can supply an image for the given object + + Args: + catalog_object: The astronomical object to image + **kwargs: Additional parameters (config, paths, etc.) + + Returns: + True if this provider can supply an image + """ + pass + + @abstractmethod + def get_image( + self, + catalog_object, + eyepiece_text, + fov, + roll, + display_class, + burn_in=True, + **kwargs + ) -> Union[Image.Image, Generator]: + """ + Get image for catalog object + + Args: + catalog_object: The astronomical object to image + eyepiece_text: Eyepiece description for overlay + fov: Field of view in degrees + roll: Rotation angle in degrees + display_class: Display configuration object + burn_in: Whether to add overlays (FOV, mag, etc.) 
+ **kwargs: Provider-specific parameters + + Returns: + PIL.Image for static images (POSS) + Generator yielding progressive images (Gaia charts) + """ + pass diff --git a/python/PiFinder/image_utils.py b/python/PiFinder/object_images/image_utils.py similarity index 58% rename from python/PiFinder/image_utils.py rename to python/PiFinder/object_images/image_utils.py index 2eb941374..822a0c581 100644 --- a/python/PiFinder/image_utils.py +++ b/python/PiFinder/object_images/image_utils.py @@ -1,10 +1,14 @@ #!/usr/bin/python # -*- coding:utf-8 -*- """ -Shared image utility functions for POSS/SDSS images and generated charts +Shared image utility functions for object images + +Provides common operations for: +- POSS survey images +- Generated Gaia star charts """ -from PIL import Image, ImageDraw +from PIL import Image, ImageDraw, ImageChops def add_image_overlays( @@ -14,8 +18,8 @@ def add_image_overlays( Add FOV/magnification/eyepiece overlays to image This function is shared by: - - POSS/SDSS image display (cat_images.py) - - Generated deep star charts (deep_chart.py) + - POSS image display (poss_provider.py) + - Generated Gaia star charts (chart_provider.py) Args: image: PIL Image to modify @@ -117,17 +121,21 @@ def create_loading_image(display_class, message="Loading...", progress_text=None PIL Image with centered message and progress """ image = Image.new( - "RGB", (display_class.fov_res, display_class.fov_res), (0, 0, 0) + "RGB", display_class.resolution, (0, 0, 0) ) draw = ImageDraw.Draw(image) + # Use center of display for positioning + center_x = display_class.resolution[0] // 2 + center_y = display_class.resolution[1] // 2 + # Draw main message text_bbox = draw.textbbox((0, 0), message, font=display_class.fonts.large.font) text_width = text_bbox[2] - text_bbox[0] text_height = text_bbox[3] - text_bbox[1] - x = (display_class.fov_res - text_width) // 2 - y = (display_class.fov_res - text_height) // 2 - 10 + x = center_x - (text_width // 2) + y = center_y - (text_height // 2) - 20 draw.text( (x, y), @@ -141,7 +149,7 @@ def create_loading_image(display_class, message="Loading...", progress_text=None progress_bbox = draw.textbbox((0, 0), progress_text, font=display_class.fonts.base.font) progress_width = progress_bbox[2] - progress_bbox[0] - px = (display_class.fov_res - progress_width) // 2 + px = center_x - (progress_width // 2) py = y + text_height + 8 draw.text( @@ -153,10 +161,10 @@ def create_loading_image(display_class, message="Loading...", progress_text=None # Draw progress bar if percentage > 0 if progress_percent > 0: - bar_width = int(display_class.fov_res * 0.6) + bar_width = int(display_class.resolution[0] * 0.8) bar_height = 4 - bar_x = (display_class.fov_res - bar_width) // 2 - bar_y = display_class.fov_res - 20 + bar_x = center_x - (bar_width // 2) + bar_y = display_class.resolution[1] - 25 # Background bar draw.rectangle( @@ -179,10 +187,116 @@ def create_loading_image(display_class, message="Loading...", progress_text=None percent_width = percent_bbox[2] - percent_bbox[0] draw.text( - ((display_class.fov_res - percent_width) // 2, bar_y + bar_height + 4), + (center_x - (percent_width // 2), bar_y + bar_height + 4), percent_text, font=display_class.fonts.base.font, fill=(100, 0, 0) ) return image + + +def create_no_image_placeholder(display_class, burn_in=True): + """ + Create a "No Image" placeholder + + Used when neither POSS nor Gaia chart is available + + Args: + display_class: Display configuration object + burn_in: Whether to add text (default True) + + Returns: + 
PIL Image with "No Image" message + """ + image = Image.new("RGB", display_class.resolution) + if burn_in: + draw = ImageDraw.Draw(image) + draw.text( + (30, 50), + "No Image", + font=display_class.fonts.large.font, + fill=display_class.colors.get(128), + ) + return image + + +def apply_circular_vignette(image, display_class): + """ + Apply circular vignette to show eyepiece FOV boundary + + Creates a circular mask that dims everything outside + the eyepiece field of view, then adds a subtle outline. + + Args: + image: PIL Image to modify + display_class: Display configuration object + + Returns: + Modified PIL Image with circular vignette + """ + # Create dimming mask (circle is full brightness, outside is dimmed) + _circle_dim = Image.new( + "RGB", + (display_class.fov_res, display_class.fov_res), + display_class.colors.get(127), # Dim the outside + ) + _circle_draw = ImageDraw.Draw(_circle_dim) + _circle_draw.ellipse( + [2, 2, display_class.fov_res - 2, display_class.fov_res - 2], + fill=display_class.colors.get(255), # Full brightness inside + ) + + # Apply dimming by multiplying + image = ImageChops.multiply(image, _circle_dim) + + # Add subtle outline + draw = ImageDraw.Draw(image) + draw.ellipse( + [2, 2, display_class.fov_res - 2, display_class.fov_res - 2], + outline=display_class.colors.get(64), + width=1, + ) + + return image + + +def pad_to_display_resolution(image, display_class): + """ + Pad image to match display resolution + + If FOV resolution differs from display resolution, + centers the image and pads with black. + + Args: + image: PIL Image to pad + display_class: Display configuration object + + Returns: + Padded PIL Image at display resolution + """ + # Pad horizontally if needed + if display_class.fov_res != display_class.resX: + pad_image = Image.new("RGB", display_class.resolution) + pad_image.paste( + image, + ( + int((display_class.resX - display_class.fov_res) / 2), + 0, + ), + ) + image = pad_image + + # Pad vertically if needed + if display_class.fov_res != display_class.resY: + pad_image = Image.new("RGB", display_class.resolution) + pad_image.paste( + image, + ( + 0, + int((display_class.resY - display_class.fov_res) / 2), + ), + ) + image = pad_image + + return image diff --git a/python/PiFinder/object_images/poss_provider.py b/python/PiFinder/object_images/poss_provider.py new file mode 100644 index 000000000..65692b023 --- /dev/null +++ b/python/PiFinder/object_images/poss_provider.py @@ -0,0 +1,175 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +""" +POSS image provider - loads pre-downloaded survey images from disk +""" + +import os +from PIL import Image +from PiFinder import utils +from PiFinder import image_util +from .image_base import ImageProvider, ImageType +import logging + +logger = logging.getLogger("PiFinder.POSSProvider") + +BASE_IMAGE_PATH = f"{utils.data_dir}/catalog_images" + + +class POSSImageProvider(ImageProvider): + """ + Provides POSS (Palomar Observatory Sky Survey) images from disk + + POSS images are pre-downloaded 1024x1024 JPG files stored in + subdirectories by object ID. 
This provider: + - Loads image from disk + - Rotates for telescope orientation + - Crops to field of view + - Resizes to display resolution + - Converts to red + - Adds circular vignette (optional) + - Adds text overlays (optional) + """ + + def can_provide(self, catalog_object, **kwargs) -> bool: + """Check if POSS image exists on disk""" + image_path = self._resolve_image_name(catalog_object, source="POSS") + return os.path.exists(image_path) + + def get_image( + self, + catalog_object, + eyepiece_text, + fov, + roll, + display_class, + burn_in=True, + magnification=None, + config_object=None, + **kwargs + ) -> Image.Image: + """ + Load and process POSS image + + Returns: + PIL.Image with POSS image processed and overlayed + """ + from .image_utils import ( + apply_circular_vignette, + pad_to_display_resolution, + add_image_overlays, + ) + + # Load image from disk + image_path = self._resolve_image_name(catalog_object, source="POSS") + return_image = Image.open(image_path) + + # Rotate for roll / newtonian orientation + image_rotate = 180 + if roll is not None: + image_rotate += roll + return_image = return_image.rotate(image_rotate) # type: ignore[assignment] + + # Crop to FOV + fov_size = int(1024 * fov / 2) + return_image = return_image.crop( # type: ignore[assignment] + ( + 512 - fov_size, + 512 - fov_size, + 512 + fov_size, + 512 + fov_size, + ) + ) + + # Resize to display resolution + return_image = return_image.resize( # type: ignore[assignment] + (display_class.fov_res, display_class.fov_res), Image.Resampling.LANCZOS + ) + + # Convert to red + return_image = image_util.make_red(return_image, display_class.colors) + + # Add circular vignette if burn_in + if burn_in: + return_image = apply_circular_vignette(return_image, display_class) + + # Pad to display resolution if needed + return_image = pad_to_display_resolution(return_image, display_class) + + # Add text overlays if burn_in + if burn_in: + # Get eyepiece object for overlay + if config_object and hasattr(config_object, "equipment"): + eyepiece_obj = config_object.equipment.active_eyepiece + else: + # Create minimal eyepiece object from text + class FakeEyepiece: + def __init__(self, text): + self.focal_length_mm = 0 + self.name = text + + eyepiece_obj = FakeEyepiece(eyepiece_text) + + return_image = add_image_overlays( + return_image, + display_class, + fov, + magnification, + eyepiece_obj, + burn_in=True, + ) + + # Mark as POSS image + return_image.image_type = ImageType.POSS # type: ignore[attr-defined] + return return_image + + def _resolve_image_name(self, catalog_object, source): + """ + Resolve image path for this object + + Checks primary name and alternatives + + Args: + catalog_object: Object to find image for + source: Image source ("POSS", "SDSS", etc.) 
+ + Returns: + Path to image file, or empty string if not found + """ + + def create_image_path(image_name): + last_char = str(image_name)[-1] + image = f"{BASE_IMAGE_PATH}/{last_char}/{image_name}_{source}.jpg" + exists = os.path.exists(image) + return exists, image + + # Try primary name + image_name = f"{catalog_object.catalog_code}{catalog_object.sequence}" + ok, image = create_image_path(image_name) + + if ok: + catalog_object.image_name = image + return image + + # Try alternatives + for name in catalog_object.names: + alt_image_name = f"{''.join(name.split())}" + ok, image = create_image_path(alt_image_name) + if ok: + catalog_object.image_name = image + return image + + return "" + + +def create_catalog_image_dirs(): + """ + Checks for and creates catalog_image dirs + """ + if not os.path.exists(BASE_IMAGE_PATH): + os.makedirs(BASE_IMAGE_PATH) + + for i in range(0, 10): + _image_dir = f"{BASE_IMAGE_PATH}/{i}" + if not os.path.exists(_image_dir): + os.makedirs(_image_dir) diff --git a/python/PiFinder/star_catalog.py b/python/PiFinder/object_images/star_catalog.py similarity index 100% rename from python/PiFinder/star_catalog.py rename to python/PiFinder/object_images/star_catalog.py diff --git a/python/PiFinder/ui/log.py b/python/PiFinder/ui/log.py index ab3966ebc..1c6f76fef 100644 --- a/python/PiFinder/ui/log.py +++ b/python/PiFinder/ui/log.py @@ -6,7 +6,7 @@ """ -from PiFinder import cat_images +from PiFinder.object_images import get_display_image from PiFinder import obslog from PiFinder.ui.marking_menus import MarkingMenuOption, MarkingMenu from PiFinder.ui.base import UIModule @@ -49,10 +49,10 @@ def __init__(self, *args, **kwargs): roll = solution["Roll"] # Get chart generator singleton for deep chart support - from PiFinder.deep_chart import get_chart_generator - chart_gen = get_chart_generator(self.config_object, self.shared_state) + from PiFinder.object_images.gaia_chart import get_gaia_chart_generator + chart_gen = get_gaia_chart_generator(self.config_object, self.shared_state) - self.object_image = cat_images.get_display_image( + self.object_image = get_display_image( self.object, "POSS", 1, roll, self.display_class, burn_in=False, config_object=self.config_object, shared_state=self.shared_state, diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index 11ebf2987..ff5187c99 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -6,7 +6,9 @@ """ -from PiFinder import cat_images +from PiFinder.object_images import get_display_image +from PiFinder.object_images.image_base import ImageType +from PiFinder.object_images.star_catalog import CatalogState from PiFinder.ui.marking_menus import MarkingMenuOption, MarkingMenu from PiFinder.obj_types import OBJ_TYPES from PiFinder.ui.align import align_on_radec @@ -33,9 +35,7 @@ # Constants for display modes DM_DESC = 0 # Display mode for description DM_LOCATE = 1 # Display mode for LOCATE -DM_POSS = 2 # Display mode for POSS -DM_SDSS = 3 # Display mode for SDSS -DM_CHART = 4 # Display mode for deep chart +DM_IMAGE = 2 # Display mode for images (POSS or Gaia chart) class EyepieceInput: @@ -117,7 +117,7 @@ def __init__(self, *args, **kwargs): self.object_image = None self._chart_generator = None # Active generator for progressive chart updates self._is_showing_loading_chart = False # Track if showing "Loading..." 
for deep chart - self._force_deep_chart = False # Toggle: force deep chart even if POSS image exists + self._force_gaia_chart = False # Toggle: force deep chart even if POSS image exists self.eyepiece_input = EyepieceInput() # Custom eyepiece input handler self.eyepiece_input_display = False # Show eyepiece input popup self._custom_eyepiece = None # Reference to custom eyepiece object in equipment list (None = not active) @@ -138,7 +138,7 @@ def __init__(self, *args, **kwargs): ) # Deep Chart Marking Menu - Settings access - self._deep_chart_marking_menu = MarkingMenu( + self._gaia_chart_marking_menu = MarkingMenu( up=MarkingMenuOption(label=_("SETTINGS"), menu_jump="obj_chart_settings"), right=MarkingMenuOption(label=_("CROSS"), menu_jump="obj_chart_crosshair"), down=MarkingMenuOption(label=_("STYLE"), menu_jump="obj_chart_style"), @@ -189,8 +189,8 @@ def marking_menu(self): """ Return appropriate marking menu based on current view mode """ - if self._is_deep_chart: - return self._deep_chart_marking_menu + if self._is_gaia_chart: + return self._gaia_chart_marking_menu return self._default_marking_menu def _layout_designator(self): @@ -322,26 +322,26 @@ def update_object_info(self): prev_object_image = self.object_image - # Get or create chart generator (owned by UI layer, not cat_images) + # Get or create chart generator (owned by UI layer) logger.info(">>> Getting chart generator...") - chart_gen = self._get_chart_generator() + chart_gen = self._get_gaia_chart_generator() logger.info(f">>> Chart generator obtained, state: {chart_gen.get_catalog_state() if chart_gen else 'None'}") - logger.info(f">>> Calling cat_images.get_display_image with force_deep_chart={self._force_deep_chart}") + logger.info(f">>> Calling get_display_image with force_gaia_chart={self._force_gaia_chart}") # get_display_image returns either an image directly (POSS) or a generator (deep chart) - result = cat_images.get_display_image( + result = get_display_image( self.object, eyepiece_text, tfov, roll, self.display_class, - burn_in=self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART], + burn_in=self.object_display_mode == DM_IMAGE, magnification=magnification, config_object=self.config_object, shared_state=self.shared_state, - chart_generator=chart_gen, # Pass our chart generator to cat_images - force_deep_chart=self._force_deep_chart, # Toggle state + chart_generator=chart_gen, # Pass our chart generator to object_images + force_chart=self._force_gaia_chart, # Toggle state ) # Check if it's a generator (progressive deep chart) or direct image (POSS) @@ -358,26 +358,21 @@ def update_object_info(self): logger.info(f">>> update_object_info() complete, self.object_image is now: {type(self.object_image)}") - # Track if we're showing a "Loading..." placeholder for deep chart - # Check if image has the special "is_loading_placeholder" attribute + # Track if we're showing a "Loading..." 
placeholder for chart self._is_showing_loading_chart = ( self.object_image is not None - and hasattr(self.object_image, 'is_loading_placeholder') - and self.object_image.is_loading_placeholder - and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] + and hasattr(self.object_image, 'image_type') + and self.object_image.image_type == ImageType.LOADING ) - # Detect if we're showing a deep chart (forced or automatic due to no POSS image) - # Deep charts are identified by the is_loading_placeholder attribute (loading or False) - # self._is_deep_chart is now a property - pass @property - def _is_deep_chart(self): + def _is_gaia_chart(self): + """Check if currently displaying a Gaia chart""" return ( self.object_image is not None - and hasattr(self.object_image, 'is_loading_placeholder') - and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] + and hasattr(self.object_image, 'image_type') + and self.object_image.image_type == ImageType.GAIA_CHART ) def active(self): @@ -841,14 +836,14 @@ def _render_pointing_instructions(self): fill=self.colors.get(indicator_color), ) - def _get_chart_generator(self): + def _get_gaia_chart_generator(self): """Get the global chart generator singleton""" - from PiFinder.deep_chart import get_chart_generator + from PiFinder.object_images.gaia_chart import get_gaia_chart_generator import logging logger = logging.getLogger("ObjectDetails") - chart_gen = get_chart_generator(self.config_object, self.shared_state) - logger.info(f">>> _get_chart_generator returning: {chart_gen}") + chart_gen = get_gaia_chart_generator(self.config_object, self.shared_state) + logger.info(f">>> _get_gaia_chart_generator returning: {chart_gen}") return chart_gen def _apply_custom_eyepiece(self): @@ -913,19 +908,16 @@ def update(self, force=True): # Update loading flag based on current image if self.object_image is not None: self._is_showing_loading_chart = ( - hasattr(self.object_image, 'is_loading_placeholder') - and self.object_image.is_loading_placeholder - and self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART] + hasattr(self.object_image, 'image_type') + and self.object_image.image_type == ImageType.LOADING ) # Check if we're showing "Loading..." 
for a deep chart # and if catalog is now ready, regenerate the image if self._is_showing_loading_chart: try: - from PiFinder.star_catalog import CatalogState - # Use cached chart generator to preserve catalog state - chart_gen = self._get_chart_generator() + chart_gen = self._get_gaia_chart_generator() state = chart_gen.get_catalog_state() # logger.debug(f">>> Update check: catalog state = {state}") @@ -946,14 +938,10 @@ def update(self, force=True): # logger.debug(f">>> update(): object_display_mode={self.object_display_mode}...") # logger.debug(f">>> update(): object_image type={type(self.object_image)}...") - # DEBUG: Check if object_image has the is_loading_placeholder attribute (indicates it's a chart) - if self.object_image: - is_chart = hasattr(self.object_image, 'is_loading_placeholder') - # logger.debug(f">>> update(): object_image has is_loading_placeholder={is_chart}...") - if self.object_display_mode in [DM_POSS, DM_SDSS, DM_CHART]: + if self.object_display_mode == DM_IMAGE: # DEBUG: Check if image has any non-black pixels - if self.object_image and self.object_display_mode == DM_CHART: + if self.object_image and self._force_gaia_chart: import numpy as np img_array = np.array(self.object_image) non_zero = np.count_nonzero(img_array) @@ -961,13 +949,20 @@ def update(self, force=True): # logger.debug(f">>> CHART IMAGE DEBUG: non-zero pixels={non_zero}, max_value={max_val}, shape={img_array.shape}") self.screen.paste(self.object_image) + # Recreate draw object to ensure it's in sync with screen after paste + self.draw = ImageDraw.Draw(self.screen, mode="RGBA") # logger.debug(f">>> Image pasted to screen") # DEBUG: Save screen buffer to file for inspection # (Removed per user request) - # If showing deep chart, draw crosshair based on config - if self._force_deep_chart and self.object_image is not None: + # If showing Gaia chart, draw crosshair based on config + is_chart = ( + self.object_image is not None + and hasattr(self.object_image, 'image_type') + and self.object_image.image_type == ImageType.GAIA_CHART + ) + if is_chart: crosshair_mode = self.config_object.get_option("obj_chart_crosshair") crosshair_style = self.config_object.get_option("obj_chart_crosshair_style") @@ -988,17 +983,17 @@ def update(self, force=True): # Force continuous updates for animated crosshairs if crosshair_mode in ["pulse", "fade"]: force = True - else: - # Clear screen when not in image modes (DESC, LOCATE) - self.screen = Image.new("RGB", self.display_class.resolution) - self.draw = ImageDraw.Draw(self.screen, mode="RGBA") + # Note: We do NOT create a new screen/draw here because text layouts + # hold references to self.draw from __init__. The screen was already + # cleared by self.clear_screen() at line 940. if self.object_display_mode == DM_DESC or self.object_display_mode == DM_LOCATE: # catalog and entry field i.e. NGC-311 self.refresh_designator() desc_available_lines = 4 - desig = self.texts["designator"] - desig.draw((0, 20)) + desig = self.texts.get("designator") + if desig: + desig.draw((0, 20)) # Object TYPE and Constellation i.e. 
'Galaxy PER' typeconst = self.texts.get("type-const") @@ -1253,31 +1248,23 @@ def key_minus(self): def key_number(self, number): """ Handle number key presses - When viewing image (DM_POSS/DM_CHART): - - 0: Toggle between POSS image and deep chart (only if no input active) + When viewing image (DM_IMAGE): + - 0: Toggle between POSS image and Gaia chart (only if no input active) - 1-9: Start custom eyepiece input - After first digit, 0-9 adds second digit or completes input """ logger.info(f">>> key_number({number}) called") # Only handle custom eyepiece input in image display modes - if self.object_display_mode not in [DM_POSS, DM_SDSS, DM_CHART]: + if self.object_display_mode != DM_IMAGE: return # Special case: 0 when no input is active toggles POSS/chart if number == 0 and not self.eyepiece_input_display: - logger.info(f">>> Toggling _force_deep_chart (was: {self._force_deep_chart})") + logger.info(f">>> Toggling _force_gaia_chart (was: {self._force_gaia_chart})") # Toggle the flag - self._force_deep_chart = not self._force_deep_chart - logger.info(f">>> _force_deep_chart now: {self._force_deep_chart}") - - # Set appropriate display mode: DM_CHART for deep chart, DM_POSS for POSS image - if self._force_deep_chart: - logger.info(f">>> Setting object_display_mode to DM_CHART (was {self.object_display_mode})") - self.object_display_mode = DM_CHART - else: - logger.info(f">>> Setting object_display_mode to DM_POSS (was {self.object_display_mode})") - self.object_display_mode = DM_POSS + self._force_gaia_chart = not self._force_gaia_chart + logger.info(f">>> _force_gaia_chart now: {self._force_gaia_chart}") # Reload image with new setting logger.info(">>> Calling update_object_info()...") diff --git a/python/tests/test_limiting_magnitude.py b/python/tests/test_limiting_magnitude.py index 6efee7d57..66dae6502 100644 --- a/python/tests/test_limiting_magnitude.py +++ b/python/tests/test_limiting_magnitude.py @@ -4,7 +4,7 @@ """ import pytest -from PiFinder.deep_chart import DeepChartGenerator +from PiFinder.object_images.gaia_chart import GaiaChartGenerator class TestFeijthComelloFormula: @@ -26,7 +26,7 @@ def test_reference_calculation(self): M = 400.0 # Magnification t = 0.54 # Transmission - result = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, t) + result = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, t) # Should be 13.36 according to reference (allow 0.1 mag tolerance) assert abs(result - 13.36) < 0.1, f"Expected ~13.36, got {result:.2f}" @@ -39,7 +39,7 @@ def test_unobstructed_telescope(self): M = 100.0 t = 0.85 - result = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, t) + result = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, t) # Should give reasonable result (12-14 range for 200mm scope) assert 10.0 < result < 15.0, f"Result {result:.2f} outside expected range" @@ -54,9 +54,9 @@ def test_higher_magnification_improves_lm(self): d = 0.0 t = 0.85 - lm_40x = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 40.0, t) - lm_100x = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 100.0, t) - lm_200x = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 200.0, t) + lm_40x = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 40.0, t) + lm_100x = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 100.0, t) + lm_200x = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, 200.0, t) # Higher magnification should give better (larger number) limiting 
magnitude assert lm_100x > lm_40x, f"100x ({lm_100x:.2f}) should be > 40x ({lm_40x:.2f})" @@ -69,9 +69,9 @@ def test_larger_aperture_improves_lm(self): M = 100.0 t = 0.85 - lm_80mm = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, 8.0, d, M, t) - lm_150mm = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, 15.0, d, M, t) - lm_250mm = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, 25.0, d, M, t) + lm_80mm = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, 8.0, d, M, t) + lm_150mm = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, 15.0, d, M, t) + lm_250mm = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, 25.0, d, M, t) # Larger aperture should give better limiting magnitude assert lm_150mm > lm_80mm, f"150mm ({lm_150mm:.2f}) should be > 80mm ({lm_80mm:.2f})" @@ -84,8 +84,8 @@ def test_obstruction_reduces_lm(self): M = 100.0 t = 0.85 - lm_no_obstruction = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, 0.0, M, t) - lm_with_obstruction = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, 5.0, M, t) + lm_no_obstruction = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, 0.0, M, t) + lm_with_obstruction = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, 5.0, M, t) # Obstruction should reduce limiting magnitude assert lm_no_obstruction > lm_with_obstruction, \ @@ -98,8 +98,8 @@ def test_better_transmission_improves_lm(self): d = 0.0 M = 100.0 - lm_poor_transmission = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, 0.50) - lm_good_transmission = DeepChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, 0.85) + lm_poor_transmission = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, 0.50) + lm_good_transmission = GaiaChartGenerator.feijth_comello_limiting_magnitude(mv, D, d, M, 0.85) # Better transmission should give better limiting magnitude assert lm_good_transmission > lm_poor_transmission, \ @@ -115,8 +115,8 @@ def test_darker_sky_improves_naked_eye_lm(self): M = 100.0 t = 0.85 - lm_bright_sky = DeepChartGenerator.feijth_comello_limiting_magnitude(5.0, D, d, M, t) - lm_dark_sky = DeepChartGenerator.feijth_comello_limiting_magnitude(6.5, D, d, M, t) + lm_bright_sky = GaiaChartGenerator.feijth_comello_limiting_magnitude(5.0, D, d, M, t) + lm_dark_sky = GaiaChartGenerator.feijth_comello_limiting_magnitude(6.5, D, d, M, t) # Darker sky should give better limiting magnitude assert lm_dark_sky > lm_bright_sky, \ diff --git a/python/tests/test_star_catalog.py b/python/tests/test_star_catalog.py index 883450da9..afcfe85d8 100644 --- a/python/tests/test_star_catalog.py +++ b/python/tests/test_star_catalog.py @@ -9,7 +9,7 @@ from unittest.mock import MagicMock, patch import sys -from PiFinder.star_catalog import DeepStarCatalog, STAR_RECORD_DTYPE, STAR_RECORD_SIZE +from PiFinder.object_images.star_catalog import DeepStarCatalog, STAR_RECORD_DTYPE, STAR_RECORD_SIZE class TestDeepStarCatalog(unittest.TestCase): def setUp(self): From 3854d6037d91431ad1b143fffccd54cad5721bc1 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Tue, 2 Dec 2025 18:46:31 +0100 Subject: [PATCH 26/27] Refactored, using gaia consistently --- python/PiFinder/camera_debug.py | 2 +- python/PiFinder/main.py | 10 +- .../PiFinder/object_images/chart_provider.py | 14 +- python/PiFinder/object_images/gaia_chart.py | 20 +-- python/PiFinder/object_images/star_catalog.py | 12 +- python/PiFinder/ui/lm_entry.py | 29 +---- python/PiFinder/ui/log.py | 2 +- python/PiFinder/ui/object_details.py | 120 
+++++------------- python/tests/test_star_catalog.py | 6 +- 9 files changed, 66 insertions(+), 149 deletions(-) diff --git a/python/PiFinder/camera_debug.py b/python/PiFinder/camera_debug.py index 3870570a4..d1222a3dc 100644 --- a/python/PiFinder/camera_debug.py +++ b/python/PiFinder/camera_debug.py @@ -49,7 +49,7 @@ def setup_debug_images(self) -> None: self.images = list(zip(range(1, len(images) + 1), images)) self.image_cycle = cycle(self.images) self.last_image_time: float = time.time() - self.current_image_num, self.last_image = self.images[0] + self.current_image_num, self.last_image = self.images[1] # Use darker sky image def initialize(self) -> None: self._camera_started = True diff --git a/python/PiFinder/main.py b/python/PiFinder/main.py index 8680f1683..093089dc1 100644 --- a/python/PiFinder/main.py +++ b/python/PiFinder/main.py @@ -521,15 +521,15 @@ def main( _new_filter.load_from_config(cfg) catalogs.set_catalog_filter(_new_filter) - # Initialize deep chart generator in background to avoid first-use delay - console.write(" Deep Charts") + # Initialize Gaia chart generator in background to avoid first-use delay + console.write(" Gaia Charts") console.update() - logger.info(" Initializing deep chart generator...") + logger.info(" Initializing Gaia chart generator...") from PiFinder.object_images.gaia_chart import get_gaia_chart_generator chart_gen = get_gaia_chart_generator(cfg, shared_state) # Trigger background loading so catalog is ready when needed chart_gen.ensure_catalog_loading() - logger.info(" Deep chart background loading started") + logger.info(" Gaia chart background loading started") console.write(" Menus") console.update() @@ -683,7 +683,7 @@ def main( except queue.Empty: pass - # Deep catalog loading removed - now lazy-loads on first chart view + # Gaia catalog loading removed - now lazy-loads on first chart view # (object_images triggers loading when needed) # ui queue diff --git a/python/PiFinder/object_images/chart_provider.py b/python/PiFinder/object_images/chart_provider.py index 616ce2b46..63bfd8883 100644 --- a/python/PiFinder/object_images/chart_provider.py +++ b/python/PiFinder/object_images/chart_provider.py @@ -19,7 +19,7 @@ class ChartImageProvider(ImageProvider): Provides dynamically generated Gaia star charts Uses the GaiaChartGenerator to create on-demand star charts - from the HEALPix-indexed deep star catalog. Returns a generator + from the HEALPix-indexed Gaia star catalog. Returns a generator that yields progressive updates as magnitude bands load. 
""" @@ -39,10 +39,10 @@ def can_provide(self, catalog_object, **kwargs) -> bool: """ Check if Gaia chart can be generated - Returns True if deep star catalog exists + Returns True if Gaia star catalog exists """ - deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") - return deep_catalog_path.exists() + gaia_catalog_path = Path(utils.astro_data_dir, "gaia_stars", "metadata.json") + return gaia_catalog_path.exists() def get_image( self, @@ -77,10 +77,10 @@ def get_image( self.config_object, self.shared_state ) - deep_catalog_path = Path(utils.astro_data_dir, "deep_stars", "metadata.json") + gaia_catalog_path = Path(utils.astro_data_dir, "gaia_stars", "metadata.json") - if not deep_catalog_path.exists(): - logger.warning(f"Gaia star catalog not found at {deep_catalog_path}") + if not gaia_catalog_path.exists(): + logger.warning(f"Gaia star catalog not found at {gaia_catalog_path}") placeholder = create_no_image_placeholder(display_class, burn_in=burn_in) yield placeholder return diff --git a/python/PiFinder/object_images/gaia_chart.py b/python/PiFinder/object_images/gaia_chart.py index e522b7afe..22ea9c65d 100644 --- a/python/PiFinder/object_images/gaia_chart.py +++ b/python/PiFinder/object_images/gaia_chart.py @@ -3,7 +3,7 @@ """ Gaia star chart generator for objects without DSS/POSS images -Generates on-demand star charts using HEALPix-indexed deep star catalog. +Generates on-demand star charts using HEALPix-indexed Gaia star catalog. Features: - Equipment-aware FOV and magnitude limits - Stereographic projection (matching chart.py) @@ -20,7 +20,7 @@ from PIL import Image, ImageDraw, ImageFont from PiFinder import utils -from PiFinder.object_images.star_catalog import CatalogState, DeepStarCatalog +from PiFinder.object_images.star_catalog import CatalogState, GaiaStarCatalog from PiFinder.object_images.image_utils import pad_to_display_resolution, add_image_overlays logger = logging.getLogger("PiFinder.GaiaChart") @@ -108,22 +108,22 @@ def ensure_catalog_loading(self): def initialize_catalog(self): """Create catalog instance (doesn't load data yet)""" - catalog_path = Path(utils.astro_data_dir, "deep_stars") + catalog_path = Path(utils.astro_data_dir, "gaia_stars") logger.info(f">>> initialize_catalog() - catalog_path: {catalog_path}") # Check if catalog exists before initializing metadata_file = catalog_path / "metadata.json" if not metadata_file.exists(): - logger.warning(f"Deep star catalog not found at {catalog_path}") + logger.warning(f"Gaia star catalog not found at {catalog_path}") logger.warning("To build catalog, run: python -m PiFinder.catalog_tools.gaia_downloader --mag-limit 12 --output /tmp/gaia.csv") - logger.warning("Then: python -m PiFinder.catalog_tools.healpix_builder --input /tmp/gaia.csv --output {}/astro_data/deep_stars".format(Path.home() / "PiFinder")) + logger.warning("Then: python -m PiFinder.catalog_tools.healpix_builder --input /tmp/gaia.csv --output {}/astro_data/gaia_stars".format(Path.home() / "PiFinder")) - logger.info(f">>> Creating DeepStarCatalog instance...") + logger.info(f">>> Creating GaiaStarCatalog instance...") import time t0 = time.time() - self.catalog = DeepStarCatalog(str(catalog_path)) + self.catalog = GaiaStarCatalog(str(catalog_path)) t_init = (time.time() - t0) * 1000 - logger.info(f">>> DeepStarCatalog.__init__() took {t_init:.1f}ms") + logger.info(f">>> GaiaStarCatalog.__init__() took {t_init:.1f}ms") logger.info(f">>> Catalog initialized: {catalog_path}, state: {self.catalog.state}") def generate_chart( @@ -545,7 
+545,7 @@ def render_chart( t_end = time.time() logger.debug(f" Total render time: {(t_end-t_start)*1000:.1f}ms") - # Tag image as a deep chart (not a loading placeholder) + # Tag image as a Gaia chart (not a loading placeholder) # This enables the correct marking menu in UIObjectDetails image.is_loading_placeholder = False # type: ignore[attr-defined] @@ -696,7 +696,7 @@ def render_chart_incremental( image = Image.fromarray(image_array, mode="RGB") - # Tag as deep chart + # Tag as Gaia chart image.is_loading_placeholder = False # type: ignore[attr-defined] t_end = time.time() diff --git a/python/PiFinder/object_images/star_catalog.py b/python/PiFinder/object_images/star_catalog.py index 8e139f587..7526984f3 100644 --- a/python/PiFinder/object_images/star_catalog.py +++ b/python/PiFinder/object_images/star_catalog.py @@ -3,7 +3,7 @@ """ HEALPix-indexed star catalog loader with background loading and CPU throttling -This module provides efficient loading of deep star catalogs for chart generation. +This module provides efficient loading of Gaia star catalogs for chart generation. Features: - Background loading with thread safety - CPU throttling to avoid blocking other processes @@ -166,12 +166,12 @@ def __del__(self): self.close() -class DeepStarCatalog: +class GaiaStarCatalog: """ HEALPix-indexed star catalog with background loading Usage: - catalog = DeepStarCatalog("/path/to/deep_stars") + catalog = GaiaStarCatalog("/path/to/gaia_stars") catalog.start_background_load(observer_lat=40.0, limiting_mag=14.0) # ... wait for catalog.state == CatalogState.READY ... stars = catalog.get_stars_for_fov(ra=180.0, dec=45.0, fov=10.0, mag_limit=12.0) @@ -182,9 +182,9 @@ def __init__(self, catalog_path: str): Initialize catalog (doesn't load data yet) Args: - catalog_path: Path to deep_stars directory containing metadata.json + catalog_path: Path to gaia_stars directory containing metadata.json """ - logger.info(f">>> DeepStarCatalog.__init__() called with path: {catalog_path}") + logger.info(f">>> GaiaStarCatalog.__init__() called with path: {catalog_path}") self.catalog_path = Path(catalog_path) self.state = CatalogState.NOT_LOADED self.metadata: Optional[Dict[str, Any]] = None @@ -200,7 +200,7 @@ def __init__(self, catalog_path: str): self._index_cache: Dict[str, Any] = {} # Cache of existing tile IDs per magnitude band to avoid scanning for non-existent tiles self._existing_tiles_cache: Dict[str, Set[int]] = {} - logger.info(f">>> DeepStarCatalog.__init__() completed") + logger.info(f">>> GaiaStarCatalog.__init__() completed") def start_background_load( self, observer_lat: Optional[float] = None, limiting_mag: float = 12.0 diff --git a/python/PiFinder/ui/lm_entry.py b/python/PiFinder/ui/lm_entry.py index e2b8cf0b3..a51bc0da5 100644 --- a/python/PiFinder/ui/lm_entry.py +++ b/python/PiFinder/ui/lm_entry.py @@ -174,50 +174,27 @@ def key_left(self): def key_right(self): """Accept - save value and exit""" - import logging - logger = logging.getLogger("UILMEntry") - logger.info(">>> key_right() called!") - try: - # Convert digits to string, replacing spaces with nothing won't work - # We need at least one digit before decimal and one after value_str = "".join(self.digits).strip() - logger.info(f"LM entry: digits={self.digits}, value_str='{value_str}'") - # Check if we have any actual digits (not just spaces and decimal) if value_str.replace('.', '').replace(' ', '') == '': - # No digits entered, reject - logger.info("LM entry rejected: no digits") return False - # Replace remaining spaces with 0 for 
parsing value_str = value_str.replace(' ', '0') final_value = float(value_str) - logger.info(f"LM entry: parsed value={final_value}") - # Validate range if final_value < 5.0 or final_value > 20.0: - # Out of range, reject - logger.info(f"LM entry rejected: out of range (5.0-20.0)") return False - logger.info(f"LM entry accepted: {final_value}") self.config_object.set_option(self.config_option, final_value) - - # Also set the mode to "fixed" since user entered a value self.config_object.set_option("obj_chart_lm_mode", "fixed") - # No need to invalidate cache - cache key includes LM so different - # LM values will automatically get separate cache entries - - logger.info("Calling remove_from_stack() to exit LM entry screen") - # Exit the screen by removing from stack + # Exit: LM entry -> LM menu -> back to chart if self.remove_from_stack: self.remove_from_stack() + self.remove_from_stack() return True - except ValueError as e: - # Invalid value, don't accept - logger.error(f"LM entry ValueError: {e}") + except ValueError: return False def active(self): diff --git a/python/PiFinder/ui/log.py b/python/PiFinder/ui/log.py index 1c6f76fef..f1544825b 100644 --- a/python/PiFinder/ui/log.py +++ b/python/PiFinder/ui/log.py @@ -48,7 +48,7 @@ def __init__(self, *args, **kwargs): if solution: roll = solution["Roll"] - # Get chart generator singleton for deep chart support + # Get chart generator singleton for Gaia chart support from PiFinder.object_images.gaia_chart import get_gaia_chart_generator chart_gen = get_gaia_chart_generator(self.config_object, self.shared_state) diff --git a/python/PiFinder/ui/object_details.py b/python/PiFinder/ui/object_details.py index ff5187c99..c2f15a2af 100644 --- a/python/PiFinder/ui/object_details.py +++ b/python/PiFinder/ui/object_details.py @@ -116,8 +116,8 @@ def __init__(self, *args, **kwargs): self.object_display_mode = DM_LOCATE self.object_image = None self._chart_generator = None # Active generator for progressive chart updates - self._is_showing_loading_chart = False # Track if showing "Loading..." for deep chart - self._force_gaia_chart = False # Toggle: force deep chart even if POSS image exists + self._is_showing_loading_chart = False # Track if showing "Loading..." 
for Gaia chart + self._force_gaia_chart = False # Toggle: force Gaia chart even if POSS image exists self.eyepiece_input = EyepieceInput() # Custom eyepiece input handler self.eyepiece_input_display = False # Show eyepiece input popup self._custom_eyepiece = None # Reference to custom eyepiece object in equipment list (None = not active) @@ -137,7 +137,7 @@ def __init__(self, *args, **kwargs): ), ) - # Deep Chart Marking Menu - Settings access + # Gaia Chart Marking Menu - Settings access self._gaia_chart_marking_menu = MarkingMenu( up=MarkingMenuOption(label=_("SETTINGS"), menu_jump="obj_chart_settings"), right=MarkingMenuOption(label=_("CROSS"), menu_jump="obj_chart_crosshair"), @@ -329,7 +329,7 @@ def update_object_info(self): logger.info(f">>> Calling get_display_image with force_gaia_chart={self._force_gaia_chart}") - # get_display_image returns either an image directly (POSS) or a generator (deep chart) + # get_display_image returns either an image directly (POSS) or a generator (Gaia chart) result = get_display_image( self.object, eyepiece_text, @@ -344,7 +344,7 @@ def update_object_info(self): force_chart=self._force_gaia_chart, # Toggle state ) - # Check if it's a generator (progressive deep chart) or direct image (POSS) + # Check if it's a generator (progressive Gaia chart) or direct image (POSS) if hasattr(result, '__iter__') and hasattr(result, '__next__'): # It's a generator - store it for progressive consumption by update() logger.info(">>> get_display_image returned GENERATOR, storing for progressive updates...") @@ -505,18 +505,10 @@ def _draw_crosshair_circle(self, mode="off"): color_intensity = 64 radius = 4 # Smaller fixed size - # Create a separate layer for the crosshair - crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) - crosshair_draw = ImageDraw.Draw(crosshair_layer) - - # Draw circle on the layer + # Draw directly on screen marker_color = (color_intensity, 0, 0) bbox = [cx - radius, cy - radius, cx + radius, cy + radius] - crosshair_draw.ellipse(bbox, outline=marker_color, width=1) - - # Use lighten blend: take the lighter of the two values for each pixel - self.screen = ImageChops.lighter(self.screen, crosshair_layer) - self.draw = ImageDraw.Draw(self.screen) + self.draw.ellipse(bbox, outline=marker_color, width=1) def _draw_crosshair_bullseye(self, mode="off"): """ @@ -539,19 +531,11 @@ def _draw_crosshair_bullseye(self, mode="off"): color_intensity = 64 radii = [2, 4, 6] # Smaller fixed radii - # Create a separate layer for the crosshair - crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) - crosshair_draw = ImageDraw.Draw(crosshair_layer) - - # Draw concentric circles on the layer + # Draw directly on screen marker_color = (color_intensity, 0, 0) for radius in radii: bbox = [cx - radius, cy - radius, cx + radius, cy + radius] - crosshair_draw.ellipse(bbox, outline=marker_color, width=1) - - # Use lighten blend - self.screen = ImageChops.lighter(self.screen, crosshair_layer) - self.draw = ImageDraw.Draw(self.screen) + self.draw.ellipse(bbox, outline=marker_color, width=1) def _draw_crosshair_brackets(self, mode="off"): """ @@ -576,32 +560,24 @@ def _draw_crosshair_brackets(self, mode="off"): size = 4 # Smaller distance from center to bracket corner length = 3 # Shorter bracket arms - # Create a separate layer for the crosshair - crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) - crosshair_draw = ImageDraw.Draw(crosshair_layer) - + # Draw directly on screen marker_color = (color_intensity, 0, 0) - # Draw brackets on the 
layer # Top-left bracket - crosshair_draw.line([cx - size, cy - size, cx - size + length, cy - size], fill=marker_color, width=1) - crosshair_draw.line([cx - size, cy - size, cx - size, cy - size + length], fill=marker_color, width=1) + self.draw.line([cx - size, cy - size, cx - size + length, cy - size], fill=marker_color, width=1) + self.draw.line([cx - size, cy - size, cx - size, cy - size + length], fill=marker_color, width=1) # Top-right bracket - crosshair_draw.line([cx + size - length, cy - size, cx + size, cy - size], fill=marker_color, width=1) - crosshair_draw.line([cx + size, cy - size, cx + size, cy - size + length], fill=marker_color, width=1) + self.draw.line([cx + size - length, cy - size, cx + size, cy - size], fill=marker_color, width=1) + self.draw.line([cx + size, cy - size, cx + size, cy - size + length], fill=marker_color, width=1) # Bottom-left bracket - crosshair_draw.line([cx - size, cy + size, cx - size + length, cy + size], fill=marker_color, width=1) - crosshair_draw.line([cx - size, cy + size - length, cx - size, cy + size], fill=marker_color, width=1) + self.draw.line([cx - size, cy + size, cx - size + length, cy + size], fill=marker_color, width=1) + self.draw.line([cx - size, cy + size - length, cx - size, cy + size], fill=marker_color, width=1) # Bottom-right bracket - crosshair_draw.line([cx + size - length, cy + size, cx + size, cy + size], fill=marker_color, width=1) - crosshair_draw.line([cx + size, cy + size - length, cx + size, cy + size], fill=marker_color, width=1) - - # Use lighten blend - self.screen = ImageChops.lighter(self.screen, crosshair_layer) - self.draw = ImageDraw.Draw(self.screen) + self.draw.line([cx + size - length, cy + size, cx + size, cy + size], fill=marker_color, width=1) + self.draw.line([cx + size, cy + size - length, cx + size, cy + size], fill=marker_color, width=1) def _draw_crosshair_dots(self, mode="off"): """ @@ -626,10 +602,7 @@ def _draw_crosshair_dots(self, mode="off"): distance = 4 # Smaller distance from center to dots dot_size = 1 # Smaller dot radius - # Create a separate layer for the crosshair - crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) - crosshair_draw = ImageDraw.Draw(crosshair_layer) - + # Draw directly on screen marker_color = (color_intensity, 0, 0) # Four corner dots @@ -642,11 +615,7 @@ def _draw_crosshair_dots(self, mode="off"): for x, y in positions: bbox = [x - dot_size, y - dot_size, x + dot_size, y + dot_size] - crosshair_draw.ellipse(bbox, fill=marker_color) - - # Use lighten blend - self.screen = ImageChops.lighter(self.screen, crosshair_layer) - self.draw = ImageDraw.Draw(self.screen) + self.draw.ellipse(bbox, fill=marker_color) def _draw_crosshair_cross(self, mode="off"): """ @@ -665,20 +634,13 @@ def _draw_crosshair_cross(self, mode="off"): else: color_intensity = 64 - # Create a separate layer for the crosshair - crosshair_layer = Image.new("RGB", (width, height), (0, 0, 0)) - crosshair_draw = ImageDraw.Draw(crosshair_layer) - + # Draw directly on screen marker_color = (color_intensity, 0, 0) # Horizontal line - crosshair_draw.line([0, cy, width, cy], fill=marker_color, width=1) + self.draw.line([0, cy, width, cy], fill=marker_color, width=1) # Vertical line - crosshair_draw.line([cx, 0, cx, height], fill=marker_color, width=1) - - # Use lighten blend - self.screen = ImageChops.lighter(self.screen, crosshair_layer) - self.draw = ImageDraw.Draw(self.screen) + self.draw.line([cx, 0, cx, height], fill=marker_color, width=1) def _draw_fov_circle(self): """ @@ -912,7 +874,7 @@ def 
update(self, force=True): and self.object_image.image_type == ImageType.LOADING ) - # Check if we're showing "Loading..." for a deep chart + # Check if we're showing "Loading..." for a Gaia chart # and if catalog is now ready, regenerate the image if self._is_showing_loading_chart: try: @@ -930,31 +892,14 @@ def update(self, force=True): except Exception as e: logger.error(f">>> Update check failed: {e}", exc_info=True) pass - # Clear Screen - self.clear_screen() - - # paste image - # paste image - # logger.debug(f">>> update(): object_display_mode={self.object_display_mode}...") - # logger.debug(f">>> update(): object_image type={type(self.object_image)}...") - - - if self.object_display_mode == DM_IMAGE: - # DEBUG: Check if image has any non-black pixels - if self.object_image and self._force_gaia_chart: - import numpy as np - img_array = np.array(self.object_image) - non_zero = np.count_nonzero(img_array) - max_val = np.max(img_array) - # logger.debug(f">>> CHART IMAGE DEBUG: non-zero pixels={non_zero}, max_value={max_val}, shape={img_array.shape}") + # Clear screen + self.draw.rectangle( + [0, 0, self.display_class.resX, self.display_class.resY], + fill=self.colors.get(0), + ) + if self.object_display_mode == DM_IMAGE and self.object_image: self.screen.paste(self.object_image) - # Recreate draw object to ensure it's in sync with screen after paste - self.draw = ImageDraw.Draw(self.screen, mode="RGBA") - # logger.debug(f">>> Image pasted to screen") - - # DEBUG: Save screen buffer to file for inspection - # (Removed per user request) # If showing Gaia chart, draw crosshair based on config is_chart = ( @@ -967,7 +912,6 @@ def update(self, force=True): crosshair_style = self.config_object.get_option("obj_chart_crosshair_style") if crosshair_mode != "off": - # Call the appropriate drawing method based on style style_methods = { "simple": self._draw_crosshair_simple, "circle": self._draw_crosshair_circle, @@ -980,12 +924,8 @@ def update(self, force=True): draw_method = style_methods.get(crosshair_style, self._draw_crosshair_simple) draw_method(mode=crosshair_mode) - # Force continuous updates for animated crosshairs if crosshair_mode in ["pulse", "fade"]: force = True - # Note: We do NOT create a new screen/draw here because text layouts - # hold references to self.draw from __init__. The screen was already - # cleared by self.clear_screen() at line 940. if self.object_display_mode == DM_DESC or self.object_display_mode == DM_LOCATE: # catalog and entry field i.e. 
NGC-311 diff --git a/python/tests/test_star_catalog.py b/python/tests/test_star_catalog.py index afcfe85d8..1fda91962 100644 --- a/python/tests/test_star_catalog.py +++ b/python/tests/test_star_catalog.py @@ -9,13 +9,13 @@ from unittest.mock import MagicMock, patch import sys -from PiFinder.object_images.star_catalog import DeepStarCatalog, STAR_RECORD_DTYPE, STAR_RECORD_SIZE +from PiFinder.object_images.star_catalog import GaiaStarCatalog, STAR_RECORD_DTYPE, STAR_RECORD_SIZE -class TestDeepStarCatalog(unittest.TestCase): +class TestGaiaStarCatalog(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() self.catalog_path = Path(self.test_dir) - self.catalog = DeepStarCatalog(str(self.catalog_path)) + self.catalog = GaiaStarCatalog(str(self.catalog_path)) self.catalog.nside = 512 def tearDown(self): From 2add5f2b740258d18a59eb39b783e37bbcd55d93 Mon Sep 17 00:00:00 2001 From: Mike Rosseel Date: Fri, 26 Dec 2025 23:56:08 +0100 Subject: [PATCH 27/27] fix rotation --- python/PiFinder/object_images/gaia_chart.py | 39 ++++++++++++++----- .../PiFinder/object_images/poss_provider.py | 26 ++++++++++++- 2 files changed, 53 insertions(+), 12 deletions(-) diff --git a/python/PiFinder/object_images/gaia_chart.py b/python/PiFinder/object_images/gaia_chart.py index 22ea9c65d..75af05191 100644 --- a/python/PiFinder/object_images/gaia_chart.py +++ b/python/PiFinder/object_images/gaia_chart.py @@ -238,14 +238,25 @@ def generate_chart( mag_limit=mag_limit_query, ) - # Calculate rotation angle for roll / Newtonian orientation - # TODO: Should use telescope type from config (Newtonian vs Refractor) - # For now, hardcoded 180° to match existing cat_images.py behavior - # Add 90° clockwise rotation to match POSS image orientation - image_rotate = 180 + 90 # Newtonian inverts image + 90° CW alignment + # Calculate rotation angle for roll / telescope orientation + # Reflectors (Newtonian, SCT) invert the image 180° + # Refractors typically don't invert (depends on eyepiece design) + # Use obstruction as heuristic: obstruction > 0 = reflector + telescope = equipment.active_telescope + if telescope and telescope.obstruction_perc > 0: + # Reflector telescope (Newtonian, SCT) - inverts image + image_rotate = 180 + else: + # Refractor or unknown - no base rotation + image_rotate = 0 + if roll is not None: image_rotate += roll + # Get flip/flop settings from telescope config + flip_image = telescope.flip_image if telescope else False + flop_image = telescope.flop_image if telescope else False + # Progressive rendering: Yield image after each magnitude band loads # Re-render all stars each time (simple, correct, fast enough) final_image = None @@ -259,7 +270,8 @@ def generate_chart( # Render ALL stars from scratch (base image without overlays) base_image = self.render_chart( - stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query + stars, catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query, + flip_image=flip_image, flop_image=flop_image ) # Store base image for caching (without overlays) @@ -314,7 +326,8 @@ def generate_chart( # Generate blank chart (no stars) - this is the base image final_base_image = self.render_chart( np.array([]).reshape(0, 3), # Empty star array - catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query + catalog_object.ra, catalog_object.dec, fov, resolution, mag, image_rotate, mag_limit_query, + flip_image=flip_image, flop_image=flop_image ) # Cache base image 
(without overlays) so it can be reused @@ -368,6 +381,8 @@ def render_chart( magnification: float = 50.0, rotation: float = 0.0, mag_limit: float = 17.0, + flip_image: bool = False, + flop_image: bool = False, ) -> Image.Image: """ Render stars to PIL Image with center crosshair @@ -534,9 +549,13 @@ def render_chart( t6 = time.time() logger.debug(f" Image conversion: {(t6-t5)*1000:.1f}ms") - # NOTE: No vertical flip needed - rotation already handles telescope orientation - # POSS images use rotation only (180° + roll), so we match that behavior - # The 180° rotation in generate_chart() inverts the image for Newtonian telescopes + # Apply telescope flip/flop transformations + # flip_image = vertical flip (mirror top to bottom) + # flop_image = horizontal flip (mirror left to right) + if flip_image: + image = image.transpose(Image.Transpose.FLIP_TOP_BOTTOM) + if flop_image: + image = image.transpose(Image.Transpose.FLIP_LEFT_RIGHT) # Note: Limiting magnitude display added by add_image_overlays() in generate_chart() # Note: Pulsating crosshair added separately via add_pulsating_crosshair() diff --git a/python/PiFinder/object_images/poss_provider.py b/python/PiFinder/object_images/poss_provider.py index 65692b023..b964b4c64 100644 --- a/python/PiFinder/object_images/poss_provider.py +++ b/python/PiFinder/object_images/poss_provider.py @@ -64,12 +64,34 @@ def get_image( image_path = self._resolve_image_name(catalog_object, source="POSS") return_image = Image.open(image_path) - # Rotate for roll / newtonian orientation - image_rotate = 180 + # Rotate for roll / telescope orientation + # Reflectors (Newtonian, SCT) invert the image 180° + # Refractors typically don't invert (depends on eyepiece design) + # Use obstruction as heuristic: obstruction > 0 = reflector + telescope = None + if config_object and hasattr(config_object, "equipment"): + telescope = config_object.equipment.active_telescope + + if telescope and telescope.obstruction_perc > 0: + # Reflector telescope (Newtonian, SCT) - inverts image + image_rotate = 180 + else: + # Refractor or unknown - no base rotation + image_rotate = 0 + if roll is not None: image_rotate += roll return_image = return_image.rotate(image_rotate) # type: ignore[assignment] + # Apply telescope flip/flop transformations + # flip_image = vertical flip (mirror top to bottom) + # flop_image = horizontal flip (mirror left to right) + if telescope: + if telescope.flip_image: + return_image = return_image.transpose(Image.Transpose.FLIP_TOP_BOTTOM) # type: ignore[assignment] + if telescope.flop_image: + return_image = return_image.transpose(Image.Transpose.FLIP_LEFT_RIGHT) # type: ignore[assignment] + # Crop to FOV fov_size = int(1024 * fov / 2) return_image = return_image.crop( # type: ignore[assignment]