diff --git a/src/main/java/network/crypta/client/async/USKAttempt.java b/src/main/java/network/crypta/client/async/USKAttempt.java
new file mode 100644
index 0000000000..34fdf1e06b
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKAttempt.java
@@ -0,0 +1,242 @@
+package network.crypta.client.async;
+
+import network.crypta.keys.ClientSSKBlock;
+import network.crypta.keys.USK;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tracks a single edition probe, including its checker state and polling metadata.
+ *
+ * <p>Each attempt owns a {@link USKChecker} that performs the actual request and reports completion
+ * through {@link USKCheckerCallback}. The attempt records whether it has succeeded, failed (DNF),
+ * or been canceled, and it exposes scheduling hooks used by the owning fetcher. The attempt also
+ * tracks whether it has ever entered finite cooldown so that polling rounds can determine when a
+ * round is finished for now.
+ *
+ * <p>The class is mutable and relies on synchronization for checker state updates. Callers usually
+ * treat each attempt as part of a larger scheduling loop, invoking {@link #schedule(ClientContext)}
+ * and reacting to callbacks from the checker. Instances are short-lived and are replaced as polling
+ * rounds advance.
+ *
The constructor wires the checker used to probe the target edition and initializes the + * attempt state for scheduling. When {@code forever} is {@code true}, the checker is created for + * a long-lived polling attempt; otherwise it represents a one-off probe that will retire after + * completion. + * + * @param attemptContext shared configuration for attempt construction + * @param lookup descriptor containing edition and key information + * @param forever {@code true} to create a polling attempt; {@code false} for a one-off probe + */ + USKAttempt(USKAttemptContext attemptContext, USKKeyWatchSet.Lookup lookup, boolean forever) { + this.callbacks = attemptContext.callbacks(); + this.origUSK = attemptContext.origUSK(); + this.parent = attemptContext.parent(); + this.lookup = lookup; + this.number = lookup.val; + this.succeeded = false; + this.dnf = false; + this.forever = forever; + this.checker = + new USKChecker( + this, + lookup.key, + forever ? -1 : attemptContext.ctx().maxUSKRetries, + lookup.ignoreStore ? attemptContext.ctxNoStore() : attemptContext.ctx(), + attemptContext.parent(), + attemptContext.realTimeFlag()); + } + + @Override + public void onDNF(ClientContext context) { + synchronized (this) { + checker = null; + dnf = true; + } + callbacks.onDNF(this, context); + } + + @Override + public void onSuccess(ClientSSKBlock block, ClientContext context) { + synchronized (this) { + checker = null; + succeeded = true; + } + callbacks.onSuccess(this, false, block, context); + } + + @Override + public void onFatalAuthorError(ClientContext context) { + synchronized (this) { + checker = null; + } + // Counts as success except it doesn't update + callbacks.onSuccess(this, true, null, context); + } + + @Override + public void onNetworkError(ClientContext context) { + synchronized (this) { + checker = null; + } + // Treat network error as DNF for scheduling purposes + callbacks.onDNF(this, context); + } + + @Override + public void onCancelled(ClientContext context) { + synchronized (this) { + checker = null; + if (cancelNotified) return; + cancelNotified = true; + } + callbacks.onCancelled(this, context); + } + + /** + * Cancels this attempt and propagates cancellation to the checker if present. + * + * @param context client context used to cancel scheduling; must not be null + */ + public void cancel(ClientContext context) { + cancelled = true; + USKChecker c; + synchronized (this) { + c = checker; + } + if (c != null) { + c.cancel(context); + } + onCancelled(context); + } + + /** + * Schedules this attempt with its checker if still active. + * + * @param context client context used to schedule the checker; must not be null + */ + public void schedule(ClientContext context) { + USKChecker c; + synchronized (this) { + c = checker; + } + if (c == null) { + if (LOG.isDebugEnabled()) LOG.debug("Checker == null in schedule() for {}", this); + } else { + assert (!c.persistent()); + c.schedule(context); + } + } + + @Override + public String toString() { + return "USKAttempt for " + + number + + FOR_LITERAL + + origUSK.getURI() + + (forever ? " (forever)" : ""); + } + + @Override + public short getPriority() { + if (callbacks.isBackgroundPoll()) { + synchronized (this) { + if (forever) { + if (!everInCooldown) { + // Boost the priority initially, so that finding the first edition takes precedence + // over ongoing polling after we're fairly sure we're not going to find anything. 
+ // The ongoing polling keeps the ULPRs up to date so that we will get told quickly, + // but if we are overloaded, we won't be able to keep up regardless. + return callbacks.getProgressPollPriority(); + } else { + return callbacks.getNormalPollPriority(); + } + } else { + // If !forever, this is a random-probe. + // It's not that important. + return callbacks.getNormalPollPriority(); + } + } + } + return parent.getPriorityClass(); + } + + @Override + public void onEnterFiniteCooldown(ClientContext context) { + synchronized (this) { + everInCooldown = true; + } + callbacks.onEnterFiniteCooldown(context); + } + + /** + * Reports whether this attempt has ever entered a finite cooldown. + * + * @return {@code true} if the attempt has cooled down at least once + */ + public synchronized boolean everInCooldown() { + return everInCooldown; + } + + /** Refreshes cached poll parameters on the underlying checker, if active. */ + public void reloadPollParameters() { + USKChecker c; + synchronized (this) { + c = checker; + } + if (c == null) return; + c.onChangedFetchContext(); + } +} diff --git a/src/main/java/network/crypta/client/async/USKAttemptCallbacks.java b/src/main/java/network/crypta/client/async/USKAttemptCallbacks.java new file mode 100644 index 0000000000..6a5fa40f93 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttemptCallbacks.java @@ -0,0 +1,101 @@ +package network.crypta.client.async; + +import java.util.Random; +import network.crypta.keys.ClientSSKBlock; + +/** + * Callback interface for {@link USKAttempt} lifecycle events. + * + *
Implementations receive completion and scheduling signals from polling attempts. These hooks + * allow the owning fetcher to react to success, DNF, cancellation, and cooldown transitions while + * providing priority information used by the scheduler. The callbacks are intentionally minimal and + * are expected to be fast, as they are invoked on scheduling or network threads. + * + *
The interface is stateful in the sense that implementations can depend on the owning fetcher + * state, but callers should treat each method as a synchronous notification. No concurrency + * guarantees are enforced beyond what the caller provides, so implementations should provide their + * own synchronization if they mutate a shared state. + * + *
Implementations may record the failure, reschedule work, or update the UI state. The attempt + * is already marked as complete when this callback runs. + * + * @param attempt attempt that reported the DNF result; never null + * @param context client context associated with the attempt; must not be null + */ + void onDNF(USKAttempt attempt, ClientContext context); + + /** + * Notifies that an attempt succeeded. + * + *
The callback receives the decoded block if available and a flag indicating that the success + * should not update internal edition tracking. Implementations typically decide whether to decode + * or propagate data based on these inputs. + * + * @param attempt attempt that reported success; never null + * @param dontUpdate whether the success should avoid updating edition tracking + * @param block decoded block returned by the attempt; may be null + * @param context client context associated with the attempt; must not be null + */ + void onSuccess( + USKAttempt attempt, boolean dontUpdate, ClientSSKBlock block, ClientContext context); + + /** + * Notifies that an attempt was canceled. + * + *
This callback is invoked after the attempt has been marked canceled and any checker has been + * shut down. + * + * @param attempt attempt that was canceled; never null + * @param context client context associated with the attempt; must not be null + */ + void onCancelled(USKAttempt attempt, ClientContext context); + + /** + * Notifies that an attempt entered a finite cooldown period. + * + *
This signal is used to determine when a polling round can be treated as finished for now. + * + * @param context client context associated with the attempt; must not be null + */ + void onEnterFiniteCooldown(ClientContext context); + + /** + * Indicates whether the owning fetcher is running background polling. + * + * @return {@code true} when background polling is active + */ + boolean isBackgroundPoll(); + + /** + * Returns the polling priority used while making progress on a round. + * + * @return priority class for progress-oriented polling + */ + short getProgressPollPriority(); + + /** + * Returns the polling priority used during steady-state background polling. + * + * @return priority class for normal background polling + */ + short getNormalPollPriority(); + + /** + * Determines whether random editions should be added during polling. + * + * @param random random source used to sample candidates; must not be null + * @param firstLoop whether the round is in its initial loop + * @return {@code true} to schedule random editions, otherwise {@code false} + */ + boolean shouldAddRandomEditions(Random random, boolean firstLoop); +} diff --git a/src/main/java/network/crypta/client/async/USKAttemptContext.java b/src/main/java/network/crypta/client/async/USKAttemptContext.java new file mode 100644 index 0000000000..771efc6b91 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttemptContext.java @@ -0,0 +1,25 @@ +package network.crypta.client.async; + +import network.crypta.client.FetchContext; +import network.crypta.keys.USK; + +/** + * Shared configuration for creating {@link USKAttempt} instances. + * + *
This bundles the stable dependencies required to spawn attempt checkers so callers can reuse a + * single parameter object when scheduling multiple attempts. + * + * @param callbacks owning callback handler for lifecycle events + * @param origUSK base USK used for logging + * @param ctx base fetch context for scheduling + * @param ctxNoStore no-store fetch context for probes that bypass the store + * @param parent parent requester providing scheduling policy + * @param realTimeFlag whether to use real-time scheduling for the checker + */ +record USKAttemptContext( + USKAttemptCallbacks callbacks, + USK origUSK, + FetchContext ctx, + FetchContext ctxNoStore, + ClientRequester parent, + boolean realTimeFlag) {} diff --git a/src/main/java/network/crypta/client/async/USKAttemptManager.java b/src/main/java/network/crypta/client/async/USKAttemptManager.java new file mode 100644 index 0000000000..a898bb3c26 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttemptManager.java @@ -0,0 +1,484 @@ +package network.crypta.client.async; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Manages USK attempt lifecycle, staging, and scheduling. + * + *
This helper owns the attempt maps and the mechanics for adding, cancelling, and registering + * probe attempts. It delegates scheduling callbacks to the owning {@link USKFetcher} through the + * {@link USKAttemptCallbacks} interface. The manager tracks both short-lived random-probe attempts + * and long-lived polling attempts, ensuring that duplicate editions are not scheduled twice. It + * also coordinates the transition from datastore checks to network scheduling. + * + *
The class is mutable and synchronizes access to attempt collections. Callers typically invoke + * it from scheduler threads and should avoid holding external locks to prevent deadlocks. It + * prefers deterministic, ordered behavior by using {@link TreeMap} for edition-keyed attempts and + * by snapshotting collections before scheduling network work. + * + *
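+ * <p>Illustrative wiring from the owning fetcher, which passes itself as the
+ * {@link USKAttemptCallbacks}; the other variable names are assumptions rather than part of this
+ * class:
+ *
+ * <pre>{@code
+ * USKAttemptManager attempts =
+ *     new USKAttemptManager(
+ *         new USKAttemptContext(fetcher, origUSK, ctx, ctxNoStore, parent, realTimeFlag),
+ *         uskManager,
+ *         watchingKeys,
+ *         checkStoreOnly,
+ *         keepLastData);
+ * }</pre>
+ *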
The manager holds the context and collaborators required to build and schedule attempts. It + * assumes the provided dependencies remain valid for the lifetime of the owning fetcher. + * + * @param attemptContext shared configuration used for new attempt construction; must be non-null + * @param uskManager manager used to query the latest slots; must be non-null + * @param watchingKeys watch set used to plan fetch and poll editions; must be non-null + * @param checkStoreOnly whether to suppress network attempts and only check the store + * @param keepLastData whether to retain the last data when scheduling new attempts + */ + USKAttemptManager( + USKAttemptContext attemptContext, + USKManager uskManager, + USKKeyWatchSet watchingKeys, + boolean checkStoreOnly, + boolean keepLastData) { + this.attemptContext = attemptContext; + this.uskManager = uskManager; + this.watchingKeys = watchingKeys; + this.checkStoreOnly = checkStoreOnly; + this.keepLastData = keepLastData; + } + + /** + * Cancels attempts for editions older than the current latest value. + * + *
The method removes attempts from the internal maps and returns a list of attempts that
+ * should be canceled by the caller. It does not perform cancellation itself so that callers can
+ * decide when to propagate the cancellation on their own thread.
+ *
+ * @param curLatest latest edition value used as a cutoff for cancellation
+ * @return list of attempts to cancel, or {@code null} when none were removed
+ */
+ List The method consults the watch set to determine which editions should be polled or fetched
+ * and stages the resulting attempts in {@link #attemptsToStart}. Duplicate editions are filtered
+ * out, and no attempts are created when running in store-only mode.
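+ *
+ * <p>A sketch of one scheduling pass as the owning fetcher might drive it; {@code manager} and the
+ * other local names are illustrative only:
+ *
+ * <pre>{@code
+ * long latest = uskManager.lookupLatestSlot(origUSK);
+ * manager.addNewAttempts(latest, context, true); // stage probes for the first loop
+ * if (manager.hasPendingAttempts()) {
+ *   manager.registerAttempts(
+ *       new USKAttemptManager.USKAttemptRegistrationParams(context, false, latest));
+ * }
+ * }</pre>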
+ *
+ * @param curLatest latest edition value used to seed scheduling decisions
+ * @param context client context providing randomness and scheduling information
+ * @param firstLoop whether this is the first scheduling loop in the round
+ */
+ void addNewAttempts(long curLatest, ClientContext context, boolean firstLoop) {
+ USKKeyWatchSet.ToFetch list =
+ watchingKeys.getEditionsToFetch(
+ curLatest,
+ context.random,
+ getRunningFetchEditions(),
+ shouldAddRandomEditions(context, firstLoop),
+ firstLoop);
+ USKKeyWatchSet.Lookup[] toPoll = list.poll;
+ USKKeyWatchSet.Lookup[] toFetch = list.fetch;
+ synchronized (this) {
+ for (USKKeyWatchSet.Lookup lookup : toPoll) {
+ if (LOG.isTraceEnabled()) LOG.trace("Polling {} for {}", lookup, attemptContext.origUSK());
+ USKAttempt attempt = add(lookup, true);
+ if (attempt != null) attemptsToStart.add(attempt);
+ }
+ for (USKKeyWatchSet.Lookup lookup : toFetch) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Adding checker for edition {} for {}", lookup, attemptContext.origUSK());
+ USKAttempt attempt = add(lookup, false);
+ if (attempt != null) attemptsToStart.add(attempt);
+ }
+ }
+ }
+
+ /**
+ * Returns whether random editions should be added during scheduling.
+ *
+ * @param context client context providing randomness for selection
+ * @param firstLoop whether this is the first scheduling loop in the round
+ * @return {@code true} if random editions should be added, otherwise {@code false}
+ */
+ boolean shouldAddRandomEditions(ClientContext context, boolean firstLoop) {
+ return attemptContext.callbacks().shouldAddRandomEditions(context.random, firstLoop);
+ }
+
+ /**
+ * Adds a new attempt for the given lookup descriptor.
+ *
+ * This method enforces duplicate checks and stores the attempt in the appropriate map based on
+ * whether it is a polling attempt. It returns {@code null} when the attempt is suppressed (for
+ * example, in store-only mode or when a duplicate is detected).
+ *
+ * @param lookup descriptor containing the edition to probe
+ * @param forever whether the attempt should be treated as a polling attempt
+ * @return the created attempt, or {@code null} when no attempt was added
+ */
+ private synchronized USKAttempt add(USKKeyWatchSet.Lookup lookup, boolean forever) {
+ long edition = lookup.val;
+ if (lookup.val < 0)
+ throw new IllegalArgumentException(
+ "Can't check <0" + FOR_LITERAL + lookup.val + " on " + attemptContext.origUSK());
+ if (checkStoreOnly) return null;
+ if (LOG.isDebugEnabled())
+ LOG.debug("Adding USKAttempt for {} for {}", edition, attemptContext.origUSK());
+ if (isDuplicateAttempt(forever, edition)) return null;
+ USKAttempt attempt = new USKAttempt(attemptContext, lookup, forever);
+ if (forever) pollingAttempts.put(edition, attempt);
+ else {
+ runningAttempts.put(edition, attempt);
+ }
+ if (LOG.isDebugEnabled()) LOG.debug("Added {} for {}", attempt, attemptContext.origUSK());
+ return attempt;
+ }
+
+ /**
+ * Returns whether an attempt already exists for the given edition.
+ *
+ * @param forever whether the attempt is a polling attempt
+ * @param edition edition number to check for duplicates
+ * @return {@code true} if a duplicate attempt is already present
+ */
+ private synchronized boolean isDuplicateAttempt(boolean forever, long edition) {
+ if (forever) {
+ if (pollingAttempts.containsKey(edition)) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Already polling edition: {} for {}", edition, attemptContext.origUSK());
+ return true;
+ }
+ } else {
+ if (runningAttempts.containsKey(edition)) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Returning because already running for {}", attemptContext.origUSK().getURI());
+
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Returns whether any random-probe attempts are running.
+ *
+ * @return {@code true} if there are active running attempts
+ */
+ synchronized boolean hasRunningAttempts() {
+ return !runningAttempts.isEmpty();
+ }
+
+ /**
+ * Returns whether no polling attempts are registered.
+ *
+ * @return {@code true} if no polling attempts are registered
+ */
+ synchronized boolean hasNoPollingAttempts() {
+ return pollingAttempts.isEmpty();
+ }
+
+ /**
+ * Returns a snapshot of polling attempts.
+ *
+ * @return array of polling attempts; may be empty but never null
+ */
+ synchronized USKAttempt[] snapshotPollingAttempts() {
+ return pollingAttempts.values().toArray(new USKAttempt[0]);
+ }
+
+ /**
+ * Returns a snapshot of running attempts.
+ *
+ * @return array of running attempts; may be empty but never null
+ */
+ synchronized USKAttempt[] snapshotRunningAttempts() {
+ return runningAttempts.values().toArray(new USKAttempt[0]);
+ }
+
+ /**
+ * Returns a snapshot of attempts staged for registration.
+ *
+ * @return array of attempts staged to start; may be empty but never null
+ */
+ synchronized USKAttempt[] snapshotAttemptsToStart() {
+ return attemptsToStart.toArray(new USKAttempt[0]);
+ }
+
+ /**
+ * Returns whether any attempts are staged for registration.
+ *
+ * @return {@code true} when staged attempts are available
+ */
+ synchronized boolean hasPendingAttempts() {
+ return !attemptsToStart.isEmpty();
+ }
+
+ /** Clears the staged attempts list. */
+ synchronized void clearAttemptsToStart() {
+ attemptsToStart.clear();
+ }
+
+ /** Clears all attempt collections, removing staged, running, and polling attempts. */
+ synchronized void clearAllAttempts() {
+ attemptsToStart.clear();
+ runningAttempts.clear();
+ pollingAttempts.clear();
+ }
+
+ /**
+ * Removes a running attempt by edition.
+ *
+ * @param edition edition number to remove
+ */
+ synchronized void removeRunningAttempt(long edition) {
+ runningAttempts.remove(edition);
+ }
+
+ /**
+ * Removes a polling attempt by edition.
+ *
+ * @param edition edition number to remove
+ */
+ synchronized void removePollingAttempt(long edition) {
+ pollingAttempts.remove(edition);
+ }
+
+ /**
+ * Returns the count of running attempts.
+ *
+ * @return number of running attempts
+ */
+ @SuppressWarnings("unused")
+ synchronized int runningAttemptCount() {
+ return runningAttempts.size();
+ }
+
+ /**
+ * Returns the count of polling attempts.
+ *
+ * @return number of polling attempts
+ */
+ @SuppressWarnings("unused")
+ synchronized int pollingAttemptCount() {
+ return pollingAttempts.size();
+ }
+
+ /**
+ * Returns a human-readable description of running attempts.
+ *
+ * @return description string containing edition numbers and flags
+ */
+ synchronized String runningAttemptsDescription() {
+ StringBuilder sb = new StringBuilder();
+ boolean first = true;
+ for (USKAttempt attempt : runningAttempts.values()) {
+ if (!first) sb.append(", ");
+ first = false;
+ sb.append(attempt.number);
+ if (attempt.cancelled) sb.append("(cancelled)");
+ if (attempt.succeeded) sb.append("(succeeded)");
+ }
+ return sb.toString();
+ }
+
+ /**
+ * Returns lookup descriptors for currently running fetch editions.
+ *
+ * @return list of lookup descriptors associated with running or polling attempts
+ */
+ synchronized List The method drains the staged attempt list, notifies the parent requester when network work
+ * is about to start, and schedules each attempt if it is still newer than the latest known slot.
+ * Attempts that are already obsolete are removed from the internal maps.
+ *
+ * @param params registration parameters containing context and edition tracking information
+ */
+ void registerAttempts(USKAttemptRegistrationParams params) {
+ USKAttempt[] attempts;
+ int runningCount;
+ int pollingCount;
+ synchronized (this) {
+ attempts = attemptsToStart.toArray(new USKAttempt[0]);
+ attemptsToStart.clear();
+ runningCount = runningAttempts.size();
+ pollingCount = pollingAttempts.size();
+ }
+
+ if (attempts.length > 0) attemptContext.parent().toNetwork(params.context());
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Registering {} USKChecker's for {} running={} polling={}",
+ attempts.length,
+ attemptContext.origUSK(),
+ runningCount,
+ pollingCount);
+ for (USKAttempt attempt : attempts) {
+ long lastEd = uskManager.lookupLatestSlot(attemptContext.origUSK());
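+ // lastEd may be lowered just below so that the suggested edition is still scheduled when its data is needed.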
+
+ if (keepLastData && !params.hasLastRequestData() && lastEd == params.suggestedEdition())
+ lastEd--;
+
+ if (attempt == null) continue;
+ if (attempt.number > lastEd) attempt.schedule(params.context());
+ else {
+ removeRunningAttempt(attempt.number);
+ removePollingAttempt(attempt.number);
+ }
+ }
+ }
+
+ /**
+ * Processes attempts after a datastore store check completes.
+ *
+ * This method mirrors {@link #registerAttempts(USKAttemptRegistrationParams)} but operates on
+ * a provided attempt array after a store check completes. It schedules attempts that remain newer
+ * than the latest known slot and removes those that are already obsolete.
+ *
+ * @param params registration parameters containing context and edition tracking information
+ * @param attempts attempts to schedule after the store check; may be empty but not null
+ */
+ void processAttemptsAfterStoreCheck(USKAttemptRegistrationParams params, USKAttempt[] attempts) {
+ for (USKAttempt attempt : attempts) {
+ long lastEd = uskManager.lookupLatestSlot(attemptContext.origUSK());
+ if (keepLastData && !params.hasLastRequestData() && lastEd == params.suggestedEdition())
+ lastEd--;
+ if (attempt == null) continue;
+ if (attempt.number > lastEd) attempt.schedule(params.context());
+ else {
+ removeRunningAttempt(attempt.number);
+ removePollingAttempt(attempt.number);
+ }
+ }
+ }
+
+ /**
+ * Notes that an attempt succeeded and removes it from running attempts.
+ *
+ * @param attempt attempt that succeeded; may be null
+ */
+ @SuppressWarnings("unused")
+ void noteAttemptSuccess(USKAttempt attempt) {
+ if (attempt == null) return;
+ removeRunningAttempt(attempt.number);
+ }
+
+ /**
+ * Notes that an attempt was canceled and removes it from running attempts.
+ *
+ * @param attempt attempt that was canceled; may be null
+ */
+ @SuppressWarnings("unused")
+ void noteAttemptCancelled(USKAttempt attempt) {
+ if (attempt == null) return;
+ if (LOG.isDebugEnabled())
+ LOG.debug("Attempt {} cancelled for {}", attempt.number, attemptContext.origUSK());
+ removeRunningAttempt(attempt.number);
+ }
+
+ /** Refreshes poll parameters on all polling attempts. */
+ void reloadPollParameters() {
+ USKAttempt[] pollers;
+ synchronized (this) {
+ pollers = pollingAttempts.values().toArray(new USKAttempt[0]);
+ }
+ for (USKAttempt attempt : pollers) attempt.reloadPollParameters();
+ }
+
+ /**
+ * Registration parameters used when scheduling or processing attempts.
+ *
+ * @param context client context for scheduling callbacks and networking
+ * @param hasLastRequestData whether the fetcher has retained the last request data
+ * @param suggestedEdition edition value suggested by the original USK
+ */
+ record USKAttemptRegistrationParams(
+ ClientContext context, boolean hasLastRequestData, long suggestedEdition) {}
+}
diff --git a/src/main/java/network/crypta/client/async/USKCompletionCoordinator.java b/src/main/java/network/crypta/client/async/USKCompletionCoordinator.java
new file mode 100644
index 0000000000..e33f9081c9
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKCompletionCoordinator.java
@@ -0,0 +1,198 @@
+package network.crypta.client.async;
+
+import network.crypta.keys.ClientSSKBlock;
+import network.crypta.keys.USK;
+import network.crypta.support.api.Bucket;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Coordinates completion callbacks and retained data handling for USK fetchers.
+ *
+ * This helper wraps a {@link USKCompletionHandler} to decode data, retain the most recent
+ * payload, and deliver completion callbacks when a polling cycle finishes. It owns references to
+ * the manager, original USK, and requester so it can unregister and emit callbacks consistently.
+ * Callers typically invoke it when a fetcher is finished or canceled, and the coordinator handles
+ * cleanup of scheduler state and subscriber notification.
+ *
+ * The class is mutable but relies on the caller for synchronization; it performs no internal
+ * locking beyond the underlying collaborators. It also keeps track of real-time scheduling bias to
+ * interact with the correct scheduler queue when cleaning up pending keys.
+ *
+ * The coordinator depends on collaborators that are expected to remain valid for the life of
+ * the fetcher. The {@code parent} and {@code realTimeFlag} are used to align cleanup operations
+ * with the same scheduling bias as the fetcher itself.
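+ *
+ * <p>Illustrative construction, mirroring how the owning fetcher wires it (the local field names
+ * are assumptions):
+ *
+ * <pre>{@code
+ * USKCompletionCoordinator coordinator =
+ *     new USKCompletionCoordinator(
+ *         new USKCompletionHandler(keepLastData), uskManager, origUSK, parent, realTimeFlag);
+ * }</pre>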
+ *
+ * @param completionHandler handler that decodes and stores retained data; must be non-null
+ * @param uskManager manager used to unsubscribe and track completion; must be non-null
+ * @param origUSK base USK used for lookups and callback payloads; must be non-null
+ * @param parent requester used for decode context and scheduling; must be non-null
+ * @param realTimeFlag whether cleanup should use real-time scheduling queues
+ */
+ USKCompletionCoordinator(
+ USKCompletionHandler completionHandler,
+ USKManager uskManager,
+ USK origUSK,
+ ClientRequester parent,
+ boolean realTimeFlag) {
+ this.completionHandler = completionHandler;
+ this.uskManager = uskManager;
+ this.origUSK = origUSK;
+ this.parent = parent;
+ this.realTimeFlag = realTimeFlag;
+ }
+
+ /**
+ * Decodes and applies a data block when decoding is requested.
+ *
+ * If {@code decode} is {@code false}, the method returns immediately. Otherwise, it delegates
+ * to {@link USKCompletionHandler#decodeBlockIfNeeded(boolean, ClientSSKBlock, ClientContext,
+ * ClientRequester)} to produce a decoded bucket and then applies the decoded data to the
+ * completion handler.
+ *
+ * @param decode whether decoding should be performed
+ * @param block block to decode; may be null when only metadata is available
+ * @param context client context used for decoding; must not be null
+ */
+ void applyDecodedData(boolean decode, ClientSSKBlock block, ClientContext context) {
+ if (!decode) return;
+ Bucket decoded = completionHandler.decodeBlockIfNeeded(true, block, context, parent);
+ completionHandler.applyDecodedData(true, block, decoded);
+ }
+
+ /**
+ * Applies decoded data for a discovered edition.
+ *
+ * This delegates to the completion handler to parse or store the supplied data payload and
+ * metadata flags.
+ *
+ * @param decode whether the payload should be decoded
+ * @param metadata whether the payload represents metadata rather than raw content
+ * @param codec compression codec identifier associated with the payload
+ * @param data raw payload bytes; may be null when data is unavailable
+ * @param context client context used for decoding; must not be null
+ */
+ void applyFoundDecodedData(
+ boolean decode, boolean metadata, short codec, byte[] data, ClientContext context) {
+ completionHandler.applyFoundDecodedData(decode, metadata, codec, data, context);
+ }
+
+ /**
+ * Releases retained data bytes, if any.
+ *
+ * @return retained data bytes, or {@code null} when none are stored
+ */
+ @SuppressWarnings("unused")
+ byte[] releaseLastDataBytes() {
+ return completionHandler.releaseLastDataBytes();
+ }
+
+ /**
+ * Returns the compression codec used by the retained data.
+ *
+ * @return codec identifier for the last retained data
+ */
+ @SuppressWarnings("unused")
+ short lastCompressionCodec() {
+ return completionHandler.lastCompressionCodec();
+ }
+
+ /**
+ * Returns whether the retained data represents metadata.
+ *
+ * @return {@code true} if the retained data is metadata
+ */
+ @SuppressWarnings("unused")
+ boolean lastWasMetadata() {
+ return completionHandler.lastWasMetadata();
+ }
+
+ /**
+ * Returns whether retained data from the last request is available.
+ *
+ * @return {@code true} if retained data is present
+ */
+ boolean hasLastRequestData() {
+ return completionHandler.hasLastRequestData();
+ }
+
+ /** Clears any retained data from the last request. */
+ void clearLastRequestData() {
+ completionHandler.clearLastRequestData();
+ }
+
+ /**
+ * Completes callbacks and cleans up fetcher state.
+ *
+ * The method unsubscribes the fetcher, removes pending keys from the scheduler, and delivers
+ * completion callbacks with the latest known edition and retained data. Exceptions thrown by
+ * callbacks are caught and logged so that remaining callbacks still receive notifications.
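+ *
+ * <p>Sketch of the expected call site when a fetch cycle ends; {@code callbacks}, {@code completed},
+ * and {@code completionCoordinator} stand for the owning fetcher's own fields, and {@code this} is
+ * that fetcher:
+ *
+ * <pre>{@code
+ * USKFetcherCallback[] cbs;
+ * synchronized (this) {
+ *   completed = true;
+ *   cbs = callbacks.toArray(new USKFetcherCallback[0]);
+ * }
+ * completionCoordinator.completeCallbacks(context, this, cbs);
+ * }</pre>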
+ *
+ * @param context client context used for scheduling and callback payloads
+ * @param fetcher fetcher instance being completed; must not be null
+ * @param callbacks callback array to notify; may be empty but not null
+ */
+ void completeCallbacks(
+ ClientContext context, USKFetcher fetcher, USKFetcherCallback[] callbacks) {
+ uskManager.unsubscribe(origUSK, fetcher);
+ uskManager.onFinished(fetcher);
+ context
+ .getSskFetchScheduler(realTimeFlag)
+ .schedTransient
+ .removePendingKeys((KeyListener) fetcher);
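+ // lookupLatestSlot() returns -1 when no edition has ever been found; callbacks then get onFailure().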
+ long ed = uskManager.lookupLatestSlot(origUSK);
+ byte[] data = completionHandler.releaseLastDataBytes();
+ short codec = completionHandler.lastCompressionCodec();
+ boolean metadata = completionHandler.lastWasMetadata();
+ for (USKFetcherCallback c : callbacks) {
+ try {
+ if (ed == -1) c.onFailure(context);
+ else
+ c.onFoundEdition(
+ new USKFoundEdition(
+ ed, origUSK.copy(ed), context, metadata, codec, data, false, false));
+ } catch (Exception e) {
+ LOG.error("Exception thrown by a USK fetcher callback {}", c, e);
+ }
+ }
+ }
+
+ /**
+ * Notifies callbacks that the fetcher was canceled.
+ *
+ * @param context client context supplied to cancellation callbacks
+ * @param callbacks callback array to notify; may be empty but not null
+ */
+ void finishCancelled(ClientContext context, USKFetcherCallback[] callbacks) {
+ for (USKFetcherCallback c : callbacks) c.onCancelled(context);
+ }
+}
diff --git a/src/main/java/network/crypta/client/async/USKCompletionHandler.java b/src/main/java/network/crypta/client/async/USKCompletionHandler.java
new file mode 100644
index 0000000000..2e22918fae
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKCompletionHandler.java
@@ -0,0 +1,247 @@
+package network.crypta.client.async;
+
+import java.io.IOException;
+import network.crypta.keys.ClientSSKBlock;
+import network.crypta.keys.KeyDecodeException;
+import network.crypta.support.api.Bucket;
+import network.crypta.support.io.BucketTools;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tracks the most recently decoded USK payload and exposes it to completion callbacks.
+ *
+ * This helper is used by USK fetch coordination to retain metadata about the last successful
+ * fetch and optionally hold on to the decoded data bucket. Callers feed decoded blocks or already
+ * decoded byte arrays into this instance, then later query or release the retained data when a
+ * fetcher completes. The handler is intentionally stateful: it keeps the last compression codec,
+ * whether the last block was metadata, and an optional data bucket controlled by {@code
+ * keepLastData}.
+ *
+ * All state mutations are synchronized on the instance to allow concurrent fetch activity. The
+ * class does not perform network I/O; it only records and releases data that has already been
+ * decoded. Callers must treat returned buckets and byte arrays as owned by the caller after
+ * retrieval.
+ *
+ * The {@code keepLastData} flag controls whether decoded data buckets are held so that
+ * completion callbacks can access them later. The handler does not decode any data on its own
+ * during construction; it only initializes the retention policy and starts with an empty state.
+ *
+ * @param keepLastData {@code true} to retain the last decoded bucket; {@code false} to discard
+ * decoded data after updating metadata flags and codec information.
+ */
+ USKCompletionHandler(boolean keepLastData) {
+ this.keepLastData = keepLastData;
+ }
+
+ /**
+ * Reports whether a retained data bucket is currently available.
+ *
+ * The value reflects the last successful decoding that was retained. The result may change
+ * after {@link #applyDecodedData(boolean, ClientSSKBlock, Bucket)} or {@link
+ * #applyFoundDecodedData(boolean, boolean, short, byte[], ClientContext)} is called, or after
+ * {@link #releaseLastDataBytes()} frees the stored bucket.
+ *
+ * @return {@code true} if a bucket is currently stored; {@code false} otherwise.
+ */
+ boolean hasLastRequestData() {
+ synchronized (this) {
+ return lastRequestData != null;
+ }
+ }
+
+ /**
+ * Returns the compression codec recorded for the most recently applied block.
+ *
+ * The codec is updated when decoded data is applied or when metadata is applied from a found
+ * edition. If no block has been applied yet, the value remains at the default zero value.
+ *
+ * @return the last compression codec recorded for a decoded block.
+ */
+ short lastCompressionCodec() {
+ synchronized (this) {
+ return lastCompressionCodec;
+ }
+ }
+
+ /**
+ * Returns whether the most recently applied block represented metadata.
+ *
+ * This reflects the last known metadata flag from applied decoded data or from a found
+ * edition. Callers should interpret it in tandem with {@link #lastCompressionCodec()} when
+ * building completion callbacks.
+ *
+ * @return {@code true} if the last applied block was metadata; {@code false} otherwise.
+ */
+ boolean lastWasMetadata() {
+ synchronized (this) {
+ return lastWasMetadata;
+ }
+ }
+
+ /**
+ * Releases any retained data bucket and clears stored state.
+ *
+ * This method frees the retained bucket if one exists and clears the handler reference so it
+ * can be garbage collected. It does not modify codec or metadata flags, which are updated by
+ * later calls to {@link #applyDecodedData(boolean, ClientSSKBlock, Bucket)}.
+ */
+ void clearLastRequestData() {
+ synchronized (this) {
+ if (lastRequestData != null) {
+ lastRequestData.free();
+ }
+ lastRequestData = null;
+ }
+ }
+
+ /**
+ * Decodes the provided block into a data bucket when decoding is requested.
+ *
+ * This method is a small adapter that checks the decode flag and the availability of the
+ * block. If either condition is not met, it returns {@code null} without changing internal state.
+ * When decoding is performed, the returned bucket is owned by the caller and may be retained or
+ * freed based on {@link #applyDecodedData(boolean, ClientSSKBlock, Bucket)}.
+ *
+ * @param decode {@code true} to decode the provided block; {@code false} to skip decoding.
+ * @param block the block to decode, or {@code null} when no block is available.
+ * @param context client context used to get temporary bucket factories.
+ * @param parent requester providing persistence information for bucket allocation.
+ * @return a decoded data bucket, or {@code null} if decoding was skipped or failed.
+ */
+ Bucket decodeBlockIfNeeded(
+ boolean decode, ClientSSKBlock block, ClientContext context, ClientRequester parent) {
+ if (!decode || block == null) return null;
+ return ClientSSKBlockDecoder.decode(block, context, parent.persistent());
+ }
+
+ /**
+ * Applies decoded data and updates the recorded metadata and codec state.
+ *
+ * The method is synchronized to serialize state updates. When decoding is disabled, it is a
+ * no-op. If a block is supplied, the codec and metadata flags are taken from that block, and the
+ * data bucket is either retained or freed based on {@code keepLastData}. If the block is {@code
+ * null}, codec and metadata flags are reset and any retained bucket is cleared.
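+ *
+ * <p>This is typically paired with {@link #decodeBlockIfNeeded}; the {@code handler} variable is
+ * illustrative:
+ *
+ * <pre>{@code
+ * Bucket decoded = handler.decodeBlockIfNeeded(true, block, context, parent);
+ * handler.applyDecodedData(true, block, decoded); // retained or freed depending on keepLastData
+ * }</pre>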
+ *
+ * @param decode {@code true} to apply the block information; {@code false} to skip updates.
+ * @param block the decoded block, or {@code null} to clear codec and metadata state.
+ * @param data the decoded data bucket, or {@code null} when no payload is available.
+ */
+ void applyDecodedData(boolean decode, ClientSSKBlock block, Bucket data) {
+ synchronized (this) {
+ if (!decode) return;
+ if (block != null) {
+ lastCompressionCodec = block.getCompressionCodec();
+ lastWasMetadata = block.isMetadata();
+ if (keepLastData) {
+ if (lastRequestData != null) lastRequestData.free();
+ lastRequestData = data;
+ } else if (data != null) {
+ data.free();
+ }
+ } else {
+ lastCompressionCodec = -1;
+ lastWasMetadata = false;
+ lastRequestData = null;
+ }
+ }
+ }
+
+ /**
+ * Applies already decoded data and records metadata/codec values.
+ *
+ * This variant is used when a decoded byte array is already available, such as when data is
+ * supplied by a higher-level cache. If {@code keepLastData} is enabled, the byte array is wrapped
+ * into an immutable bucket for retention. If decoding is disabled, no changes are made.
+ *
+ * @param decode {@code true} to apply the provided metadata and data; {@code false} to skip.
+ * @param metadata {@code true} when the payload represents metadata rather than raw data.
+ * @param codec compression codec identifier associated with the decoded payload.
+ * @param data decoded data bytes; must not be mutated by the caller after passing here.
+ * @param context client context providing the temporary bucket factory for retention.
+ */
+ void applyFoundDecodedData(
+ boolean decode, boolean metadata, short codec, byte[] data, ClientContext context) {
+ synchronized (this) {
+ if (!decode) return;
+ lastCompressionCodec = codec;
+ lastWasMetadata = metadata;
+ if (keepLastData) {
+ // Note: converting bucket to byte[] and back is inefficient
+ if (lastRequestData != null) lastRequestData.free();
+ try {
+ lastRequestData = BucketTools.makeImmutableBucket(context.tempBucketFactory, data);
+ } catch (IOException e) {
+ LOG.error("Caught {}", e, e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Releases retained data as a byte array and clears the stored bucket.
+ *
+ * If no data is retained, this returns {@code null} to preserve the "no payload" signal used
+ * by downstream callbacks. The caller owns the returned byte array. The retained bucket is always
+ * freed, even if conversion fails, ensuring the handler does not retain buffers longer than
+ * needed.
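+ *
+ * <p>Typical retain-and-release flow; the local names here are illustrative:
+ *
+ * <pre>{@code
+ * USKCompletionHandler handler = new USKCompletionHandler(true); // keepLastData
+ * handler.applyFoundDecodedData(true, false, codec, payload, context);
+ * byte[] data = handler.releaseLastDataBytes(); // frees the retained bucket
+ * boolean retained = handler.hasLastRequestData(); // false after release
+ * }</pre>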
+ *
+ * @return the retained data bytes, or {@code null} when no data is stored
+ */
+ @SuppressWarnings("java:S1168")
+ byte[] releaseLastDataBytes() {
+ synchronized (this) {
+ if (lastRequestData == null) return null;
+ try {
+ return BucketTools.toByteArray(lastRequestData);
+ } catch (IOException e) {
+ LOG.error("Unable to turn lastRequestData into byte[]: caught I/O exception: {}", e, e);
+ return null;
+ } finally {
+ lastRequestData.free();
+ lastRequestData = null;
+ }
+ }
+ }
+
+ private static final class ClientSSKBlockDecoder {
+ private ClientSSKBlockDecoder() {}
+
+ private static Bucket decode(ClientSSKBlock block, ClientContext context, boolean persistent) {
+ try {
+ return block.decode(context.getBucketFactory(persistent), 1025 /* it's an SSK */, true);
+ } catch (KeyDecodeException _) {
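+ // Tolerated: a failed decode only means there is no payload to retain; the fetch itself succeeded.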
+ return null;
+ } catch (IOException e) {
+ LOG.error("Decode failed due to I/O error: {}", e.getMessage(), e);
+ return null;
+ }
+ }
+ }
+}
diff --git a/src/main/java/network/crypta/client/async/USKFetcher.java b/src/main/java/network/crypta/client/async/USKFetcher.java
index 5652d493bf..a904f985e1 100644
--- a/src/main/java/network/crypta/client/async/USKFetcher.java
+++ b/src/main/java/network/crypta/client/async/USKFetcher.java
@@ -1,97 +1,76 @@
package network.crypta.client.async;
-import java.io.IOException;
-import java.lang.ref.WeakReference;
import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
import java.util.Random;
-import java.util.TreeMap;
-import java.util.TreeSet;
import network.crypta.client.FetchContext;
-import network.crypta.keys.ClientSSK;
import network.crypta.keys.ClientSSKBlock;
import network.crypta.keys.FreenetURI;
import network.crypta.keys.Key;
import network.crypta.keys.KeyBlock;
-import network.crypta.keys.KeyDecodeException;
import network.crypta.keys.NodeSSK;
-import network.crypta.keys.SSKBlock;
-import network.crypta.keys.SSKVerifyException;
import network.crypta.keys.USK;
-import network.crypta.node.RequestStarter;
import network.crypta.node.SendableGet;
-import network.crypta.support.RemoveRangeArrayList;
-import network.crypta.support.api.Bucket;
-import network.crypta.support.io.BucketTools;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Coordinates discovery and fetching of editions for a {@link USK}.
+ * Coordinates discovery, polling, and optional data retrieval for a {@link USK} namespace.
*
- * USKs (Unique SSKs) advance over time; this class drives the polling and discovery loop that
- * identifies the latest available edition and optionally retrieves its data. It combines
- * datastore-prechecks, targeted slot checks, and Date-Based Request (DBR) hint fetches to balance
- * latency and load. The fetcher can run once for a specific request or continue in background
- * polling mode to track updates over time.
+ * This fetcher drives a USK discovery round by consulting the datastore, scheduling edition
+ * probes, and applying Date-Based Request (DBR) hints to narrow toward the latest available slot.
+ * Callers typically construct one instance per USK, register callbacks or subscribers, and invoke
+ * {@link #schedule(ClientContext)} to begin work. The instance may complete a single round or
+ * continue background polling; it cooperates with {@link USKManager} and scheduler infrastructure
+ * so network I/O stays in scheduler-managed tasks rather than in this class.
*
- * Lifecycle and behavior:
+ * The internal state model centers on mutable polling state: in-flight attempts, a watch window,
+ * the last attempted edition, and optional retained payload data. The fetcher respects a minimum
+ * failure threshold before declaring a round finished and may reschedule with backoff when
+ * configured. These invariants let callers treat each round as a bounded probe of the USK space.
+ *
+ * Concurrency is handled with synchronized sections guarding shared fields such as completion
+ * flags and watch lists. Cancellation or completion is terminal and makes later schedule requests
+ * no-ops, and the fetcher is not persistent across restarts.
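+ *
+ * <p>A minimal single-round usage sketch; the argument values are illustrative, and the method
+ * names follow the constructor and callback-registration documentation below rather than being
+ * normative:
+ *
+ * <pre>{@code
+ * USKFetcher fetcher = new USKFetcher(usk, uskManager, fetchContext, requester, 3, 0);
+ * fetcher.addCallback(cb);   // cb is notified once the round concludes
+ * fetcher.schedule(context); // datastore pass first, then network probes
+ * }</pre>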
*
- * Threading and state: instances are mutable and use fine-grained synchronization around shared
- * fields to coordinate scheduling and callbacks. Cancellation short-circuits pending work and marks
- * the instance as finished. This class is not persistent; persistence of intent is tracked by
- * {@code USKFetcherTag} which recreates fetchers on startup as needed.
- *
* @see USKManager
* @see USK
+ * @see USKDateHintFetches
*/
-public class USKFetcher implements ClientGetState, USKCallback, HasKeyListener, KeyListener {
+public class USKFetcher
+ implements ClientGetState, USKCallback, HasKeyListener, KeyListener, USKAttemptCallbacks {
/** Logger for polling, scheduling, and hint-processing diagnostics. */
private static final Logger LOG = LoggerFactory.getLogger(USKFetcher.class);
- /** Literal used in attempt descriptions to keep log formatting consistent. */
- private static final String FOR_LITERAL = " for ";
-
- /** USK manager */
+ /** Manager that owns known slot state and subscription coordination. */
private final USKManager uskManager;
- /** The USK to fetch */
+ /** Base USK namespace from which edition keys are derived. */
private final USK origUSK;
- /** Callbacks */
+ /** Registered completion callbacks for this fetch cycle. */
private final List Callbacks are invoked when a polling round reaches a terminal outcome or when a single-shot
+ * fetch completes. They receive {@code onFoundEdition(...)} at most once per lifecycle unless
+ * background polling is enabled, in which case the callback may not be notified for long periods.
+ * This method also affects dynamic scheduling because callback priority hints are folded into the
+ * polling priority calculation and can bias progress checks for interactive users.
*
- * Callbacks are notified when the overall USK fetch cycle completes. Unless background polling
- * is enabled, they receive {@code onFoundEdition(...)} at most once when the final decision for
- * the current cycle is known. Callbacks also participate in determining the dynamic polling
- * priority via {@link #updatePriorities()} so interactive callers can promote progress checks.
+ * The call is thread-safe and idempotent with respect to completed instances. Adding callbacks
+ * after completion has no effect and returns {@code false} without side effects. Callback
+ * instances are expected to remain valid for the life of the fetcher and may be called from
+ * scheduler threads rather than the caller's thread. The method does not trigger scheduling on
+ * its own, but it does update priorities immediately after the callback is stored.
*
- * Note: When continuous background polling is enabled, consider whether registering a callback
- * is appropriate, as the cycle may not reach a terminal state for long periods.
+ * Preconditions are minimal: the callback must be non-null and should tolerate invocation on
+ * internal threads. Postconditions are limited to registration and priority refresh; the caller
+ * should not expect immediate network activity as a result of this call.
*
- * @param cb the callback to add; must remain valid for the lifetime of this fetch cycle; {@code
- * null} is not permitted
- * @return {@code true} when the callback was added successfully; {@code false} when the fetcher
- * has already completed and no further callbacks are accepted
+ * @param cb callback instance to register; must be non-null and long-lived
+ * @return {@code true} when accepted; {@code false} if already completed
+ * Each attempt owns a {@link USKChecker} that performs the actual request and reports
- * completion through {@link USKCheckerCallback}. The attempt records whether it has succeeded,
- * failed (DNF), or been canceled, and it exposes scheduling hooks used by the outer fetcher.
- */
- class USKAttempt implements USKCheckerCallback {
- /** Edition number */
- long number;
-
- /** Attempt to fetch that edition number (or null if the fetch has finished) */
- USKChecker checker;
-
- /** Successful fetch? */
- boolean succeeded;
-
- /** DNF? */
- boolean dnf;
-
- /** Whether this attempt has been explicitly canceled. */
- boolean cancelled;
-
- /** Lookup descriptor associated with this attempt. */
- final Lookup lookup;
-
- /** Whether this attempt is a long-lived polling attempt. */
- final boolean forever;
-
- /** Whether this attempt has ever entered finite cooldown. */
- private boolean everInCooldown;
-
- /**
- * Creates a new attempt for the provided lookup descriptor.
- *
- * @param l lookup descriptor containing edition and key information; must not be null
- * @param forever {@code true} to create a polling attempt; {@code false} for a one-off probe
- */
- private USKAttempt(Lookup l, boolean forever) {
- this.lookup = l;
- this.number = l.val;
- this.succeeded = false;
- this.dnf = false;
- this.forever = forever;
- this.checker =
- new USKChecker(
- this,
- l.key,
- forever ? -1 : ctx.maxUSKRetries,
- l.ignoreStore ? ctxNoStore : ctx,
- parent,
- realTimeFlag);
- }
-
- @Override
- public void onDNF(ClientContext context) {
- synchronized (this) {
- checker = null;
- dnf = true;
- }
- USKFetcher.this.onDNF(this, context);
- }
-
- @Override
- public void onSuccess(ClientSSKBlock block, ClientContext context) {
- synchronized (this) {
- checker = null;
- succeeded = true;
- }
- USKFetcher.this.onSuccess(this, false, block, context);
- }
-
- @Override
- public void onFatalAuthorError(ClientContext context) {
- synchronized (this) {
- checker = null;
- }
- // Counts as success except it doesn't update
- USKFetcher.this.onSuccess(this, true, null, context);
- }
-
- @Override
- public void onNetworkError(ClientContext context) {
- synchronized (this) {
- checker = null;
- }
- // Treat network error as DNF for scheduling purposes
- USKFetcher.this.onDNF(this, context);
- }
-
- @Override
- public void onCancelled(ClientContext context) {
- synchronized (this) {
- checker = null;
- }
- USKFetcher.this.onCancelled(this, context);
- }
-
- /**
- * Cancels this attempt and propagates cancellation to the checker if present.
- *
- * @param context client context used to cancel scheduling; must not be null
- */
- public void cancel(ClientContext context) {
- cancelled = true;
- USKChecker c;
- synchronized (this) {
- c = checker;
- }
- if (c != null) c.cancel(context);
- onCancelled(context);
- }
-
- /**
- * Schedules this attempt with its checker if still active.
- *
- * @param context client context used to schedule the checker; must not be null
- */
- public void schedule(ClientContext context) {
- USKChecker c;
- synchronized (this) {
- c = checker;
- }
- if (c == null) {
- if (LOG.isDebugEnabled()) LOG.debug("Checker == null in schedule() for {}", this);
- } else {
- assert (!c.persistent());
- c.schedule(context);
- }
- }
-
- @Override
- public String toString() {
- return "USKAttempt for "
- + number
- + FOR_LITERAL
- + origUSK.getURI()
- + FOR_LITERAL
- + USKFetcher.this
- + (forever ? " (forever)" : "");
- }
-
- @Override
- public short getPriority() {
- if (backgroundPoll) {
- synchronized (this) {
- if (forever) {
- if (!everInCooldown) {
- // Boost the priority initially, so that finding the first edition takes precedence
- // over ongoing polling after we're fairly sure we're not going to find anything.
- // The ongoing polling keeps the ULPRs up to date so that we will get told quickly,
- // but if we are overloaded we won't be able to keep up regardless.
- return progressPollPriority;
- } else {
- return normalPollPriority;
- }
- } else {
- // If !forever, this is a random-probe.
- // It's not that important.
- return normalPollPriority;
- }
- }
- }
- return parent.getPriorityClass();
- }
-
- @Override
- public void onEnterFiniteCooldown(ClientContext context) {
- synchronized (this) {
- everInCooldown = true;
- }
- USKFetcher.this.onCheckEnteredFiniteCooldown(context);
- }
-
- /**
- * Reports whether this attempt has ever entered a finite cooldown.
- *
- * @return {@code true} if the attempt has cooled down at least once
- */
- public synchronized boolean everInCooldown() {
- return everInCooldown;
- }
-
- /** Refreshes cached poll parameters on the underlying checker, if active. */
- public void reloadPollParameters() {
- USKChecker c;
- synchronized (this) {
- c = checker;
- }
- if (c == null) return;
- c.onChangedFetchContext();
- }
- }
-
/** Helper for Date-Based Request (DBR) hint scheduling and parsing. */
private final USKDateHintFetches dbrHintFetches;
- /** Active random-probe attempts keyed by edition number. */
- private final TreeMap The constructor wires the primary and DBR-specific {@link FetchContext} instances, captures
* the parent requester, and seeds the initial watch list using the last known slot from {@link
* USKManager}. It does not start network work; callers must invoke {@link
- * #schedule(ClientContext)} or {@link #schedule(long, ClientContext)} to begin a cycle.
- *
- * @param origUSK base USK to probe for editions; must not be null
- * @param manager manager used to look up and update known slots; must not be null
- * @param ctx base fetch context used for normal and no-store checks; must not be null
- * @param requester parent requester that supplies priority and persistence flags; must not be
- * null
- * @param minFailures minimum number of DNFs tolerated before concluding a round; non-negative
- * values are expected
+ * #schedule(ClientContext)} or {@link #schedule(long, ClientContext)} to begin a cycle. The
+ * resulting instance is mutable and designed to be used by scheduling threads; it is not
+ * persistent across restarts.
+ *
+ * Configuration flags in {@code options} can enable background polling, retain the most recent
+ * payload, or restrict work to datastore checks. Invalid combinations are not explicitly
+ * rejected, so callers should supply only supported flags.
+ *
+ * @param origUSK base USK to probe for editions; must be non-null and valid
+ * @param manager manager used to look up and update known slots; must be non-null and shared
+ * @param ctx base fetch context used for normal and no-store checks; must be non-null
+ * @param requester parent requester that supplies priority and persistence flags; must be
+ * non-null
+ * @param minFailures minimum DNFs tolerated before concluding a round; non-negative values only
* @param options bitmask of {@code OPT_*} flags controlling polling and storage behavior
* @throws IllegalArgumentException if {@code minFailures} exceeds the internal watch limit
*/
@@ -438,16 +210,15 @@ public void reloadPollParameters() {
this.origUSK = origUSK;
this.uskManager = manager;
this.origMinFailures = minFailures;
- if (origMinFailures > WATCH_KEYS) throw new IllegalArgumentException();
- firstLoop = true;
+ if (origMinFailures > USKKeyWatchSet.WATCH_KEYS) throw new IllegalArgumentException();
callbacks = new ArrayList<>();
- subscribers = new HashSet<>();
lastFetchedEdition = -1;
this.realTimeFlag = parent.realTimeFlag();
this.backgroundPoll = (options & OPT_POLL_FOREVER) != 0;
this.keepLastData = (options & OPT_KEEP_LAST_DATA) != 0;
this.checkStoreOnly = (options & OPT_CHECK_STORE_ONLY) != 0;
ctxDBR = new FetchContext(ctx, FetchContext.IDENTICAL_MASK, true, null);
+
if (ctx.getFollowRedirects()) {
this.ctx = new FetchContext(ctx, FetchContext.IDENTICAL_MASK, true, null);
this.ctx.setFollowRedirects(false);
@@ -468,11 +239,54 @@ public void reloadPollParameters() {
}
if (checkStoreOnly && LOG.isDebugEnabled()) LOG.debug("Just checking store on {}", this);
// origUSK is a hint. We *do* want to check the edition given.
- // Whereas latestSlot we've definitely fetched, we don't want to re-check.
+ // The latestSlot, by contrast, has definitely been fetched already, so we don't want to re-check it.
watchingKeys =
- new USKWatchingKeys(origUSK, Math.max(0, uskManager.lookupLatestSlot(origUSK) + 1));
- attemptsToStart = new ArrayList<>();
+ new USKKeyWatchSet(
+ origUSK,
+ Math.max(0, uskManager.lookupLatestSlot(origUSK) + 1),
+ minFailures,
+ backgroundPoll);
dbrHintFetches = new USKDateHintFetches(this, uskManager, origUSK, this.ctx, ctxDBR, parent);
+ attempts =
+ new USKAttemptManager(
+ new USKAttemptContext(this, origUSK, this.ctx, ctxNoStore, parent, realTimeFlag),
+ uskManager,
+ watchingKeys,
+ checkStoreOnly,
+ keepLastData);
+ subscriberRegistry = new USKSubscriberRegistry(watchingKeys, uskManager, attempts, origUSK);
+ completionCoordinator =
+ new USKCompletionCoordinator(
+ new USKCompletionHandler(keepLastData), uskManager, origUSK, parent, realTimeFlag);
+ successPlanner = new USKSuccessPlanner();
+ storeChecks =
+ new USKStoreCheckCoordinator(
+ USKStoreCheckCoordinator.Params.builder()
+ .watchingKeys(watchingKeys)
+ .attempts(attempts)
+ .parent(parent)
+ .checkStoreOnly(checkStoreOnly)
+ .uskManager(uskManager)
+ .origUSK(origUSK)
+ .callbacks(new StoreCheckCallbacks())
+ .realTimeFlag(realTimeFlag)
+ .build());
+ schedulingCoordinator =
+ new USKSchedulingCoordinator(attempts, storeChecks, dbrHintFetches, checkStoreOnly);
+ pollingRound =
+ new USKPollingRound(
+ new USKPollingRoundContext(
+ attempts,
+ storeChecks,
+ dbrHintFetches,
+ subscriberRegistry,
+ uskManager,
+ origUSK,
+ realTimeFlag),
+ ORIG_SLEEP_TIME,
+ true,
+ ORIG_SLEEP_TIME,
+ MAX_SLEEP_TIME);
}
/**
@@ -480,143 +294,49 @@ public void reloadPollParameters() {
*
* If the main scheduling path was waiting for DBR results, this method triggers the next
* scheduling step. It also checks whether the current polling round can be considered finished
- * for now and notifies progress callbacks.
+ * for now and notifies progress callbacks. The method is safe to call from scheduler threads and
+ * performs no blocking work beyond scheduling follow-up tasks.
+ *
+ * Calling this method multiple times is safe; repeated invocations simply re-evaluate the
+ * scheduling state and may become no-ops if the poll round has already advanced. No exceptions
+ * are thrown, and the only side effects are scheduling decisions and progress checks.
*
- * @param context the client context used for scheduling follow-up work; must not be {@code null}
+ * @param context client context used to schedule follow-up work; must be non-null
*/
public void onDBRsFinished(ClientContext context) {
- boolean needSchedule = false;
+ boolean needSchedule;
synchronized (this) {
- if (scheduleAfterDBRsDone) needSchedule = true; // Note: additional conditions may apply.
+ needSchedule = schedulingCoordinator.scheduleAfterDBRsDone();
}
if (needSchedule) schedule(context);
- checkFinishedForNow(context);
+ pollingRound.checkFinishedForNow(context, cancelled, completed);
}
/**
* Notifies that a USK slot check entered a finite cooldown.
*
- * This is used as a progress signal during a polling round to determine whether the round can
- * be considered finished for now when all active checks have cooled down at least once.
+ * This acts as a progress signal during a polling round. When all active checks have cooled
+ * down at least once, the round can be treated as finished for now and progress callbacks may be
+ * invoked. The method is a lightweight hook and does not trigger network I/O itself.
*
- * @param context client context used to perform completion checks; must not be {@code null}
+ * @param context client context used to perform completion checks; must be non-null
*/
- public void onCheckEnteredFiniteCooldown(ClientContext context) {
+ @Override
+ public void onEnterFiniteCooldown(ClientContext context) {
checkFinishedForNow(context);
}
/**
* Evaluates whether the current polling round can be treated as finished.
*
- * The method consults {@link #resolvePollingAttemptsIfAllChecksDone()} and verifies that all
- * polling attempts have entered a finite cooldown at least once. When those conditions hold, it
- * emits the round-finished callback to interested subscribers.
+ * The method consults {@link USKPollingRound} and verifies that all polling attempts have
+ * entered a finite cooldown at least once. When those conditions hold, it emits the
+ * round-finished callback to interested subscribers.
*
* @param context client context used to notify progress callbacks; must not be null
*/
private void checkFinishedForNow(ClientContext context) {
- PollingResolution res = resolvePollingAttemptsIfAllChecksDone();
- if (!res.ready) return;
- for (USKAttempt a : res.attempts) {
- // All the polling attempts currently running must have entered cooldown once.
- // I.e. they must have done all their fetches at least once.
- // If we check whether they are *currently* in cooldown, then under heavy USK load (the common
- // case!), we can see them overlapping and never notify finished.
- if (!a.everInCooldown()) {
- if (LOG.isDebugEnabled())
- LOG.debug(
- "Not finished because polling attempt {} never entered cooldown on {}", a, this);
- return;
- }
- }
- notifyFinishedForNow(context);
- }
-
- /**
- * Captures whether a polling round can be considered complete and which attempts remain.
- *
- * The resolution is used to decide when to notify progress callbacks and to gate scheduling
- * decisions that depend on the completion of store checks, random probes, and DBR hints.
- */
- private static final class PollingResolution {
- /** Whether the polling round is ready to be considered finished for now. */
- final boolean ready;
-
- /** Snapshot of active polling attempts at resolution time. */
- final USKAttempt[] attempts;
-
- /**
- * Creates a resolution result for the current polling round.
- *
- * @param ready whether all checks are complete for the current round
- * @param attempts snapshot of polling attempts to examine for cooldown state
- */
- PollingResolution(boolean ready, USKAttempt[] attempts) {
- this.ready = ready;
- this.attempts = attempts;
- }
- }
-
- /**
- * Determines whether all checks for the polling round have completed.
- *
- * The method verifies that there are no running store checks, random probes, or outstanding
- * DBR hints. It also ensures that polling attempts exist before reporting completion. When any of
- * these conditions is not met, it returns a resolution marked not ready.
- *
- * @return a resolution object indicating readiness and the current polling attempts
- */
- private PollingResolution resolvePollingAttemptsIfAllChecksDone() {
- synchronized (this) {
- if (cancelled || completed) return new PollingResolution(false, new USKAttempt[0]);
- if (runningStoreChecker != null) {
- if (LOG.isDebugEnabled())
- LOG.debug("Not finished because still running store checker on {}", this);
- return new PollingResolution(false, new USKAttempt[0]); // Still checking the store
- }
- if (!runningAttempts.isEmpty()) {
- if (LOG.isDebugEnabled())
- LOG.debug("Not finished because running attempts (random probes) on {}", this);
- return new PollingResolution(false, new USKAttempt[0]); // Still running
- }
- if (pollingAttempts.isEmpty()) {
- if (LOG.isDebugEnabled())
- LOG.debug("Not finished because no polling attempts (not started???) on {}", this);
- return new PollingResolution(false, new USKAttempt[0]); // Not started yet
- }
- if (dbrHintFetches.hasOutstanding()) {
- if (LOG.isDebugEnabled())
- LOG.debug("Not finished because still waiting for DBR attempts on {}", this);
- return new PollingResolution(false, new USKAttempt[0]); // DBRs
- }
- return new PollingResolution(true, pollingAttempts.values().toArray(new USKAttempt[0]));
- }
- }
-
- /**
- * Notifies {@link USKProgressCallback} subscribers that a polling round has completed.
- *
- * The notification is best-effort: if the fetcher has been canceled or completed, the method
- * returns without invoking callbacks. The notification does not imply that the USK has advanced,
- * only that a round of polling work has reached a stable point.
- *
- * @param context client context forwarded to progress callbacks; must not be null
- */
- private void notifyFinishedForNow(ClientContext context) {
- if (LOG.isDebugEnabled())
- LOG.debug(
- "Notifying finished for now on {} for {}{}",
- this,
- origUSK,
- this.realTimeFlag ? " (realtime)" : " (bulk)");
- USKCallback[] toCheck;
- synchronized (this) {
- if (cancelled || completed) return;
- toCheck = subscribers.toArray(new USKCallback[0]);
- }
- for (USKCallback cb : toCheck) {
- if (cb instanceof USKProgressCallback callback) callback.onRoundFinished(context);
- }
+ pollingRound.checkFinishedForNow(context, cancelled, completed);
}
// moved into USKStoreCheckerGetter to satisfy S3398
@@ -625,55 +345,46 @@ private void notifyFinishedForNow(ClientContext context) {
* Handles a "data not found" result from an attempt and advances completion logic.
*
* The method updates tracking structures, records the last fetched edition, and determines
- * whether a polling round should be concluded. It treats the DNF as a non-fatal result that
- * influences scheduling decisions rather than an immediate failure.
+ * whether a polling round should be concluded. A DNF is treated as non-fatal and is used only to
+ * drive scheduling decisions; it does not terminate the fetcher unless other completion criteria
+ * are met. This method is safe to call from worker threads used by individual attempts.
+ *
+ * DNFs may occur during datastore checks or network probes; the handler treats both sources
+ * the same and only examines attempt state, never the payload. The method does not throw and
+ * performs no blocking I/O, so callers can invoke it directly from scheduling callbacks. If the
+ * last running attempt reports DNF, the method may trigger completion for the current polling
+ * round.
*
- * @param att attempt that reported DNF; must not be null
- * @param context client context used for follow-up scheduling; must not be null
+ * @param att attempt that reported DNF; must be non-null and associated with this fetcher
+ * @param context client context used for follow-up scheduling; must be non-null
*/
- void onDNF(USKAttempt att, ClientContext context) {
+ @Override
+ public void onDNF(USKAttempt att, ClientContext context) {
if (LOG.isDebugEnabled()) LOG.debug("DNF: {}", att);
boolean finished = false;
long curLatest = uskManager.lookupLatestSlot(origUSK);
synchronized (this) {
if (completed || cancelled) return;
lastFetchedEdition = Math.max(lastFetchedEdition, att.number);
- runningAttempts.remove(att.number);
- if (runningAttempts.isEmpty()) {
+ attempts.removeRunningAttempt(att.number);
+ if (!attempts.hasRunningAttempts()) {
if (LOG.isDebugEnabled())
LOG.debug(
"latest: {}, last fetched: {}, curLatest+MIN_FAILURES: {}",
curLatest,
lastFetchedEdition,
curLatest + origMinFailures);
- if (started) {
+ if (schedulingCoordinator.isStarted()) {
finished = true;
}
- } else if (LOG.isDebugEnabled()) LOG.debug("Remaining: {}", runningAttempts());
+ } else if (LOG.isDebugEnabled())
+ LOG.debug("Remaining: {}", attempts.runningAttemptsDescription());
}
if (finished) {
finishSuccess(context);
}
}
- /**
- * Builds a diagnostic string describing current running attempts.
- *
- * @return a comma-separated description of running attempts and their state flags
- */
- private synchronized String runningAttempts() {
- StringBuilder sb = new StringBuilder();
- boolean first = true;
- for (USKAttempt a : runningAttempts.values()) {
- if (!first) sb.append(", ");
- first = false;
- sb.append(a.number);
- if (a.cancelled) sb.append("(cancelled)");
- if (a.succeeded) sb.append("(succeeded)");
- }
- return sb.toString();
- }
-
/**
* Completes the current round, either by rescheduling or by notifying callbacks.
*
@@ -702,34 +413,11 @@ private void finishSuccess(ClientContext context) {
* @param context client context used to access randomness and scheduling; must not be null
*/
private void rescheduleBackgroundPoll(ClientContext context) {
- long valAtEnd = uskManager.lookupLatestSlot(origUSK);
- long end;
- long now = System.currentTimeMillis();
- synchronized (this) {
- started = false; // don't finish before have rescheduled
-
- // Find out when we should check next ('end'), in an increasing delay (unless we make
- // progress).
- long newSleepTime = sleepTime * 2;
- if (newSleepTime > MAX_SLEEP_TIME) newSleepTime = MAX_SLEEP_TIME;
- sleepTime = newSleepTime;
- end = now + context.random.nextInt((int) sleepTime);
-
- if (valAtEnd > valueAtSchedule && valAtEnd > origUSK.suggestedEdition) {
- // We have advanced; keep trying as if we just started.
- // Only if we actually DO advance, not if we just confirm our suspicion (valueAtSchedule
- // always starts at 0).
- sleepTime = ORIG_SLEEP_TIME;
- firstLoop = false;
- end = now;
- if (LOG.isDebugEnabled())
- LOG.debug("We have advanced: at start, {} at end, {}", valueAtSchedule, valAtEnd);
- }
- if (LOG.isDebugEnabled())
- LOG.debug("Sleep time is {} this sleep is {} for {}", sleepTime, end - now, this);
- }
- schedule(end - now, context);
- checkFinishedForNow(context);
+ schedulingCoordinator.resetStarted();
+ long delay =
+ pollingRound.rescheduleBackgroundPoll(context, schedulingCoordinator.valueAtSchedule());
+ schedule(delay, context);
+ pollingRound.checkFinishedForNow(context, cancelled, completed);
}
/**
@@ -748,56 +436,29 @@ private void completeCallbacks(ClientContext context) {
completed = true;
cb = callbacks.toArray(new USKFetcherCallback[0]);
}
- uskManager.unsubscribe(origUSK, this);
- uskManager.onFinished(this);
- context.getSskFetchScheduler(realTimeFlag).schedTransient.removePendingKeys((KeyListener) this);
- long ed = uskManager.lookupLatestSlot(origUSK);
- byte[] data;
- synchronized (this) {
- if (lastRequestData == null) data = null;
- else {
- try {
- data = BucketTools.toByteArray(lastRequestData);
- } catch (IOException e) {
- LOG.error("Unable to turn lastRequestData into byte[]: caught I/O exception: {}", e, e);
- data = null;
- }
- lastRequestData.free();
- }
- }
- for (USKFetcherCallback c : cb) {
- try {
- if (ed == -1) c.onFailure(context);
- else
- c.onFoundEdition(
- new USKFoundEdition(
- ed,
- origUSK.copy(ed),
- context,
- lastWasMetadata,
- lastCompressionCodec,
- data,
- false,
- false));
- } catch (Exception e) {
- LOG.error(
- "An exception occured while dealing with a callback:{}\n{}", c, e.getMessage(), e);
- }
- }
+ completionCoordinator.completeCallbacks(context, this, cb);
}
/**
* Handles a successful attempt using the attempt's edition as the current latest.
*
* This is a convenience overload that forwards to the edition-aware handler and preserves the
- * update flag.
+ * update flag. The method expects that the provided attempt originated from this fetcher; it does
+ * not perform deep validation beyond scheduling and tracking updates.
+ *
+ * The outcome mirrors the full handler: scheduling decisions, decode choices, and manager
+ * updates are derived from the attempt's edition and the current slot state. The call is safe
+   * from worker threads and does not block beyond enqueuing follow-up work. Because this overload
+   * reads the attempt's edition from {@code att.number}, synthetic successes without an attempt
+   * must go through the edition-aware overload instead of passing {@code null} here.
*
   * @param att attempt that completed successfully; must not be null for this overload
* @param dontUpdate whether to suppress updating the USK manager with this edition
* @param block block returned by the attempt, or {@code null} for metadata-only successes
- * @param context client context used for scheduling and storage; must not be null
+ * @param context client context used for scheduling and storage; must be non-null
*/
- void onSuccess(
+ @Override
+ public void onSuccess(
USKAttempt att, boolean dontUpdate, ClientSSKBlock block, final ClientContext context) {
onSuccess(att, att.number, dontUpdate, block, context);
}
@@ -807,13 +468,20 @@ void onSuccess(
*
* The method prepares a success plan, cancels obsolete attempts, optionally decodes payload
* data, and updates the USK manager unless suppressed. It may also register new attempts to
- * continue probing near the current latest edition.
+ * continue probing near the current latest edition. When {@code dontUpdate} is {@code true}, the
+ * manager is left untouched but local bookkeeping and decode decisions still apply.
+ *
+ * The method is idempotent with respect to repeated success notifications for the same
+ * edition; it only advances the latest slot when the reported edition exceeds the current known
+ * value. Callers should pass the same {@link ClientContext} used by related scheduling operations
+ * so that follow-up tasks are enqueued on consistent queues. If the fetcher is already completed
+ * or canceled, the success is ignored and no additional scheduling occurs.
*
* @param att attempt that completed successfully; may be null for synthetic successes
- * @param curLatest edition number discovered by the attempt
+ * @param curLatest edition number discovered by the attempt; non-negative values are expected
* @param dontUpdate whether to suppress updating the USK manager with this edition
* @param block fetched block containing metadata or data; may be null for author errors
- * @param context client context used for scheduling and storage; must not be null
+ * @param context client context used for scheduling and storage; must be non-null
*/
void onSuccess(
USKAttempt att,
@@ -825,14 +493,13 @@ void onSuccess(
if (LOG.isDebugEnabled())
LOG.debug("Found edition {} for {} official is {} on {}", curLatest, origUSK, lastEd, this);
- SuccessPlan plan = prepareSuccessPlan(att, curLatest, dontUpdate, block, context, lastEd);
+ USKSuccessPlanner.SuccessPlan plan =
+ prepareSuccessPlan(att, curLatest, dontUpdate, block, context, lastEd);
if (plan == null) return; // finished or canceled
- finishCancelBefore(plan.killAttempts, context);
-
- Bucket data = decodeBlockIfNeeded(plan.decode, block, context);
+ attempts.finishCancelBefore(plan.killAttempts, context);
- applyDecodedData(plan.decode, block, data);
+ applyDecodedData(plan.decode, block, context);
if (!dontUpdate) uskManager.updateSlot(origUSK, plan.curLatest, context);
if (plan.registerNow) registerAttempts(context);
@@ -844,72 +511,9 @@ void onSuccess(
* @param decode whether decoding should be attempted for this block
* @param block block to decode; may be null when decoding is not applicable
* @param context client context used for bucket allocation; must not be null
- * @return a decoded bucket, or {@code null} when decoding was skipped or failed
- */
- private Bucket decodeBlockIfNeeded(boolean decode, ClientSSKBlock block, ClientContext context) {
- if (!decode || block == null) return null;
- return ClientSSKBlockDecoder.decode(block, context, parent.persistent());
- }
-
- /**
- * Utility for decoding {@link ClientSSKBlock} instances into buckets.
- *
- * Decoding errors are treated as non-fatal and reported via logging; the caller receives
- * {@code null} when decoding fails or cannot be completed.
- */
- private static final class ClientSSKBlockDecoder {
- /** Utility class; not instantiable. */
- private ClientSSKBlockDecoder() {}
-
- /**
- * Decodes the provided block using the context's bucket factory.
- *
- * @param block block to decode; must not be null
- * @param context client context used to obtain bucket factories; must not be null
- * @param persistent whether the resulting bucket should be persistent
- * @return the decoded bucket, or {@code null} when decoding fails
- */
- private static Bucket decode(ClientSSKBlock block, ClientContext context, boolean persistent) {
- try {
- return block.decode(context.getBucketFactory(persistent), 1025 /* it's an SSK */, true);
- } catch (KeyDecodeException _) {
- return null;
- } catch (IOException e) {
- LOG.error("An IOE occured while decoding: {}", e.getMessage(), e);
- return null;
- }
- }
- }
-
- /**
- * Applies decoded payload data to the fetcher's retained state.
- *
- * The method updates compression metadata and either retains or frees the decoded bucket based
- * on {@link #keepLastData}. When decoding was not requested, the method returns without modifying
- * state.
- *
- * @param decode whether decoding was requested for this block
- * @param block block providing metadata such as compression codec; may be null
- * @param data decoded bucket to retain or free; may be null
*/
- private void applyDecodedData(boolean decode, ClientSSKBlock block, Bucket data) {
- synchronized (this) {
- if (!decode) return;
- if (block != null) {
- lastCompressionCodec = block.getCompressionCodec();
- lastWasMetadata = block.isMetadata();
- if (keepLastData) {
- if (lastRequestData != null) lastRequestData.free();
- lastRequestData = data;
- } else if (data != null) {
- data.free();
- }
- } else {
- lastCompressionCodec = -1;
- lastWasMetadata = false;
- lastRequestData = null;
- }
- }
+ private void applyDecodedData(boolean decode, ClientSSKBlock block, ClientContext context) {
+ completionCoordinator.applyDecodedData(decode, block, context);
}
/**
@@ -927,7 +531,7 @@ private void applyDecodedData(boolean decode, ClientSSKBlock block, Bucket data)
* @param lastEd last known edition from the manager at time of success
* @return a success plan, or {@code null} if the fetcher is completed or canceled
*/
- private SuccessPlan prepareSuccessPlan(
+ private USKSuccessPlanner.SuccessPlan prepareSuccessPlan(
USKAttempt att,
long curLatest,
boolean dontUpdate,
@@ -938,116 +542,57 @@ private SuccessPlan prepareSuccessPlan(
-   * The method examines watched keys and subscriber hints to determine which editions should be
- * fetched or polled next, and it schedules those attempts immediately.
- *
- * @param curLatest current latest edition used to seed new attempts
- * @param context client context used to schedule new attempts; must not be null
- */
- private void addNewAttempts(long curLatest, ClientContext context) {
- USKWatchingKeys.ToFetch list =
- watchingKeys.getEditionsToFetch(
- curLatest,
- context.random,
- getRunningFetchEditions(),
- shouldAddRandomEditions(context.random));
- Lookup[] toPoll = list.poll;
- Lookup[] toFetch = list.fetch;
- for (Lookup i : toPoll) {
- if (LOG.isTraceEnabled()) LOG.trace("Polling {} for {}", i, this);
- attemptsToStart.add(add(i, true));
- }
- for (Lookup i : toFetch) {
- if (LOG.isDebugEnabled()) LOG.debug("Adding checker for edition {} for {}", i, origUSK);
- attemptsToStart.add(add(i, false));
- }
- }
-
- /**
- * Describes how to process a successful attempt.
- *
- * The plan tells the caller whether to decode data, which attempts to cancel, and whether new
- * attempts should be registered immediately.
- */
- private static final class SuccessPlan {
- /** Whether the payload should be decoded and retained. */
- boolean decode;
-
- /** Latest edition value to use for updates and scheduling. */
- long curLatest;
-
- /** Whether new attempts should be registered after processing. */
- boolean registerNow;
-
- /** Attempts that should be canceled because they are now obsolete. */
-    List<USKAttempt> killAttempts;
+   * The decision is delegated to the DBR hint subsystem so that hint fetch outcomes influence
+ * how aggressively random probing is used. This avoids excessive random probes when hint-driven
+ * discovery already provides sufficient coverage.
+ *
+ * @param random random source used for probabilistic scheduling; must be non-null
+ * @param isFirstLoop whether this scheduling pass is the first loop after construction
* @return {@code true} when random probes should be added for this round
*/
- private boolean shouldAddRandomEditions(Random random) {
- return dbrHintFetches.shouldAddRandomEditions(random, firstLoop);
+ @Override
+ public boolean shouldAddRandomEditions(Random random, boolean isFirstLoop) {
+ return dbrHintFetches.shouldAddRandomEditions(random, isFirstLoop);
}
/**
* Handles cancellation of an attempt and completes cancellation if needed.
*
- * @param att attempt that was canceled; must not be null
- * @param context client context used for callback notifications; must not be null
+ * The method removes the attempt from active tracking. If this was the last running attempt
+ * and the fetcher has already been marked as canceled, completion callbacks are fired. The call
+ * is safe from worker threads and performs no blocking I/O.
+ *
+ * @param att attempt that was canceled; must be non-null and associated with this fetcher
+ * @param context client context used for callback notifications; must be non-null
*/
- void onCancelled(USKAttempt att, ClientContext context) {
+ @Override
+ public void onCancelled(USKAttempt att, ClientContext context) {
synchronized (this) {
- runningAttempts.remove(att.number);
- if (!runningAttempts.isEmpty()) return;
+ attempts.removeRunningAttempt(att.number);
+ if (attempts.hasRunningAttempts()) return;
if (cancelled) finishCancelled(context);
}
@@ -1064,109 +609,7 @@ private void finishCancelled(ClientContext context) {
completed = true;
cb = callbacks.toArray(new USKFetcherCallback[0]);
}
- for (USKFetcherCallback c : cb) c.onCancelled(context);
- }
-
- /**
- * Removes attempts targeting editions below the provided threshold.
- *
- * The returned list contains the canceled attempts so the caller may propagate cancellation.
- * The method operates on polling attempts only and respects the ordering of the internal map.
- *
- * @param curLatest edition threshold; attempts below this edition are removed
- * @return list of removed attempts, or {@code null} when no removals were necessary
- */
-   * The attempt is inserted into either the polling or running map depending on {@code forever}.
- * The caller is responsible for calling {@link USKAttempt#schedule(ClientContext)} to actually
- * enqueue the attempt.
- *
- * @param l lookup descriptor containing edition and key information; must not be null
- * @param forever {@code true} to register as a polling attempt; {@code false} for a one-off probe
- * @return the created attempt, or {@code null} when duplicates or invalid state prevent creation
- * @throws IllegalArgumentException if the lookup edition is negative
- */
- private synchronized USKAttempt add(Lookup l, boolean forever) {
- long i = l.val;
- if (l.val < 0)
- throw new IllegalArgumentException(
- "Can't check <0" + FOR_LITERAL + l.val + " on " + this + FOR_LITERAL + origUSK);
- if (cancelled) return null;
- if (checkStoreOnly) return null;
- if (LOG.isDebugEnabled()) LOG.debug("Adding USKAttempt for {} for {}", i, origUSK.getURI());
- if (isDuplicateAttempt(forever, i)) return null;
- USKAttempt a = new USKAttempt(l, forever);
- if (forever) pollingAttempts.put(i, a);
- else {
- runningAttempts.put(i, a);
- }
- if (LOG.isDebugEnabled()) LOG.debug("Added {} for {}", a, origUSK);
- return a;
- }
-
- /**
- * Checks whether an attempt for the given edition is already registered.
- *
- * @param forever {@code true} to check polling attempts; {@code false} to check running probes
- * @param edition edition number to test for duplication
- * @return {@code true} when an attempt already exists for the edition
- */
- private boolean isDuplicateAttempt(boolean forever, long edition) {
- if (forever) {
- if (pollingAttempts.containsKey(edition)) {
- if (LOG.isDebugEnabled()) LOG.debug("Already polling edition: {} for {}", edition, this);
- return true;
- }
- } else {
- if (runningAttempts.containsKey(edition)) {
- if (LOG.isDebugEnabled())
- LOG.debug("Returning because already running for {}", origUSK.getURI());
- return true;
- }
- }
- return false;
+ completionCoordinator.finishCancelled(context, cb);
}
/**
@@ -1174,11 +617,10 @@ private boolean isDuplicateAttempt(boolean forever, long edition) {
*
* The returned URI reflects the base USK namespace and does not change as editions advance.
* Callers can use it for logging, diagnostics, or to derive edition-specific URIs via {@link
- * USK#copy(long)}. The method performs no I/O and does not allocate new objects beyond the
- * returned reference.
+ * USK#copy(long)}. The method performs no I/O and does not allocate new objects beyond returning
+ * the existing reference.
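+   *
+   * Illustrative sketch ({@code fetcher} stands for an existing {@code USKFetcher} instance):
+   *
+   * <pre>{@code
+   * // The base URI is stable; edition-specific URIs come from USK#copy(long), not from this value.
+   * FreenetURI base = fetcher.getURI();
+   * }</pre>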
*
- * @return an immutable URI identifying the USK being fetched; callers must not modify the
- * returned object
+ * @return immutable URI identifying the tracked USK; callers must not mutate it
*/
public FreenetURI getURI() {
return origUSK.getURI();
@@ -1190,7 +632,7 @@ public FreenetURI getURI() {
* Returns {@code true} once the fetcher has been canceled or completed. After that point it no
* longer schedules work, though background pollers may be re-armed by {@link
* #schedule(ClientContext)} if applicable. This method is safe to call from any thread and
- * provides a snapshot of state that may change immediately after return.
+ * provides a snapshot of the state that may change immediately after return.
*
* @return {@code true} if canceled or completed; otherwise {@code false}
*/
@@ -1222,9 +664,15 @@ public USK getOriginalUSK() {
* from that context. The method is idempotent and safe to call repeatedly; if the fetcher has
* already completed or been canceled, the scheduled task will effectively be a no-op.
*
+ * Delays are expressed in milliseconds and are interpreted relative to the caller's clock.
+ * This method does not validate whether the fetcher is currently registered; it simply forwards
+ * to the scheduler. Delayed scheduling preserves the same priority configuration that would be
+ * applied to an immediate call. The caller should avoid scheduling multiple delayed calls for the
+ * same instance unless intentional, as each call queues an independent timed job.
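+   *
+   * A minimal usage sketch ({@code fetcher} and {@code context} stand for an existing fetcher and
+   * its client context; the delay value is arbitrary):
+   *
+   * <pre>{@code
+   * // Re-check the USK roughly a minute from now; a non-positive delay would schedule immediately.
+   * fetcher.schedule(60_000L, context);
+   * }</pre>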
+ *
* @param delay delay in milliseconds before scheduling; non-positive schedules immediately
- * @param context client context used to reach the scheduler and timing facilities; must not be
- * {@code null}
+ * @param context client context used to reach the scheduler and timing facilities; must be
+ * non-null
*/
public void schedule(long delay, final ClientContext context) {
if (delay <= 0) {
@@ -1244,8 +692,18 @@ public void schedule(long delay, final ClientContext context) {
* ensure registration is in place. This method performs no blocking I/O directly; network work is
* delegated to the schedulers.
*
+ * Callers should supply the same {@link ClientContext} used by related requests so scheduling
+ * occurs on the expected queues. The method is idempotent with respect to registration state, but
+ * it does not coalesce concurrent calls. If the request is configured for store-only checks, this
+ * method may resolve the round immediately after store checks are complete.
+ *
+ * The plan determines whether attempts should be registered immediately, whether the fetcher
* should exit early, and whether store-only checking can be considered complete.
*
- * @param lookedUp latest slot looked up in the manager
+ * @param lookedUp the latest slot looked up in the manager
* @param startedDBRs whether DBR hint fetches were started for this round
* @param context client context used for scheduling decisions; must not be null
* @return a schedule plan describing next steps for the caller
*/
- private SchedulePlan buildSchedulePlan(
+ private USKSchedulingCoordinator.SchedulePlan buildSchedulePlan(
long lookedUp, boolean startedDBRs, ClientContext context) {
- boolean registerNow = false;
- boolean bye;
- boolean completeCheckingStore = false;
synchronized (this) {
- valueAtSchedule = Math.max(lookedUp + 1, valueAtSchedule);
- bye = cancelled || completed;
- if (!bye) {
- // subscribe() above may have called onFoundEdition and thus added a load of stuff. If so,
- // we don't need to do so here.
- if ((!checkStoreOnly)
- && attemptsToStart.isEmpty()
- && runningAttempts.isEmpty()
- && pollingAttempts.isEmpty()) {
- addNewAttempts(lookedUp, context);
- }
-
- started = true;
- if (lookedUp <= 0 && startedDBRs) {
- // If we don't know anything, do the DBRs first.
- scheduleAfterDBRsDone = true;
- } else if ((!scheduleAfterDBRsDone) || !dbrHintFetches.hasOutstanding()) {
- registerNow = !fillKeysWatching(lookedUp, context);
- }
- completeCheckingStore =
- checkStoreOnly && scheduleAfterDBRsDone && runningStoreChecker == null;
- }
+ if (cancelled || completed) return null;
}
- SchedulePlan plan = new SchedulePlan();
- plan.registerNow = registerNow;
- plan.bye = bye;
- plan.completeCheckingStore = completeCheckingStore;
- return plan;
- }
-
- /**
- * Captures the actions required to continue or conclude a scheduling pass.
- *
- * This plan is computed under synchronization and then applied without holding locks to avoid
- * long lock hold times.
- */
- private static final class SchedulePlan {
- /** Whether attempts should be registered immediately after planning. */
- boolean registerNow;
-
- /** Whether the scheduler should exit early due to cancellation or completion. */
- boolean bye;
-
- /** Whether store-only checking can be marked complete for this round. */
- boolean completeCheckingStore;
-
- /** Creates an empty schedule plan. */
- SchedulePlan() {}
+ return schedulingCoordinator.buildSchedulePlan(
+ lookedUp, startedDBRs, context, pollingRound.firstLoop());
}
/**
@@ -1352,81 +774,64 @@ private static final class SchedulePlan {
*
* After cancellation the fetcher stops scheduling any further datastore checks, DBR hint
* fetches, or edition probes, and it unsubscribes from the {@link USKManager}. In-flight attempts
- * are canceled when possible and subsequent calls that would otherwise schedule work become
- * no-ops. This method is idempotent; calling it more than once has no additional effect beyond
- * logging.
+ * are canceled when possible, and later calls that would otherwise schedule work become no-ops.
+ * This method is idempotent; calling it more than once has no additional effect beyond logging.
*
* Cancellation does not delete any previously obtained data. If background polling was
* configured, it is disabled for the lifetime of this instance. A new {@code USKFetcher} must be
* created to resume discovery.
*
- * @param context the client runtime context used to unregister listeners and cancel outstanding
- * work; must not be {@code null}
+ * Cancellation is synchronous with respect to internal bookkeeping but does not wait for
+ * external network operations to finish; those are aborted or left to complete asynchronously by
+ * the underlying schedulers. Any retained payload data is cleared, so later callbacks do not
+ * reuse stale buffers.
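+   *
+   * Illustrative sketch ({@code fetcher} and {@code context} stand for an existing fetcher and its
+   * client context):
+   *
+   * <pre>{@code
+   * // Stop all polling for this USK; a new USKFetcher is required to resume discovery.
+   * fetcher.cancel(context);
+   * }</pre>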
+ *
+ * @param context client runtime context used to unregister listeners and cancel outstanding work;
+ * must be non-null
*/
@Override
public void cancel(ClientContext context) {
if (LOG.isDebugEnabled()) LOG.debug("Cancelling {}", this);
uskManager.unsubscribe(origUSK, this);
context.getSskFetchScheduler(realTimeFlag).schedTransient.removePendingKeys((KeyListener) this);
- USKAttempt[] attempts;
+ USKAttempt[] running;
USKAttempt[] polling;
uskManager.onFinished(this);
- SendableGet storeChecker;
- Bucket data;
synchronized (this) {
if (cancelled) LOG.error("Already cancelled {}", this);
if (completed) LOG.error("Already completed {}", this);
cancelled = true;
- attempts = runningAttempts.values().toArray(new USKAttempt[0]);
- polling = pollingAttempts.values().toArray(new USKAttempt[0]);
- attemptsToStart.clear();
- runningAttempts.clear();
- pollingAttempts.clear();
- storeChecker = runningStoreChecker;
- runningStoreChecker = null;
- data = lastRequestData;
- lastRequestData = null;
+ running = attempts.snapshotRunningAttempts();
+ polling = attempts.snapshotPollingAttempts();
+ attempts.clearAllAttempts();
}
- for (USKAttempt attempt : attempts) attempt.cancel(context);
+ for (USKAttempt attempt : running) attempt.cancel(context);
for (USKAttempt p : polling) p.cancel(context);
dbrHintFetches.cancelAll(context);
- if (storeChecker != null)
- // Remove from the store checker queue.
- storeChecker.unregister(context, storeChecker.getPriorityClass());
- if (data != null) data.free();
+ storeChecks.cancelStoreChecker(context);
+ completionCoordinator.clearLastRequestData();
}
- /**
- * Set of interested USKCallbacks. Note that we don't actually send them any information - they
- * are essentially placeholders, an alternative to a refcount. This could be replaced with a Bloom
- * filter or whatever, we only need .exists and .count.
- */
-  final HashSet<USKCallback> subscribers;
-   * Subscribers are not directly notified by this class; instead they influence whether and how
+ * This class does not directly notify subscribers; instead, they influence whether and how
* aggressively the fetcher continues to probe for newer editions. Hints help bias the search and
- * are folded into the key-watching window used for datastore checks and network probes.
+ * are folded into the key-watching window used for datastore checks and network probes. The call
+ * is thread-safe and does not trigger immediate network I/O. Repeated registrations of the same
+ * callback update its hint and priority contributions without creating duplicate entries.
*
- * @param cb the subscriber whose interest influences polling priority and continuation; must not
- * be {@code null}
- * @param hint the subscriber's best-known edition number; values less than or equal to the last
- * looked-up slot are ignored; larger values expand the search window
+ * The method only mutates subscription state; it does not schedule new attempts directly. Any
+ * new scheduling decisions will happen when priorities are recomputed or when the next scheduling
+ * pass runs.
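+   *
+   * Illustrative sketch ({@code fetcher} stands for an existing fetcher and {@code myCallback} for
+   * any {@link USKCallback} implementation; the hinted edition is arbitrary):
+   *
+   * <pre>{@code
+   * // Register interest and suggest that edition 42 may already exist.
+   * fetcher.addSubscriber(myCallback, 42L);
+   * }</pre>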
+ *
+ * @param cb subscriber whose interest influences polling priority and continuation; must be
+ * non-null
+ * @param hint subscriber's best-known edition number; larger values expand the watch window
*/
public void addSubscriber(USKCallback cb, long hint) {
- Long[] hints;
- synchronized (this) {
- subscribers.add(cb);
- subscriberHints.put(cb, hint);
- hints = subscriberHints.values().toArray(new Long[0]);
- }
- updatePriorities();
- watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK));
+ USKFetcherCallback[] fetcherCallbacks = snapshotCallbacks();
+ subscriberRegistry.addSubscriber(cb, hint, fetcherCallbacks, toString());
}
/**
@@ -1436,34 +841,7 @@ public void addSubscriber(USKCallback cb, long hint) {
* selects the most urgent priorities among all interested parties.
*/
private void updatePriorities() {
- Prio prio = initialPrio();
- USKCallback[] localCallbacks;
- USKFetcherCallback[] fetcherCallbacks;
- synchronized (this) {
- localCallbacks = subscribers.toArray(new USKCallback[0]);
- // Callbacks also determine the fetcher's priority.
- // Otherwise, USKFetcherTag would have no way to tell us the priority we should run at.
- fetcherCallbacks = callbacks.toArray(new USKFetcherCallback[0]);
- }
- if (noCallbacks(localCallbacks, fetcherCallbacks)) {
- setDefaultPriorities();
- return;
- }
-
- accumulatePriorities(localCallbacks, prio);
- accumulatePriorities(fetcherCallbacks, prio);
-
- if (LOG.isDebugEnabled())
- LOG.debug(
- "Updating priorities: normal={} progress={} for {} for {}",
- prio.normal,
- prio.progress,
- this,
- origUSK);
- synchronized (this) {
- normalPollPriority = prio.normal;
- progressPollPriority = prio.progress;
- }
+ subscriberRegistry.updatePriorities(snapshotCallbacks(), toString());
}
/**
@@ -1472,91 +850,7 @@ private void updatePriorities() {
* @return priority class to use for progress-oriented polling
*/
short refreshAndGetProgressPollPriority() {
- updatePriorities();
- return getPriorityClass();
- }
-
- /** Mutable container for derived polling priorities. */
- private static final class Prio {
- /** Normal polling priority class. */
- short normal;
-
- /** Progress polling priority class. */
- short progress;
-
- /** Creates a priority container with unset values. */
- Prio() {}
- }
-
- /**
- * Creates a priority container initialized to the paused priority class.
- *
- * @return a new priority container with paused defaults
- */
- private static Prio initialPrio() {
- Prio p = new Prio();
- p.normal = RequestStarter.PAUSED_PRIORITY_CLASS;
- p.progress = RequestStarter.PAUSED_PRIORITY_CLASS;
- return p;
- }
-
- /**
- * Checks whether there are no callbacks influencing priority selection.
- *
- * @param localCallbacks subscriber callbacks to test
- * @param fetcherCallbacks fetcher-level callbacks to test
- * @return {@code true} when both callback arrays are empty
- */
- private static boolean noCallbacks(
- USKCallback[] localCallbacks, USKFetcherCallback[] fetcherCallbacks) {
- return localCallbacks.length == 0 && fetcherCallbacks.length == 0;
- }
-
- /** Restores default polling priorities for normal and progress polling. */
- private void setDefaultPriorities() {
- normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY;
- progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY;
- if (LOG.isDebugEnabled())
- LOG.debug(
- "Updating priorities: normal = {} progress = {} for {} for {}",
- normalPollPriority,
- progressPollPriority,
- this,
- origUSK);
- }
-
- /**
- * Accumulates priority preferences from subscriber callbacks.
- *
- * @param cbs callbacks providing priority hints; must not be null
- * @param prio mutable container to update with minimum priorities
- */
- private void accumulatePriorities(USKCallback[] cbs, Prio prio) {
- for (USKCallback cb : cbs) {
- short n = cb.getPollingPriorityNormal();
- if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n);
- if (n < prio.normal) prio.normal = n;
- if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n);
- short p = cb.getPollingPriorityProgress();
- if (p < prio.progress) prio.progress = p;
- }
- }
-
- /**
- * Accumulates priority preferences from fetcher-level callbacks.
- *
- * @param cbs callbacks providing priority hints; must not be null
- * @param prio mutable container to update with minimum priorities
- */
- private void accumulatePriorities(USKFetcherCallback[] cbs, Prio prio) {
- for (USKFetcherCallback cb : cbs) {
- short n = cb.getPollingPriorityNormal();
- if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n);
- if (n < prio.normal) prio.normal = n;
- if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n);
- short p = cb.getPollingPriorityProgress();
- if (p < prio.progress) prio.progress = p;
- }
+ return subscriberRegistry.refreshAndGetProgressPollPriority(snapshotCallbacks(), toString());
}
/**
@@ -1570,8 +864,8 @@ private void accumulatePriorities(USKFetcherCallback[] cbs, Prio prio) {
*
* @return {@code true} when one or more subscribers are present; {@code false} when none remain
*/
- public synchronized boolean hasSubscribers() {
- return !subscribers.isEmpty();
+ public boolean hasSubscribers() {
+ return subscriberRegistry.hasSubscribers();
}
/**
@@ -1585,8 +879,8 @@ public synchronized boolean hasSubscribers() {
* @return {@code true} when one or more callbacks are registered; otherwise {@code false}
*/
@SuppressWarnings("unused")
- public synchronized boolean hasCallbacks() {
- return !callbacks.isEmpty();
+ public boolean hasCallbacks() {
+ return subscriberRegistry.hasCallbacks(snapshotCallbacks());
}
/**
@@ -1595,19 +889,13 @@ public synchronized boolean hasCallbacks() {
* The subscriber will no longer influence polling priority or the set of editions watched in
* the datastore. Removing a non-existent subscriber has no effect. The method also updates
* internal hint tracking so that future scheduling reflects the reduced interest set, and it
- * recalculates priorities based on remaining subscribers.
+ * recalculates priorities based on remaining subscribers. The call is thread-safe and does not
+ * block on network activity.
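+   *
+   * Illustrative sketch ({@code myCallback} is the same callback instance that was registered via
+   * {@link #addSubscriber(USKCallback, long)}):
+   *
+   * <pre>{@code
+   * // Withdraw interest; the remaining subscribers determine the new polling priorities.
+   * fetcher.removeSubscriber(myCallback);
+   * }</pre>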
*
- * @param cb the subscriber to remove; {@code null} is ignored
+ * @param cb subscriber to remove; {@code null} is ignored
*/
public void removeSubscriber(USKCallback cb) {
- Long[] hints;
- synchronized (this) {
- subscribers.remove(cb);
- subscriberHints.remove(cb);
- hints = subscriberHints.values().toArray(new Long[0]);
- }
- updatePriorities();
- watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK));
+ subscriberRegistry.removeSubscriber(cb, snapshotCallbacks(), toString());
}
/**
@@ -1618,17 +906,11 @@ public void removeSubscriber(USKCallback cb) {
* because those callbacks are tracked separately from subscriber callbacks. This behavior mirrors
* legacy expectations where the same callback instance can be used in both roles.
*
- * @param cb the callback to remove; {@code null} is ignored
+ * @param cb callback to remove; {@code null} is ignored
*/
@SuppressWarnings("unused")
public void removeCallback(USKCallback cb) {
- Long[] hints;
- synchronized (this) {
- subscribers.remove(cb);
- subscriberHints.remove(cb);
- hints = subscriberHints.values().toArray(new Long[0]);
- }
- watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK));
+ subscriberRegistry.removeCallback(cb);
}
/**
@@ -1669,8 +951,8 @@ public short getPollingPriorityNormal() {
* Not supported for this class: priority is determined by internal state and the current
* progress polling class reported by {@link #getPriorityClass()}. This method is not expected to
* be called by production code and will throw an exception if invoked; callers should consult
- * {@link #refreshAndGetProgressPollPriority()} instead to refresh priorities and obtain the
- * current value.
+ * {@link #refreshAndGetProgressPollPriority()} instead to refresh priorities and get the current
+ * value.
*
* @return never returns normally
* @throws UnsupportedOperationException always, because this operation is unsupported here
@@ -1681,25 +963,27 @@ public short getPollingPriorityProgress() {
}
/**
- * {@inheritDoc}
+ * Reacts to a newly discovered USK edition.
*
* When invoked with {@code newKnownGood == true} and {@code newSlotToo == false} the callback
* is ignored because slot (edition) discovery is the only driver for follow-up work here. For
- * other cases, the method updates the manager and continues the discovery loop as appropriate for
- * the configured mode.
+ * other cases, the method updates internal bookkeeping, may cancel stale attempts, and continues
+ * the discovery loop as appropriate for the configured mode. This handler does not block; it
+ * schedules work via the same mechanisms as regular attempts.
*
- * @param foundEdition The payload describing the discovered edition and its metadata.
+ * @param foundEdition payload describing the discovered edition and its metadata; must be
+ * non-null
*/
@Override
public void onFoundEdition(USKFoundEdition foundEdition) {
if (foundEdition.newKnownGood() && !foundEdition.newSlotToo())
return; // Only interested in slots
- // Because this is frequently run off-thread, it is actually possible that the looked up edition
- // is not the same as the edition we are being notified of.
- FoundPlan plan =
+ // Because this is frequently run off-thread, it is actually possible that the looked-up edition
+ // is different from the edition we are being notified of.
+ USKSuccessPlanner.FoundPlan plan =
prepareFoundPlan(foundEdition.edition(), foundEdition.data(), foundEdition.context());
if (plan == null) return;
- finishCancelBefore(plan.killAttempts, foundEdition.context());
+ attempts.finishCancelBefore(plan.killAttempts, foundEdition.context());
if (plan.registerNow) registerAttempts(foundEdition.context());
applyFoundDecodedData(
plan.decode,
@@ -1720,7 +1004,8 @@ public void onFoundEdition(USKFoundEdition foundEdition) {
* @param context client context used for scheduling decisions; must not be null
* @return a plan describing how to apply the found edition
*/
- private FoundPlan prepareFoundPlan(long ed, byte[] data, ClientContext context) {
+ private USKSuccessPlanner.FoundPlan prepareFoundPlan(
+ long ed, byte[] data, ClientContext context) {
final long lastEd = uskManager.lookupLatestSlot(origUSK);
boolean decode;
   * When {@code decode} is {@code true}, the method updates compression metadata and retains the
* decoded data bucket if configured to keep the last data.
@@ -1760,337 +1041,79 @@ private FoundPlan prepareFoundPlan(long ed, byte[] data, ClientContext context)
*/
private void applyFoundDecodedData(
boolean decode, boolean metadata, short codec, byte[] data, ClientContext context) {
- synchronized (this) {
- if (!decode) return;
- lastCompressionCodec = codec;
- lastWasMetadata = metadata;
- if (keepLastData) {
- // Note: converting bucket to byte[] and back is inefficient
- if (lastRequestData != null) lastRequestData.free();
- try {
- lastRequestData = BucketTools.makeImmutableBucket(context.tempBucketFactory, data);
- } catch (IOException e) {
- LOG.error("Caught {}", e, e);
- }
- }
- }
+ completionCoordinator.applyFoundDecodedData(decode, metadata, codec, data, context);
}
- /** Describes how to apply a found edition and update scheduling state. */
- private static final class FoundPlan {
- /** Whether decoded data should be applied. */
- boolean decode;
-
- /** Attempts to cancel after accepting the found edition. */
-    List<USKAttempt> killAttempts;
-   * This helper merges keys from multiple sources and forwards completion notifications back to
- * the underlying sub-checkers.
- */
- class USKStoreChecker {
-
- /** Sub-checkers contributing keys to query in the datastore. */
- final USKWatchingKeys.KeyList.StoreSubChecker[] checkers;
-
- /**
- * Creates a store checker from a list of sub-checkers.
- *
- * @param c sub-checkers that contribute keys; must not be null
- */
-    public USKStoreChecker(List<USKWatchingKeys.KeyList.StoreSubChecker> c) {
-   * The method unregisters the checker, marks it complete, then schedules any pending attempts
- * based on the datastore results. When running in store-only mode, it may immediately conclude
- * the round after DBR handling.
- *
- * @param storeChecker active store checker getter instance; must not be null
- * @param checker datastore checker wrapper used to mark completion; must not be null
- * @param context client context used for scheduling and callbacks; must not be null
- * @param toNetwork whether the scheduler intended a network send for the checker
- * @return {@code toNetwork} to preserve scheduler semantics; never sends network requests here
- */
- @SuppressWarnings("java:S3516")
- boolean preRegisterStoreChecker(
- USKStoreCheckerGetter storeChecker,
- USKStoreChecker checker,
- ClientContext context,
- boolean toNetwork) {
- if (cancelled || completed) {
- storeChecker.unregister(context, storeChecker.getPriorityClass());
- synchronized (this) {
- runningStoreChecker = null;
- }
- if (LOG.isDebugEnabled())
- LOG.debug("StoreChecker preRegister aborted: fetcher cancelled/completed");
- return toNetwork; // cancel network send when scheduler planned to send
- // value ignored by scheduler when toNetwork == false
+ @Override
+ public void setScheduleAfterDBRsDone(boolean value) {
+ USKFetcher.this.schedulingCoordinator.setScheduleAfterDBRsDone(value);
}
- storeChecker.unregister(context, storeChecker.getPriorityClass());
-
- USKAttempt[] attempts;
- synchronized (this) {
- runningStoreChecker = null;
- // Note: optionally start USKAttempts only when datastore check shows no progress.
- attempts = attemptsToStart.toArray(new USKAttempt[0]);
- attemptsToStart.clear();
- if (cancelled || completed) attempts = new USKAttempt[0];
+ @Override
+ public boolean isCancelled() {
+ return USKFetcher.this.isCancelled();
}
- checker.checked();
-
- if (LOG.isDebugEnabled())
- LOG.debug(
- "Checked datastore, finishing registration for {} checkers for {} for {}",
- attempts.length,
- this,
- origUSK);
-
- if (attempts.length > 0) {
- parent.toNetwork(context);
- notifySendingToNetwork(context);
+ @Override
+ public FetchContext fetcherContext() {
+ return USKFetcher.this.ctx;
}
- processAttemptsAfterStoreCheck(attempts, context);
-
- long lastEd = uskManager.lookupLatestSlot(origUSK);
- if (!fillKeysWatching(lastEd, context) && checkStoreOnly) {
- if (LOG.isDebugEnabled()) LOG.debug("Just checking store, terminating {} ...", this);
- if (shouldDeferUntilDBRs()) {
- scheduleAfterDBRsDone = true;
- } else {
- finishSuccess(context);
- }
+ @Override
+ public USKFetcher fetcher() {
+ return USKFetcher.this;
}
-
- return toNetwork; // Store checker never sends network requests itself
- // Value is ignored when toNetwork == false
}
/**
- * Notifies progress callbacks that network sending is about to begin.
+ * Registers all staged attempts with their schedulers.
*
- * @param context client context forwarded to progress callbacks; must not be null
+ * @param context client context used to schedule attempts; must not be null
*/
- private void notifySendingToNetwork(ClientContext context) {
- USKCallback[] toCheck;
+ private void registerAttempts(ClientContext context) {
synchronized (this) {
if (cancelled || completed) return;
- toCheck = subscribers.toArray(new USKCallback[0]);
- }
- for (USKCallback cb : toCheck) {
- if (cb instanceof USKProgressCallback callback) callback.onSendingToNetwork(context);
}
+ attempts.registerAttempts(
+ new USKAttemptManager.USKAttemptRegistrationParams(
+ context, completionCoordinator.hasLastRequestData(), origUSK.suggestedEdition));
}
- /**
- * Processes attempts after the datastore check completes.
- *
- * @param attempts attempts to schedule or drop based on current known edition
- * @param context client context used to schedule attempts; must not be null
- */
- private void processAttemptsAfterStoreCheck(USKAttempt[] attempts, ClientContext context) {
- for (USKAttempt attempt : attempts) {
- long lastEd = uskManager.lookupLatestSlot(origUSK);
- synchronized (this) {
- // Note: condition may need verification.
- if (keepLastData && lastRequestData == null && lastEd == origUSK.suggestedEdition) {
- // If we want the data, then get it for the known edition, so we always get the data, so
- // USKInserter can compare it and return the old edition if it is identical.
- lastEd--;
- }
- }
- if (attempt == null) continue;
- if (attempt.number > lastEd) attempt.schedule(context);
- else {
- synchronized (this) {
- runningAttempts.remove(attempt.number);
- pollingAttempts.remove(attempt.number);
- }
- }
- }
- }
-
- /**
- * Determines whether scheduling should wait for DBR hint fetches to finish.
- *
- * @return {@code true} when outstanding DBR hint fetches are still running
- */
- private boolean shouldDeferUntilDBRs() {
- return dbrHintFetches.hasOutstanding();
+ @SuppressWarnings("BooleanMethodIsAlwaysInverted")
+ private boolean fillKeysWatching(long ed, ClientContext context) {
+ return storeChecks.fillKeysWatching(ed, context);
}
/**
@@ -2128,9 +1151,10 @@ public KeyListener makeKeyListener(ClientContext context, boolean onStartup) {
*
* The count reflects the internal watch list and is used by schedulers to estimate work
* breadth. It does not necessarily equal the number of outstanding network requests and may
- * include keys derived from subscriber hints that are not currently scheduled.
+ * include keys derived from subscriber hints that are not currently scheduled. The value is a
+ * snapshot that may change immediately after return as subscriptions evolve.
*
- * @return estimated count of watched keys
+ * @return current estimate of watched keys for scheduling heuristics and diagnostics
*/
@Override
public synchronized long countKeys() {
@@ -2155,7 +1179,7 @@ public short definitelyWantKey(Key key, byte[] saltedKey, ClientContext context)
if (!origUSK.samePubKeyHash(k)) return -1;
long lastSlot = uskManager.lookupLatestSlot(origUSK) + 1;
synchronized (this) {
- if (watchingKeys.match(k, lastSlot) != -1) return progressPollPriority;
+ if (watchingKeys.match(k, lastSlot) != -1) return subscriberRegistry.progressPriority();
}
return -1;
}
@@ -2183,7 +1207,22 @@ public HasKeyListener getHasKeyListener() {
*/
@Override
public short getPriorityClass() {
- return progressPollPriority;
+ return subscriberRegistry.progressPriority();
+ }
+
+ @Override
+ public boolean isBackgroundPoll() {
+ return backgroundPoll;
+ }
+
+ @Override
+ public short getProgressPollPriority() {
+ return getPriorityClass();
+ }
+
+ @Override
+ public short getNormalPollPriority() {
+ return subscriberRegistry.normalPriority();
}
/**
@@ -2217,19 +1256,10 @@ public SendableGet[] getRequestsForKey(Key key, byte[] saltedKey, ClientContext
*/
@Override
public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock found, ClientContext context) {
- if (!(found instanceof SSKBlock)) return false;
long lastSlot = uskManager.lookupLatestSlot(origUSK) + 1;
- long edition = watchingKeys.match((NodeSSK) key, lastSlot);
- if (edition == -1) return false;
- if (LOG.isDebugEnabled()) LOG.debug("Matched edition {} for {}", edition, origUSK);
-
- ClientSSKBlock data;
- try {
- data = watchingKeys.decode((SSKBlock) found, edition);
- } catch (SSKVerifyException _) {
- data = null;
- }
- onSuccess(null, edition, false, data, context);
+ USKKeyWatchSet.MatchedBlock matched = watchingKeys.matchBlock(key, found, lastSlot);
+ if (matched == null) return false;
+ onSuccess(null, matched.edition(), false, matched.block(), context);
return true;
}
@@ -2237,7 +1267,8 @@ public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock found, ClientCont
* Reports whether this fetcher has no further work to perform.
*
* This is used by scheduling infrastructure to decide whether the request should remain
- * registered. It mirrors {@link #isCancelled()} semantics for this fetcher.
+ * registered. It mirrors {@link #isCancelled()} semantics for this fetcher and returns a snapshot
+ * of state that may change immediately after return.
*
* @return {@code true} when canceled or completed; otherwise {@code false}
*/
@@ -2301,7 +1332,7 @@ public byte[] getWantedKey() {
* and is used for quick filtering. It matches only {@link NodeSSK} keys for the tracked USK. The
* check is conservative and may return {@code false} for keys outside the current watch window.
*
- * @param key candidate key to evaluate; must not be null
+ * @param key candidate key to evaluate; must be non-null
* @param saltedKey scheduler-provided salted key bytes; unused by this implementation
* @return {@code true} when the key appears relevant; otherwise {@code false}
*/
@@ -2319,8 +1350,8 @@ public boolean probablyWantKey(Key key, byte[] saltedKey) {
* Updates the cooldown parameters used by USK polling.
*
* This targeted mechanism applies updated cooldown values to the active contexts and live
- * polling attempts so they take effect without reconstructing requests. For broader
- * configuration, see the tracker discussion linked below.
+ * polling attempts so they take effect without reconstructing requests. It updates both the
+ * normal and no-store contexts, then refreshes the live polling attempts to adopt the change.
*
* See: https://bugs.freenetproject.org/view.php?id=4984
@@ -2334,802 +1365,7 @@ public void changeUSKPollParameters(long time, int tries) {
this.ctxNoStore.setCooldownRetries(tries);
this.ctx.setCooldownTime(time);
this.ctxNoStore.setCooldownTime(time);
- USKAttempt[] pollers;
- synchronized (this) {
- pollers = pollingAttempts.values().toArray(new USKAttempt[0]);
- }
- for (USKAttempt a : pollers) a.reloadPollParameters();
- }
-
- /**
- * Tracks the list of editions that we want to fetch, from various sources - subscribers, origUSK,
- * last known slot from USKManager, etc.
- *
- * LOCKING: Take the lock on this class last and always pass in lookup values. Do not look up
- * values in USKManager inside this class's lock.
- *
- * @author Matthew Toseland <toad@amphibian.dyndns.org> (0xE43DA450)
- */
- private class USKWatchingKeys {
-
- // Common for whole USK
- /** Public key hash for the USK namespace being tracked. */
- final byte[] pubKeyHash;
-
- /** Crypto algorithm identifier for derived SSKs. */
- final byte cryptoAlgorithm;
-
- // List of slots since the USKManager's current last known good edition.
- /** Key list anchored at the last known good slot. */
- private final KeyList fromLastKnownSlot;
-
- /** Per-subscriber key lists keyed by hinted edition. */
- private final TreeMap<Long, KeyList> fromSubscribers;
-
- /**
- * The method reuses and extends the cached document-name hashes as needed and returns a
- * sub-checker describing the keys to check in the datastore.
- *
- * @param lastSlot starting edition to check from
- * @return a sub-checker describing keys to check, or {@code null} when no work is needed
- */
- public synchronized StoreSubChecker checkStore(long lastSlot) {
- if (LOG.isDebugEnabled())
- LOG.debug("check store from {} current first slot {}", lastSlot, firstSlot);
- long checkFrom = lastSlot;
- long checkTo = lastSlot + WATCH_KEYS;
- if (checkedDatastoreTo >= checkFrom) {
- checkFrom = checkedDatastoreTo;
- }
- if (checkFrom >= checkTo) return null; // Nothing to check.
- // Update the cache.
- RemoveRangeArrayList
/**
* Hints greater than the current last-known slot are remembered and may expand the search
* window. Duplicate or stale hints are ignored. This method does not trigger immediate network
- * activity; it only updates the internal watch list used for subsequent scheduling rounds.
+ * activity; it only updates the internal watch list used for later scheduling rounds.
*
- * @param suggestedEdition the edition number to add as a hint; must be greater than the last
+ * @param suggestedEdition edition number to add as a hint; must be greater than the last
* looked-up slot to have any effect
*/
public void addHintEdition(long suggestedEdition) {
watchingKeys.addHintEdition(suggestedEdition, uskManager.lookupLatestSlot(origUSK));
}
- /** Describes a specific edition lookup and its derived key. */
- private class Lookup {
- /** Edition value represented by this lookup. */
- long val;
-
- /** Client SSK key derived for the edition. */
- ClientSSK key;
-
- /** Whether this lookup should bypass store checks. */
- boolean ignoreStore;
-
- /** Creates an empty lookup descriptor. */
- Lookup() {}
-
- @Override
- public boolean equals(Object o) {
- if (o instanceof Lookup lookup) {
- return lookup.val == val;
- } else return false;
- }
-
- @Override
- public int hashCode() {
- return Long.hashCode(val);
- }
-
- @Override
- public String toString() {
- return origUSK + ":" + val;
- }
- }
-
/**
* Resumes the request after a restart.
*
* USKFetcher does not persist across restarts; callers should recreate it via the manager
- * instead of resuming.
+ * instead of resuming. The method exists to satisfy interface requirements and always throws.
*
- * @param context client context that would be used for resuming; must not be null
+ * @param context client context that would be used for resuming; must be non-null
* @throws UnsupportedOperationException always, because this fetcher is not persistent
*/
@Override
@@ -3195,9 +1399,10 @@ public void onResume(ClientContext context) {
/**
* Notifies the fetcher that the node is shutting down.
*
- * USKFetcher does not persist state, so shutdown handling is not supported.
+ * USKFetcher does not persist state, so shutdown handling is not supported. The method exists
+ * to satisfy interface requirements and always throws.
*
- * @param context client context associated with shutdown; must not be null
+ * @param context client context associated with shutdown; must be non-null
* @throws UnsupportedOperationException always, because this fetcher is not persistent
*/
@Override
diff --git a/src/main/java/network/crypta/client/async/USKKeyWatchSet.java b/src/main/java/network/crypta/client/async/USKKeyWatchSet.java
new file mode 100644
index 0000000000..934f786f6c
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKKeyWatchSet.java
@@ -0,0 +1,1137 @@
+package network.crypta.client.async;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import network.crypta.keys.ClientSSK;
+import network.crypta.keys.ClientSSKBlock;
+import network.crypta.keys.Key;
+import network.crypta.keys.KeyBlock;
+import network.crypta.keys.NodeSSK;
+import network.crypta.keys.SSKBlock;
+import network.crypta.keys.SSKVerifyException;
+import network.crypta.keys.USK;
+import network.crypta.support.RemoveRangeArrayList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tracks edition windows and lookup plans for a single {@link USK} namespace.
+ *
+ * This watch set aggregates the last known good edition, per-subscriber hints, and persistent
+ * hints to decide which editions should be fetched immediately and which can be polled in the
+ * background. It maintains short caches of derived document-name hashes so matches against inbound
+ * keys and datastore blocks can be resolved without recomputing hashes on each request. State
+ * evolves as callers report new hints and as successful lookups advance the baseline slot.
+ *
+ * All mutable state is guarded by this instance's lock. Callers must acquire the lock on this
+ * object last and pass in any looked-up values; do not perform external lookups while holding this
+ * lock.
+ *
+ * The constructor initializes the shared hash cache for the last known good edition and
+ * records the configuration used to plan future lookups. If the USK already suggests an edition
+ * ahead of {@code lookedUp}, a subscriber list is seeded so that edition is fetched even before
+ * explicit hint updates arrive.
+ *
+ * @param origUSK base USK whose editions and keys will be tracked; must not be null
+ * @param lookedUp current best-known slot from the manager; {@code -1} means unknown
+ * @param origMinFailures minimum number of failed edition probes to schedule past {@code
+ * lookedUp}
+ * @param backgroundPoll whether newly scheduled lookups should be polled rather than fetched
+ */
+ USKKeyWatchSet(USK origUSK, long lookedUp, int origMinFailures, boolean backgroundPoll) {
+ this.origUSK = origUSK;
+ this.origMinFailures = origMinFailures;
+ this.backgroundPoll = backgroundPoll;
+ this.pubKeyHash = origUSK.getPubKeyHash();
+ this.cryptoAlgorithm = origUSK.cryptoAlgorithm;
+ if (LOG.isDebugEnabled()) LOG.debug("Creating KeyList from last known good: {}", lookedUp);
+ fromLastKnownSlot = new KeyList(lookedUp);
+ fromSubscribers = new TreeMap<>();
+ if (origUSK.suggestedEdition > lookedUp)
+ fromSubscribers.put(origUSK.suggestedEdition, new KeyList(origUSK.suggestedEdition));
+ }
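// Hedged, standalone illustration of the seeding rule above; the numbers are made up and the
// String value stands in for a KeyList.
java.util.TreeMap<Long, String> seeded = new java.util.TreeMap<>();
long lookedUp = 7;          // manager's last known good slot
long suggestedEdition = 12; // edition suggested by the USK itself
if (suggestedEdition > lookedUp) {
  seeded.put(suggestedEdition, "KeyList(" + suggestedEdition + ")");
}
// seeded is now {12=KeyList(12)}; with lookedUp = 15 it would stay empty.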
+
+ /**
+ * Bundles lookup descriptors to fetch immediately and to poll in the background.
+ *
+ * The two arrays represent a single planning cycle produced by {@link #getEditionsToFetch}.
+ * Callers typically enqueue the {@link #fetch} entries for immediate network fetches and schedule
+ * {@link #poll} entries for lower-priority background polling. The arrays are point-in-time snapshots
+ * of the lists provided to the constructor.
+ */
+ static class ToFetch {
+
+ /**
+ * Creates a fetch plan from the provided lookup lists.
+ *
+ * The constructor copies the list contents into fixed arrays. The original lists are not
+ * retained, so callers may continue to mutate them after construction without affecting the
+ * stored plan. The ordering of entries is preserved from the input lists.
+ *
+ * @param toFetch2 lookups to fetch immediately; non-null, in planned execution order
+ * @param toPoll2 lookups to poll without immediate fetch; non-null, in planned order
+ */
+ public ToFetch(List<Lookup> toFetch2, List<Lookup> toPoll2) {
+
+ /**
+ * This array represents higher-priority fetches that should be started right away. Entries
+ * are unique for a given planning cycle and already filtered against the running set. The array
+ * is owned by this instance and should be treated as read-only by callers.
+ */
+ public final Lookup[] fetch;
+
+ /**
+ * Lookups to poll in background cycles.
+ *
+ * This array represents lower-priority probes suitable for periodic polling. Entries are
+ * stable for the planning cycle and already deduplicated against active lookups. The array is
+ * owned by this instance and should be treated as read-only by callers.
+ */
+ public final Lookup[] poll;
+ }
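// The ToFetch constructor body is elided in this hunk; a minimal sketch of the copy its Javadoc
// describes, assuming a plain toArray snapshot rather than the actual (unknown) implementation:
//
//   public ToFetch(List<Lookup> toFetch2, List<Lookup> toPoll2) {
//     fetch = toFetch2.toArray(new Lookup[0]);
//     poll = toPoll2.toArray(new Lookup[0]);
//   }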
+
+ /**
+ * Builds a plan of editions to fetch immediately and to poll in the background.
+ *
+ * The plan is derived from the last known good slot, active subscriber hints, and optional
+ * random sampling. The method removes lookups that are already running from the supplied list, so
+ * callers can reuse that list as a deduplication set. When background polling is enabled, the
+ * method prefers polling new editions rather than immediate fetches. The returned plan is a
+ * snapshot; later updates to hints do not retroactively change it.
+ *
+ * @param lookedUp current best-known slot from the manager; {@code -1} when unknown
+ * @param random random source used for optional sampling; must not be null when {@code doRandom}
+ * @param alreadyRunning lookups already in flight; entries that remain valid are removed in-place
+ * @param doRandom whether to include randomized probes beyond deterministic windows
+ * @param isFirstLoop whether this is the first polling loop of a watch cycle
+ * @return plan containing lookups to fetch immediately and to poll later
+ */
+ public synchronized ToFetch getEditionsToFetch(
+ long lookedUp,
+ Random random,
+ List<Lookup> alreadyRunning,
+ boolean doRandom,
+ boolean isFirstLoop) {
+
+ /**
+ * The supplied hint array is sorted and deduplicated, then merged with persistent hints and
+ * the USK's suggested edition when it is still ahead of {@code lookedUp}. Any hints at or below
+ * the current slot are discarded. The subscriber map is then updated to reflect the surviving
+ * hints, creating or removing {@link KeyList} instances as needed.
+ *
+ * @param hints latest subscriber hint values; non-null, may contain duplicates
+ * @param lookedUp current best-known slot used to discard stale hints and prune lists
+ */
+ public synchronized void updateSubscriberHints(Long[] hints, long lookedUp) {
+ List
+
+ /**
+ * The hint is stored in the persistent set so it survives transient subscribers. If the hint
+ * is new and still ahead of {@code lookedUp}, a {@link KeyList} is created to schedule fetches
+ * for that edition. Hints at or behind the current slot are ignored.
+ *
+ * @param suggestedEdition edition number to add; must be greater than {@code lookedUp}
+ * @param lookedUp the current best-known slot used to ignore stale hints
+ */
+ public synchronized void addHintEdition(long suggestedEdition, long lookedUp) {
+ if (suggestedEdition <= lookedUp) return;
+ if (!persistentHints.add(suggestedEdition)) return;
+ if (fromSubscribers.containsKey(suggestedEdition)) return;
+ fromSubscribers.put(suggestedEdition, new KeyList(suggestedEdition));
+ }
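// The body of updateSubscriberHints is elided above; the following standalone sketch shows the
// merge its Javadoc describes using plain JDK collections. The class, the method name, and the
// String placeholder for KeyList are illustrative, not the actual implementation.
import java.util.TreeMap;
import java.util.TreeSet;

final class HintMergeSketch {
  static TreeMap<Long, String> mergeHints(
      Long[] hints, TreeSet<Long> persistentHints, long suggestedEdition, long lookedUp) {
    TreeSet<Long> wanted = new TreeSet<>();            // sorts and deduplicates in one step
    for (Long h : hints) if (h != null) wanted.add(h);
    wanted.addAll(persistentHints);                    // hints that outlive transient subscribers
    if (suggestedEdition > lookedUp) wanted.add(suggestedEdition);
    wanted.headSet(lookedUp, true).clear();            // discard hints at or below the current slot
    TreeMap<Long, String> lists = new TreeMap<>();     // one watch list per surviving hint
    for (long edition : wanted) lists.put(edition, "KeyList(" + edition + ")");
    return lists;
  }
}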
+
+ /**
+ * Estimates the number of watched keys based on the current subscriber state.
+ *
+ * The returned value multiplies the configured watch window by the number of active subscriber
+ * lists, plus the base watch list. The estimate does not account for overlapping editions across
+ * lists, so callers should treat it as an upper bound for scheduling heuristics.
+ *
+ * @return estimated count of watched keys for scheduling and load decisions
+ */
+ public synchronized long size() {
+ // Note: does not account for overlap between the base list and subscriber lists.
+ return WATCH_KEYS + (long) fromSubscribers.size() * WATCH_KEYS;
+ }
+
+ /**
+ * Builds datastore sub-checkers for the current watch lists.
+ *
+ * The method creates sub-checkers that cover a window of {@link #WATCH_KEYS} editions for the
+ * last known good slot and any subscriber-provided hints. Each sub-checker encapsulates the set
+ * of {@link NodeSSK} keys that should be checked in the datastore. When no checks are required,
+ * the method returns {@code null} to avoid unnecessary work.
+ *
+ * @param lastSlot the last known good edition used to seed checks and prune stale lists
+ * @return datastore sub-checkers to run, or {@code null} when no checks are required
+ */
+ public synchronized List
+
+ /**
+ * The method derives the expected {@link ClientSSK} from the USK and verifies that the
+ * document-name hash in the block matches the derived value. On success, the block is wrapped in
+ * a {@link ClientSSKBlock} for higher-level consumers. Verification is strict and will throw when
+ * the block does not correspond to the expected edition.
+ *
+ * @param block low-level block to decode; must not be null and must be an SSK block
+ * @param edition edition number that the block is expected to represent
+ * @return decoded client block for the edition, ready for higher-level processing
+ * @throws SSKVerifyException if the block does not match the expected document-name hash
+ */
+ public ClientSSKBlock decode(SSKBlock block, long edition) throws SSKVerifyException {
+ ClientSSK csk = origUSK.getSSK(edition);
+ if (!Arrays.equals(csk.ehDocname, block.getKey().getKeyBytes())) {
+ throw new SSKVerifyException("Docname hash mismatch for decoded block");
+ }
+ return ClientSSKBlock.construct(block, csk);
+ }
+
+ /**
+ * Attempts to match the provided node key against watched key lists.
+ *
+ * The method checks the base watch list anchored at the last known good slot and then scans
+ * any subscriber-provided lists. Subscriber lists whose edition anchors are at or behind {@code
+ * lastSlot} are discarded as stale. Matching is performed against cached document-name hashes and
+ * returns the edition number when the key corresponds to a watched slot.
+ *
+ * @param key node key to match; must not be null and must belong to the same USK
+ * @param lastSlot the last known good edition used to prune stale lists and bound matching
+ * @return matched edition number, or {@code -1} when no match is found
+ */
+ public synchronized long match(NodeSSK key, long lastSlot) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Trying to match {} from slot {} for {}", key, lastSlot, origUSK);
+ long ret = fromLastKnownSlot.match(key, lastSlot);
+ if (ret != -1) return ret;
+
+ for (Iterator
+
+ /**
+ * The check is strict: the key must be a {@link NodeSSK} that shares the USK public key hash
+ * and must match one of the currently watched editions. When a match is found, the supplied
+ * {@code progressPriority} is returned so callers can preserve their scheduling class.
+ *
+ * @param key candidate key to evaluate; must not be null and must be a {@link NodeSSK}
+ * @param lastSlot the last known good edition used to bound the match
+ * @param progressPriority priority class to return on match
+ * @return priority class when wanted, or {@code -1} when not wanted
+ */
+ public short definitelyWantKey(Key key, long lastSlot, short progressPriority) {
+ if (!(key instanceof NodeSSK k)) return -1;
+ if (!origUSK.samePubKeyHash(k)) return -1;
+ synchronized (this) {
+ if (match(k, lastSlot) != -1) return progressPriority;
+ }
+ return -1;
+ }
+
+ /**
+ * Reports whether a key is probably wanted by this watch set.
+ *
+ * This check is a softer version of {@link #definitelyWantKey(Key, long, short)} and returns
+ * only a boolean. The key must be a {@link NodeSSK} for the same USK and must match a watched
+ * edition. The result reflects the current watch lists and may change as hints are updated.
+ *
+ * @param key candidate key to evaluate; must not be null and must be a {@link NodeSSK}
+ * @param lastSlot the last known good edition used to bound the match
+ * @return {@code true} if the key appears relevant, {@code false} otherwise
+ */
+ @SuppressWarnings("unused")
+ public boolean probablyWantKey(Key key, long lastSlot) {
+ if (!(key instanceof NodeSSK k)) return false;
+ if (!origUSK.samePubKeyHash(k)) return false;
+ synchronized (this) {
+ return match(k, lastSlot) != -1;
+ }
+ }
+
+ /**
+ * Attempts to match and decode a found block against the watch lists.
+ *
+ * The method first verifies that the incoming key and block are of the SSK type, then attempts
+ * to match the key against the watched editions. If a match is found, the block is decoded and
+ * verified against the expected document-name hash for that edition. Verification failures return
+ * a {@link MatchedBlock} with a {@code null} payload to indicate that the key matched but the
+ * block failed to decode.
+ *
+ * @param key key associated with the found block; must be a {@link NodeSSK}
+ * @param found block returned from the datastore; must be an {@link SSKBlock}
+ * @param lastSlot the last known good edition used to bound the match
+ * @return a matched block result, or {@code null} when no match was found
+ */
+ public MatchedBlock matchBlock(Key key, KeyBlock found, long lastSlot) {
+ if (!(found instanceof SSKBlock sskBlock)) return null;
+ if (!(key instanceof NodeSSK)) return null;
+ long edition;
+ synchronized (this) {
+ edition = match((NodeSSK) key, lastSlot);
+ }
+ if (edition == -1) return null;
+ if (LOG.isDebugEnabled()) LOG.debug("Matched edition {} for {}", edition, origUSK);
+
+ ClientSSKBlock data;
+ try {
+ data = decode(sskBlock, edition);
+ } catch (SSKVerifyException _) {
+ data = null;
+ }
+ return new MatchedBlock(edition, data);
+ }
+
+ /**
+ * Describes a matched block and its resolved edition number.
+ *
+ * @param edition resolved edition value that matched the watch list
+ * @param block decoded client block, or {@code null} when verification failed
+ */
+ record MatchedBlock(long edition, ClientSSKBlock block) {}
+
+ /**
+ * Caches derived document-name hashes for a sliding window of editions.
+ *
+ * Each {@code KeyList} is anchored at a specific base edition and maintains a fixed-size
+ * window of {@link #WATCH_KEYS} hashes derived from the owning USK. The cache is stored in a weak
+ * reference, so it can be reclaimed when memory is tight, with regeneration on demand. The list
+ * is used to match incoming {@link NodeSSK} keys or to build datastore checkers without
+ * recomputing hashes for every request.
+ */
+ class KeyList {
+
+ /**
+ * USK edition number represented by cache index 0.
+ *
+ * This value advances as the cache is realigned to newer base editions. It is always greater
+ * than or equal to zero and acts as the base offset for indexing into {@link #cache}.
+ */
+ long firstSlot;
+
+ /**
+ * Weakly referenced cache of document-name hashes for each watched slot.
+ *
+ * The list contains {@code WATCH_KEYS} entries whenever populated. It can be cleared by the
+ * garbage collector, in which case it is regenerated on the next access.
+ */
+ private WeakReference<RemoveRangeArrayList<byte[]>> cache;
+
+ /**
+ * Initialized to {@code -1} to represent "unchecked". Updated as sub-checkers report
+ * completion in {@link StoreSubChecker#checked()}.
+ */
+ private long checkedDatastoreFrom = -1;
+
+ /**
+ * The highest edition (exclusive) for which datastore checks have been confirmed.
+ *
+ * Initialized to {@code -1} to represent "unchecked". Updated as sub-checkers report
+ * completion in {@link StoreSubChecker#checked()}.
+ */
+ private long checkedDatastoreTo = -1;
+
+ /**
+ * Creates a key list anchored at the provided slot.
+ *
+ * The cache window is initialized immediately with {@link #WATCH_KEYS} hashes derived from
+ * the USK. The window can later be realigned as newer base editions are reported, preserving
+ * any overlapping entries when possible.
+ *
+ * @param slot the first slot to include in the cache; must be zero or higher
+ */
+ public KeyList(long slot) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Creating KeyList from {} on {} {}", slot, origUSK, this, new Exception("debug"));
+ firstSlot = slot;
+ RemoveRangeArrayList
+
+ /**
+ * The method advances forward from {@code lookedUp}, scheduling up to {@code
+ * origMinFailures} editions. Already-running lookups are removed from {@code alreadyRunning} to
+ * avoid duplicate scheduling. When background polling is enabled, the editions are appended to
+ * the poll list instead of the immediate fetch list.
+ *
+ * @param toFetch destination list for editions that should be fetched immediately when not in
+ * background polling mode; entries are appended, not cleared
+ * @param toPoll destination list for editions that should be polled (no immediate fetch) when
+ * in background polling mode; entries are appended, not cleared
+ * @param lookedUp current best known slot (edition) used as a base for computing the next
+ * candidate editions; values below zero are treated as zero
+ * @param alreadyRunning list of lookups currently in progress; this method removes any edition
+ * that remains valid so it is not scheduled twice
+ */
+ public synchronized void getNextEditions(
+ List<Lookup> toFetch,
+ List<Lookup> toPoll,
+ long lookedUp,
+ List<Lookup> alreadyRunning) {
+
+ /**
+ * The lookup is deduplicated against both the target list and the already-running list. If a
+ * matching lookup is found in {@code alreadyRunning}, it is removed and no new entry is added.
+ * The resulting {@link Lookup} contains the derived {@link ClientSSK} key for the edition.
+ *
+ * @param lookupList destination list for new lookups; entries are appended in order
+ * @param alreadyRunning list of lookups already in progress; this method removes matches
+ * @param ed edition number to add as a lookup candidate
+ * @param ignoreStore whether this lookup should bypass store checks
+ * @return {@code true} when the edition was added, {@code false} when deduplicated
+ */
+ public boolean getEditionIfNotAlreadyRunning(
+ List<Lookup> lookupList,
+ List<Lookup> alreadyRunning,
+ long ed,
+ boolean ignoreStore) {
+
+ /**
+ * The method samples future editions using {@link #sampleGeometric(long, Random)} and adds
+ * them to {@code toFetch} until {@code allowed} entries are accepted. Each sampled edition is
+ * deduplicated against the running set. The random probes help catch up to fast-moving editions
+ * without needing to scan every intermediate slot.
+ *
+ * @param toFetch destination list for random probes; entries are appended
+ * @param lookedUp current best-known slot used as a base for sampling
+ * @param alreadyRunning list of lookups already in progress; used for deduplication
+ * @param random random source used for sampling; must not be null
+ * @param allowed maximum number of random editions to add
+ */
+ public synchronized void getRandomEditions(
+ List<Lookup> toFetch,
+ long lookedUp,
+ List<Lookup> alreadyRunning,
+ Random random,
+ int allowed) {
+
+ /**
+ * The sampling uses a mix of means to bias toward nearer editions while still allowing
+ * larger jumps. The returned edition is always greater than or equal to {@code baseEdition}.
+ *
+ * @param baseEdition base edition offset for sampling; must be zero or higher
+ * @param random random source used to sample; must not be null
+ * @return sampled edition number at or above {@code baseEdition}
+ */
+ private static long sampleGeometric(long baseEdition, Random random) {
+ // Geometric distribution.
+ // 20% chance of mean 100, 80% chance of mean 10. Thanks evanbd.
+ while (true) {
+ int mean = random.nextInt(5) == 0 ? 100 : 10;
+ double u = uniform01FromLong(random);
+ long fetch = baseEdition + (long) Math.floor(Math.log(u) / Math.log(1.0 - 1.0 / mean));
+ if (fetch >= baseEdition) return fetch;
+ }
+ }
+
+ /**
+ * Creates a uniform random value in (0,1] using {@link Random#nextLong()}.
+ *
+ * The helper converts the positive {@code long} range into a floating-point value in the
+ * open interval (0,1]. It never returns zero, which avoids taking {@code log(0)} when sampling.
+ *
+ * @param random random source used for sampling; must not be null
+ * @return uniform value in the open interval (0,1]
+ */
+ private static double uniform01FromLong(Random random) {
+ long bits = random.nextLong() & Long.MAX_VALUE; // 0 .. 2^63-1
+ return (bits + 1.0) / (Long.MAX_VALUE + 1.0);
+ }
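// Standalone sketch reproducing the sampling formula above, to make the jump sizes concrete.
// With mean m the sampled offset is floor(ln(u) / ln(1 - 1/m)); for example m = 10 and u = 0.5
// give floor(-0.693 / -0.105) = 6, i.e. a probe six editions past the base. The class name and
// the seed are illustrative only.
import java.util.Random;

final class GeometricSampleSketch {
  public static void main(String[] args) {
    Random random = new Random(42);
    long base = 1000;
    long totalJump = 0;
    int samples = 10_000;
    for (int i = 0; i < samples; i++) {
      int mean = random.nextInt(5) == 0 ? 100 : 10;             // the 20% / 80% mixture above
      double u = ((random.nextLong() & Long.MAX_VALUE) + 1.0)
          / (Long.MAX_VALUE + 1.0);                             // uniform in (0, 1], never zero
      long edition = base + (long) Math.floor(Math.log(u) / Math.log(1.0 - 1.0 / mean));
      totalJump += edition - base;
    }
    // The average jump is roughly 0.2 * 100 + 0.8 * 10, i.e. about 27-28 editions past the base.
    System.out.println("average jump = " + (double) totalJump / samples);
  }
}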
+
+ /**
+ * Attempts to add a random edition if it is not already scheduled.
+ *
+ * The lookup is deduplicated against the running set and uses the {@code ignoreStore} flag
+ * when the sampled edition is close enough to {@code lookedUp}. The method logs diagnostic
+ * information when debug logging is enabled.
+ *
+ * @param toFetch destination list for random probes; entries are appended
+ * @param lookedUp current best-known slot used for range decisions
+ * @param alreadyRunning list of lookups already in progress; used for deduplication
+ * @param fetch sampled edition to add
+ * @return {@code true} when the edition was added to the fetch list
+ */
+ private boolean tryAddRandomEdition(
+ List<Lookup> toFetch,
+ long lookedUp,
+ List<Lookup> alreadyRunning,
+ long fetch) {
+
+ /**
+ * The sub-checker encapsulates a contiguous range of editions and the corresponding {@link
+ * NodeSSK} keys. Once the caller verifies those keys against the datastore, it should invoke
+ * {@link #checked()} to update the parent {@link KeyList} state.
+ */
+ public class StoreSubChecker {
+
+ /**
+ * Keys to check in the datastore for this range.
+ *
+ * The array is ordered by increasing edition and is owned by the sub-checker.
+ */
+ final NodeSSK[] keysToCheck;
+
+ /**
+ * The edition from which the datastore will be checked after execution.
+ *
+ * This value is inclusive and marks the start of the checked range.
+ */
+ private final long checkedFrom;
+
+ /**
+ * The edition up to which the datastore will be checked after execution.
+ *
+ * This value is exclusive and marks the end of the checked range.
+ */
+ private final long checkedTo;
+
+ /**
+ * Creates a sub-checker for a contiguous range of editions.
+ *
+ * The caller is responsible for running datastore checks for each key in {@code
+ * keysToCheck} and then calling {@link #checked()} to advance the cached datastore bounds.
+ *
+ * @param keysToCheck node keys to check; must not be null and in ascending edition order
+ * @param checkFrom starting edition of the range, inclusive
+ * @param checkTo ending edition of the range, exclusive
+ */
+ private StoreSubChecker(NodeSSK[] keysToCheck, long checkFrom, long checkTo) {
+ this.keysToCheck = keysToCheck;
+ this.checkedFrom = checkFrom;
+ this.checkedTo = checkTo;
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Checking datastore from {} to {} for {} on {}", checkFrom, checkTo, origUSK, this);
+ }
+
+ /**
+ * Marks this checker as completed and updates datastore bounds.
+ *
+ * The method updates the parent {@link KeyList} with the completed range. It keeps the
+ * existing lower bound if it already covers {@code checkedFrom}, but always advances the
+ * upper bound to {@code checkedTo}. Callers should invoke this once per sub-checker after all
+ * keys have been verified.
+ */
+ void checked() {
+ synchronized (KeyList.this) {
+ // Update the start bound only when the previous range does not already cover it.
+ if (!(checkedDatastoreTo >= checkedFrom && checkedDatastoreFrom <= checkedFrom)) {
+ checkedDatastoreFrom = checkedFrom;
+ }
+ checkedDatastoreTo = checkedTo;
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Checked from {} to {} (now overall is {} to {}) for {}",
+ checkedFrom,
+ checkedTo,
+ checkedDatastoreFrom,
+ checkedDatastoreTo,
+ origUSK);
+ }
+ }
+ }
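// Hedged sketch of the caller flow the StoreSubChecker Javadoc above implies; fetchFromStore and
// onFound are hypothetical placeholders for the caller's datastore lookup and result handling:
//
//   StoreSubChecker sub = keyList.checkStore(lastSlot);
//   if (sub != null) {
//     for (NodeSSK k : sub.keysToCheck) {
//       SSKBlock found = fetchFromStore(k);   // hypothetical datastore lookup
//       if (found != null) onFound(k, found); // hypothetical handling of a hit
//     }
//     sub.checked(); // advance the cached "already checked" bounds
//   }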
+
+ /**
+ * Builds a datastore checker for a window of slots starting at {@code lastSlot}.
+ *
+ * The checker describes a contiguous range of editions beginning at {@code lastSlot} and
+ * spanning up to {@link #WATCH_KEYS} entries. The method reuses cached hashes whenever possible
+ * and skips work already covered by prior datastore checks. When no new range remains, the
+ * method returns {@code null}.
+ *
+ * @param lastSlot starting edition to check from; values below zero are treated as zero
+ * @return a sub-checker describing keys to check, or {@code null} when no work is needed
+ */
+ public synchronized StoreSubChecker checkStore(long lastSlot) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("check store from {} current first slot {}", lastSlot, firstSlot);
+ long checkFrom = lastSlot;
+ long checkTo = lastSlot + WATCH_KEYS;
+ if (checkedDatastoreTo >= checkFrom) {
+ checkFrom = checkedDatastoreTo;
+ }
+ if (checkFrom >= checkTo) return null; // Nothing to check.
+ // Update the cache.
+ RemoveRangeArrayList
+
+ /**
+ * The cache is regenerated if it has been reclaimed by the garbage collector. Otherwise, the
+ * existing list is realigned to {@code curBaseEdition} by trimming or extending entries as
+ * needed. The returned cache is always populated with {@link #WATCH_KEYS} entries.
+ *
+ * @param curBaseEdition base edition used to realign the cache
+ * @return updated cache containing hashes for the current window
+ */
+ synchronized RemoveRangeArrayList<byte[]> updateCache(long curBaseEdition) {
+
+ /**
+ * If the cache is missing, it is regenerated for {@code curBaseEdition}. Otherwise, the
+ * method checks the current cache first and only performs a realignment when needed. A {@code
+ * null} key skips matching and simply ensures the cache is aligned.
+ *
+ * @param key key to match, or {@code null} to only update the cache
+ * @param curBaseEdition new base edition used to realign the cache
+ * @return edition number for the key, or {@code -1} when not matched
+ */
+ public synchronized long match(NodeSSK key, long curBaseEdition) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("match from {} current first slot {}", curBaseEdition, firstSlot);
+ RemoveRangeArrayList
+
+ /**
+ * This helper avoids rechecking the entire cache by updating only the sections that changed
+ * due to the base edition moving forward or backward. When the base edition regresses, the
+ * cache is left intact and matching uses the existing window.
+ *
+ * @param key key to match; may be {@code null} to skip matching
+ * @param curBaseEdition edition to align the cache with
+ * @param ehDocnames cached document-name hashes to update
+ * @return edition number for the key, or {@code -1} when not matched
+ */
+ private long match(NodeSSK key, long curBaseEdition, RemoveRangeArrayList<byte[]> ehDocnames) {
+
+ /**
+ * If the new base edition is beyond the cached window, the cache is rebuilt from scratch. If
+ * there is overlap, the cache is trimmed at the front and extended at the end. Matching is
+ * limited to the updated window when a key is provided.
+ *
+ * @param key key to match; may be {@code null} to skip matching
+ * @param curBaseEdition new base edition
+ * @param ehDocnames cached document-name hashes to update
+ * @return edition number for the key, or {@code -1} when not matched
+ */
+ private long handleFirstSlotBehind(
+ NodeSSK key, long curBaseEdition, RemoveRangeArrayList<byte[]> ehDocnames) {
+
+ /**
+ * The method treats the regression as a transient condition and continues to use the current
+ * cache window. Matching is therefore performed against the existing cache rather than
+ * rebuilding it for the older base edition.
+ *
+ * @param key key to match; may be {@code null} to skip matching
+ * @param ehDocnames cached document-name hashes to consult
+ * @param curBaseEdition new base edition that lags behind {@code firstSlot}
+ * @return edition number for the key, or {@code -1} when not matched
+ */
+ private long handleFirstSlotAhead(
+ NodeSSK key, RemoveRangeArrayList<byte[]> ehDocnames, long curBaseEdition) {
+
+ /**
+ * The method compares the key's bytes against the cached hash window between {@code offset}
+ * and {@code offset + size}. It returns the edition number derived from {@code firstSlot} when
+ * a match is found. The scan is linear over the specified slice.
+ *
+ * @param key key to match; must not be null
+ * @param ehDocnames cached document-name hashes to scan
+ * @param offset start offset within the cache
+ * @param size number of entries to scan
+ * @param firstSlot edition represented by cache index 0
+ * @return matched edition number, or {@code -1} when not found
+ */
+ private long innerMatch(
+ NodeSSK key,
+ RemoveRangeArrayList<byte[]> ehDocnames,
+ int offset,
+ int size,
+ long firstSlot) {
+
+ /**
+ * The method derives {@link ClientSSK} instances for each edition starting at {@code
+ * baseEdition} and appends their document-name hashes to {@code ehDocnames}. The caller is
+ * responsible for ensuring the cache size does not exceed {@link #WATCH_KEYS}.
+ *
+ * @param baseEdition edition to start from
+ * @param keys number of keys to add
+ * @param ehDocnames cache to append to; must not be null
+ */
+ private void generate(long baseEdition, int keys, RemoveRangeArrayList<byte[]> ehDocnames) {
+
+ /**
+ * Lookup instances are value-like and are considered equal based on their edition value.
+ * Callers populate {@link #key} and {@link #ignoreStore} when scheduling network fetches or
+ * datastore checks. The {@link #label} is used for log output only and may be null.
+ */
+ static class Lookup {
+ /**
+ * Edition value represented by this lookup.
+ *
+ * Equality and hashing are based solely on this value.
+ */
+ long val;
+
+ /**
+ * Client SSK key derived for the edition.
+ *
+ * Set when the lookup is scheduled so callers can initiate fetches without recomputing.
+ */
+ ClientSSK key;
+
+ /**
+ * Whether this lookup should bypass store checks.
+ *
+ * When {@code true}, the lookup is intended for direct fetch without checking the datastore.
+ */
+ boolean ignoreStore;
+
+ /**
+ * Descriptive label for logging, usually the owning USK.
+ *
+ * This field is optional and may be {@code null}.
+ */
+ String label;
+
+ /**
+ * Creates an empty lookup descriptor.
+ *
+ * Fields are populated by the scheduling methods that construct lookups.
+ */
+ Lookup() {}
+
+ @Override
+ public boolean equals(Object o) {
+ if (o instanceof Lookup lookup) {
+ return lookup.val == val;
+ } else return false;
+ }
+
+ @Override
+ public int hashCode() {
+ return Long.hashCode(val);
+ }
+
+ @Override
+ public String toString() {
+ return (label == null ? "?" : label) + ":" + val;
+ }
+ }
+}
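// The KeyList cache bodies (updateCache, generate, and the match overloads) are elided in this
// hunk; the standalone sketch below shows the weak-reference regeneration pattern their Javadoc
// describes. WATCH_KEYS_SKETCH and deriveDocnameHash are illustrative stand-ins, and the sketch
// rebuilds the whole window instead of realigning an overlapping one as the real code is said to.
import java.lang.ref.WeakReference;
import java.util.ArrayList;

final class HashCacheSketch {
  private static final int WATCH_KEYS_SKETCH = 50;
  private long firstSlot;
  private WeakReference<ArrayList<byte[]>> cache;

  synchronized ArrayList<byte[]> updateCache(long curBaseEdition) {
    ArrayList<byte[]> hashes = (cache == null) ? null : cache.get();
    if (hashes != null && firstSlot == curBaseEdition) {
      return hashes; // cache survived and is already aligned
    }
    hashes = new ArrayList<>(WATCH_KEYS_SKETCH);
    for (int i = 0; i < WATCH_KEYS_SKETCH; i++) {
      hashes.add(deriveDocnameHash(curBaseEdition + i)); // one hash per watched edition
    }
    firstSlot = curBaseEdition;
    cache = new WeakReference<>(hashes);
    return hashes;
  }

  private byte[] deriveDocnameHash(long edition) {
    // Placeholder: the real code derives the SSK document-name hash for the edition.
    return java.nio.ByteBuffer.allocate(Long.BYTES).putLong(edition).array();
  }
}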
diff --git a/src/main/java/network/crypta/client/async/USKPollingRound.java b/src/main/java/network/crypta/client/async/USKPollingRound.java
new file mode 100644
index 0000000000..761e32c6fb
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKPollingRound.java
@@ -0,0 +1,277 @@
+package network.crypta.client.async;
+
+import network.crypta.keys.USK;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Encapsulates completion checks and background rescheduling for a single USK polling round.
+ *
+ * This helper owns the lightweight state that bridges store checks, polling attempts, and
+ * subscriber notifications while a {@link USKFetcher} progresses through one background polling
+ * cycle. It evaluates whether all required checks have finished, ensures that attempts have cooled
+ * down at least once, and emits progress callbacks when a round can be treated as finished for now.
+ * It also tracks the current backoff interval and decides when to reset the backoff based on
+ * observed progress in the manager.
+ *
+ * The instance is mutable and not internally synchronized; callers are expected to invoke its
+ * methods from a scheduling thread or otherwise serialize access. Each instance is scoped to a
+ * single fetcher and USK, and it is typically reused across multiple scheduling ticks until the
+ * polling cycle completes.
+ *
+ * The helper keeps references to stable collaborators from {@code context} and stores the
+ * current backoff window and loop state. The initial sleep time is used for the first delay and
+ * is later doubled (with a cap) until progress is observed. The baseline and maximum sleep times
+ * are retained so that backoff resets can restore the original interval without consulting the
+ * caller again.
+ *
+ * @param context shared collaborators used to resolve attempts, store checks, and subscribers;
+ * must be non-null and scoped to a single fetcher
+ * @param sleepTime initial backoff delay in milliseconds for the first rescheduling attempt
+ * @param firstLoop whether the round should treat the next scheduling step as the initial loop
+ * @param origSleepTime baseline delay in milliseconds to restore when progress is observed
+ * @param maxSleepTime upper bound in milliseconds for exponential backoff delays
+ */
+ USKPollingRound(
+ USKPollingRoundContext context,
+ long sleepTime,
+ boolean firstLoop,
+ long origSleepTime,
+ long maxSleepTime) {
+ this.attempts = context.attempts();
+ this.storeChecks = context.storeChecks();
+ this.dbrHintFetches = context.dbrHintFetches();
+ this.subscribers = context.subscribers();
+ this.uskManager = context.uskManager();
+ this.origUSK = context.origUSK();
+ this.realTimeFlag = context.realTimeFlag();
+ this.sleepTime = sleepTime;
+ this.firstLoop = firstLoop;
+ this.origSleepTime = origSleepTime;
+ this.maxSleepTime = maxSleepTime;
+ }
+
+ /**
+ * Outcome of resolving polling attempts for a round.
+ *
+ * The {@link #ready} flag indicates whether all prerequisite checks have finished, and the
+ * {@link #attempts} array provides a snapshot of polling attempts relevant to the completion
+ * decision. The snapshot may be empty when the round is not ready to complete.
+ */
+ static final class PollingResolution {
+ /** True when the round is eligible for completion checks. */
+ final boolean ready;
+
+ /** Snapshot of polling attempts considered for completion. */
+ final USKAttempt[] attempts;
+
+ /**
+ * Creates a resolution snapshot for the current round.
+ *
+ * @param ready whether the round is ready for completion evaluation
+ * @param attempts snapshot of polling attempts; may be empty but never null
+ */
+ PollingResolution(boolean ready, USKAttempt[] attempts) {
+ this.ready = ready;
+ this.attempts = attempts;
+ }
+ }
+
+ /**
+ * Determines whether all prerequisite checks are complete and snapshots polling attempts.
+ *
+ * The method checks for active datastore scans, running random probes, missing polling
+ * attempts, and outstanding DBR hint fetches. If any prerequisite is still in flight, it returns
+ * a non-ready resolution with an empty attempt list. When all checks are complete, it returns a
+ * ready resolution with a snapshot of current polling attempts for further evaluation.
+ *
+ * @param cancelled whether the owning fetcher has been canceled and should stop checking
+ * @param completed whether the owning fetcher has already completed and should not re-evaluate
+ * @return a resolution indicating readiness and a snapshot of polling attempts for the round
+ */
+ PollingResolution resolvePollingAttemptsIfAllChecksDone(boolean cancelled, boolean completed) {
+ if (cancelled || completed) return new PollingResolution(false, new USKAttempt[0]);
+ if (storeChecks.isStoreCheckRunning()) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Not finished because still running store checker on {}", this);
+ return new PollingResolution(false, new USKAttempt[0]);
+ }
+ if (attempts.hasRunningAttempts()) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Not finished because running attempts (random probes) on {}", this);
+ return new PollingResolution(false, new USKAttempt[0]);
+ }
+ if (attempts.hasNoPollingAttempts()) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Not finished because no polling attempts (not started???) on {}", this);
+ return new PollingResolution(false, new USKAttempt[0]);
+ }
+ if (dbrHintFetches.hasOutstanding()) {
+ if (LOG.isDebugEnabled())
+ LOG.debug("Not finished because still waiting for DBR attempts on {}", this);
+ return new PollingResolution(false, new USKAttempt[0]);
+ }
+ return new PollingResolution(true, attempts.snapshotPollingAttempts());
+ }
+
+ /**
+ * Evaluates whether the current round is finished for now and notifies callbacks if appropriate.
+ *
+ * This method first resolves whether prerequisite checks have completed, then confirms that
+ * every polling attempt has entered a cooldown at least once. If any attempt has not cooled down,
+ * the round remains active and no callbacks are fired. When all attempts have cooled down, it
+ * delegates to {@link #notifyFinishedForNow(ClientContext, boolean, boolean)} to inform progress
+ * subscribers.
+ *
+ * @param context client context used for callback notifications; must be non-null
+ * @param cancelled whether the owning fetcher has been canceled and should halt notifications
+ * @param completed whether the owning fetcher has already completed and should not notify
+ */
+ void checkFinishedForNow(ClientContext context, boolean cancelled, boolean completed) {
+ PollingResolution res = resolvePollingAttemptsIfAllChecksDone(cancelled, completed);
+ if (!res.ready) return;
+ for (USKAttempt a : res.attempts) {
+ if (!a.everInCooldown()) {
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Not finished because polling attempt {} never entered cooldown on {}", a, this);
+ return;
+ }
+ }
+ notifyFinishedForNow(context, cancelled, completed);
+ }
+
+ /**
+ * Notifies progress callbacks that the round is finished for now.
+ *
+ * The notification is skipped when the fetcher has been canceled or completed. When invoked,
+ * the method snapshots subscribers and calls {@link USKProgressCallback#onRoundFinished} for each
+ * eligible callback, allowing clients to observe that a steady-state polling cycle has settled.
+ *
+ * @param context client context forwarded to callbacks; must be non-null for valid notifications
+ * @param cancelled whether the owning fetcher has been canceled and should suppress callbacks
+ * @param completed whether the owning fetcher has completed and should suppress callbacks
+ */
+ void notifyFinishedForNow(ClientContext context, boolean cancelled, boolean completed) {
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Notifying finished for now on {} for {}{}",
+ this,
+ origUSK,
+ this.realTimeFlag ? " (realtime)" : " (bulk)");
+ if (cancelled || completed) return;
+ USKCallback[] toCheck = subscribers.snapshotSubscribers();
+ for (USKCallback cb : toCheck) {
+ if (cb instanceof USKProgressCallback callback) callback.onRoundFinished(context);
+ }
+ }
+
+ /**
+ * Computes the next backoff delay for background polling and updates internal state.
+ *
+ * The sleep interval is doubled on each call until it reaches {@link #maxSleepTime}. If the
+ * manager reports that progress has been made since the round was scheduled, the sleep interval
+ * is reset to {@link #origSleepTime}, {@link #firstLoop} is cleared, and the delay is set to zero
+ * so the next cycle runs immediately. The returned value is the delay in milliseconds to pass to
+ * the scheduler.
+ *
+ * @param context client context used for randomness when choosing the next delay
+ * @param valueAtSchedule latest slot value captured when the round was scheduled
+ * @return delay in milliseconds until the next polling cycle should be scheduled
+ */
+ long rescheduleBackgroundPoll(ClientContext context, long valueAtSchedule) {
+ long valAtEnd = uskManager.lookupLatestSlot(origUSK);
+ long end;
+ long now = System.currentTimeMillis();
+ long newSleepTime = sleepTime * 2;
+ if (newSleepTime > maxSleepTime) newSleepTime = maxSleepTime;
+ sleepTime = newSleepTime;
+ end = now + context.random.nextInt((int) sleepTime);
+
+ if (valAtEnd > valueAtSchedule && valAtEnd > origUSK.suggestedEdition) {
+ sleepTime = origSleepTime;
+ firstLoop = false;
+ end = now;
+ if (LOG.isDebugEnabled())
+ LOG.debug("We have advanced: at start, {} at end, {}", valueAtSchedule, valAtEnd);
+ }
+ if (LOG.isDebugEnabled())
+ LOG.debug("Sleep time is {} this sleep is {} for {}", sleepTime, end - now, this);
+ return end - now;
+ }
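// Standalone illustration of the capped doubling above; the concrete durations are made-up
// example values, not the fetcher's actual defaults.
long origSleepTimeExample = 30L * 60 * 1000;     // e.g. 30 minutes
long maxSleepTimeExample = 24L * 60 * 60 * 1000; // e.g. 24 hours
long sleep = origSleepTimeExample;
for (int round = 1; round <= 8; round++) {
  sleep = Math.min(sleep * 2, maxSleepTimeExample);
  System.out.println("round " + round + ": next delay drawn from [0, " + sleep / 60000 + " min)");
}
// Prints 60, 120, 240, 480, 960, 1440, 1440, 1440 minutes; observing progress resets the
// interval to the original value and schedules the next cycle immediately (delay 0).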
+
+ /**
+ * Returns the current backoff sleep interval.
+ *
+ * @return sleep duration in milliseconds for the next scheduling decision
+ */
+ @SuppressWarnings("unused")
+ long sleepTime() {
+ return sleepTime;
+ }
+
+ /**
+ * Indicates whether the round is still in its initial loop.
+ *
+ * @return {@code true} when the round has not yet exited the first loop
+ */
+ boolean firstLoop() {
+ return firstLoop;
+ }
+
+ /**
+ * Updates whether the polling round should treat the next cycle as the first loop.
+ *
+ * @param value {@code true} to mark the round as being in its first loop, otherwise {@code false}
+ */
+ @SuppressWarnings({"unused", "SameParameterValue"})
+ void setFirstLoop(boolean value) {
+ firstLoop = value;
+ }
+}
diff --git a/src/main/java/network/crypta/client/async/USKPollingRoundContext.java b/src/main/java/network/crypta/client/async/USKPollingRoundContext.java
new file mode 100644
index 0000000000..97d0f351bf
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKPollingRoundContext.java
@@ -0,0 +1,26 @@
+package network.crypta.client.async;
+
+import network.crypta.keys.USK;
+
+/**
+ * Shared dependencies for configuring a {@link USKPollingRound}.
+ *
+ * This bundles the stable collaborators used during polling rounds so they can be reused when
+ * scheduling background polling.
+ *
+ * @param attempts polling attempt manager used to track active attempts
+ * @param storeChecks coordinator for datastore checks
+ * @param dbrHintFetches date-hint fetch coordinator
+ * @param subscribers registry for USK callbacks
+ * @param uskManager USK manager used to look up latest slots
+ * @param origUSK base USK that is being polled
+ * @param realTimeFlag whether polling is scheduled with real-time bias
+ */
+record USKPollingRoundContext(
+ USKAttemptManager attempts,
+ USKStoreCheckCoordinator storeChecks,
+ USKDateHintFetches dbrHintFetches,
+ USKSubscriberRegistry subscribers,
+ USKManager uskManager,
+ USK origUSK,
+ boolean realTimeFlag) {}
diff --git a/src/main/java/network/crypta/client/async/USKPriorityPolicy.java b/src/main/java/network/crypta/client/async/USKPriorityPolicy.java
new file mode 100644
index 0000000000..42a43935e6
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKPriorityPolicy.java
@@ -0,0 +1,212 @@
+package network.crypta.client.async;
+
+import network.crypta.node.RequestStarter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Computes effective polling priority classes for USK fetchers.
+ *
+ * This policy aggregates priority hints from subscriber callbacks and fetcher-level callbacks to
+ * determine the priority classes used by {@link USKAttemptManager} when scheduling background
+ * polls. Callers typically invoke {@link #updatePriorities(USKCallback[], USKFetcherCallback[],
+ * String)} whenever callback sets change so that the current polling priorities reflect the most
+ * urgent subscriber. The policy maintains the derived priorities as mutable state and exposes them
+ * through lightweight accessors.
+ *
+ * The policy favors the minimum (highest urgency) priority class among all callbacks. When no
+ * callbacks are present, it resets to default normal and progress priorities. Instances are not
+ * thread-safe; callers should synchronize externally or confine usage to a single scheduling
+ * thread. The logic is intentionally conservative to avoid oscillation and uses the existing
+ * scheduler constants without performing any blocking work.
+ *
+ * The manager reference is used to reload polling parameters whenever derived priorities
+ * change. The policy does not take ownership of the manager and assumes its lifecycle matches
+ * that of the owning fetcher.
+ *
+ * @param attempts attempt manager that should be updated after priority changes; must be non-null
+ */
+ USKPriorityPolicy(USKAttemptManager attempts) {
+ this.attempts = attempts;
+ }
+
+ /**
+ * Returns the current normal polling priority class.
+ *
+ * The value reflects the minimum priority requested by all callbacks or the default priority
+ * when no callbacks are present.
+ *
+ * @return priority class used for steady-state background polling
+ */
+ short normalPriority() {
+ return normalPollPriority;
+ }
+
+ /**
+ * Returns the current progress polling priority class.
+ *
+ * The value reflects the minimum progress priority requested by callbacks, which can be more
+ * urgent than the normal priority when fast progress is desired.
+ *
+ * @return priority class used when progress-oriented polling is needed
+ */
+ short progressPriority() {
+ return progressPollPriority;
+ }
+
+ /**
+ * Recomputes polling priorities based on the active callback sets.
+ *
+ * The method aggregates the minimum normal and progress priorities across subscriber and
+ * fetcher callbacks. If no callbacks are present, it falls back to default priorities. After
+ * updating the derived priorities, it triggers a reload of poll parameters so that ongoing
+ * attempts adopt the new scheduling classes. The method is deterministic and idempotent for the
+ * same input arrays.
+ *
+ * @param subscribers subscriber callbacks providing polling priority preferences; must not be
+ * null but may be empty
+ * @param fetcherCallbacks fetcher callbacks providing polling priority preferences; must not be
+ * null but may be empty
+ * @param fetcherName human-readable identifier used only for debug logging
+ */
+ void updatePriorities(
+ USKCallback[] subscribers, USKFetcherCallback[] fetcherCallbacks, String fetcherName) {
+ Prio prio = initialPrio();
+ if (noCallbacks(subscribers, fetcherCallbacks)) {
+ setDefaultPriorities(fetcherName);
+ return;
+ }
+
+ accumulatePriorities(subscribers, prio);
+ accumulatePriorities(fetcherCallbacks, prio);
+
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Updating priorities: normal={} progress={} for {}",
+ prio.normal,
+ prio.progress,
+ fetcherName);
+ normalPollPriority = prio.normal;
+ progressPollPriority = prio.progress;
+ attempts.reloadPollParameters();
+ }
+
+ /**
+ * Resets polling priorities to the default values and reloads poll parameters.
+ *
+ * This is used when no callbacks provide priority hints. It restores normal and progress
+ * priorities to their configured defaults and then refreshes the attempt manager's scheduling
+ * parameters.
+ *
+ * @param fetcherName human-readable identifier used only for debug logging
+ */
+ private void setDefaultPriorities(String fetcherName) {
+ normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY;
+ progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY;
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Updating priorities: normal = {} progress = {} for {}",
+ normalPollPriority,
+ progressPollPriority,
+ fetcherName);
+ attempts.reloadPollParameters();
+ }
+
+ /** Mutable container for derived polling priorities. */
+ private static final class Prio {
+ /** Normal polling priority class. */
+ short normal;
+
+ /** Progress polling priority class. */
+ short progress;
+
+ /** Creates a priority container with unset values. */
+ Prio() {}
+ }
+
+ /**
+ * Creates a priority container initialized to the paused priority class.
+ *
+ * @return a new priority container with paused defaults
+ */
+ private static Prio initialPrio() {
+ Prio p = new Prio();
+ p.normal = RequestStarter.PAUSED_PRIORITY_CLASS;
+ p.progress = RequestStarter.PAUSED_PRIORITY_CLASS;
+ return p;
+ }
+
+ /**
+ * Checks whether there are no callbacks influencing priority selection.
+ *
+ * @param localCallbacks subscriber callbacks to test
+ * @param fetcherCallbacks fetcher-level callbacks to test
+ * @return {@code true} when both callback arrays are empty
+ */
+ private static boolean noCallbacks(
+ USKCallback[] localCallbacks, USKFetcherCallback[] fetcherCallbacks) {
+ return localCallbacks.length == 0 && fetcherCallbacks.length == 0;
+ }
+
+ /**
+ * Accumulates priority preferences from subscriber callbacks.
+ *
+ * @param cbs callbacks providing priority hints; must not be null
+ * @param prio mutable container to update with minimum priorities
+ */
+ private static void accumulatePriorities(USKCallback[] cbs, Prio prio) {
+ for (USKCallback cb : cbs) {
+ short n = cb.getPollingPriorityNormal();
+ if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n);
+ if (n < prio.normal) prio.normal = n;
+ if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n);
+ short p = cb.getPollingPriorityProgress();
+ if (p < prio.progress) prio.progress = p;
+ }
+ }
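// Worked example of the minimum-wins aggregation above (a lower numeric class is more urgent):
// callbacks requesting normal priorities {4, 2, 6} and progress priorities {3, 1, 5} yield
// normalPollPriority = 2 and progressPollPriority = 1; with no callbacks at all the defaults
// DEFAULT_NORMAL_POLL_PRIORITY / DEFAULT_PROGRESS_POLL_PRIORITY are restored instead.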
+
+ /**
+ * Accumulates priority preferences from fetcher-level callbacks.
+ *
+ * @param cbs callbacks providing priority hints; must not be null
+ * @param prio mutable container to update with minimum priorities
+ */
+ private static void accumulatePriorities(USKFetcherCallback[] cbs, Prio prio) {
+ for (USKFetcherCallback cb : cbs) {
+ short n = cb.getPollingPriorityNormal();
+ if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n);
+ if (n < prio.normal) prio.normal = n;
+ if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n);
+ short p = cb.getPollingPriorityProgress();
+ if (p < prio.progress) prio.progress = p;
+ }
+ }
+}
diff --git a/src/main/java/network/crypta/client/async/USKSchedulingCoordinator.java b/src/main/java/network/crypta/client/async/USKSchedulingCoordinator.java
new file mode 100644
index 0000000000..85da97fe31
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKSchedulingCoordinator.java
@@ -0,0 +1,173 @@
+package network.crypta.client.async;
+
+/**
+ * Coordinates scheduling decisions for a USK polling round.
+ *
+ * This coordinator encapsulates the state required to decide whether a round should register a
+ * datastore check, schedule network activity, or conclude early when store-only checks are
+ * complete. Callers provide the current known edition value and an execution context; the
+ * coordinator updates its internal flags and returns a {@link SchedulePlan} describing the next
+ * action. The class keeps track of whether a scheduling cycle has started, when DBR hint fetches
+ * should gate scheduling, and the last value observed at schedule time.
+ *
+ * The coordinator is mutable and synchronizes its public methods to keep the state consistent.
+ * It is typically owned by a {@link USKFetcher} and invoked from scheduling threads, so callers
+ * should avoid holding external locks while calling into it. The logic favors correctness over
+ * immediate scheduling by deferring actions until prerequisite datastore checks or DBR hint fetches
+ * have finished.
+ *
+ * The coordinator holds references to the attempt manager, store check coordinator, and DBR
+ * hint fetches so it can build a consistent schedule plan. The {@code checkStoreOnly} flag
+ * influences whether network activity is scheduled or whether the coordinator should conclude
+ * once datastore checks complete.
+ *
+ * @param attempts attempt manager that tracks polling attempts; must be non-null
+ * @param storeChecks store check coordinator used to register datastore checks; must be non-null
+ * @param dbrHintFetches DBR hint fetch tracker used to gate scheduling; must be non-null
+ * @param checkStoreOnly whether the round should avoid network fetches and only check the store
+ */
+ USKSchedulingCoordinator(
+ USKAttemptManager attempts,
+ USKStoreCheckCoordinator storeChecks,
+ USKDateHintFetches dbrHintFetches,
+ boolean checkStoreOnly) {
+ this.attempts = attempts;
+ this.storeChecks = storeChecks;
+ this.dbrHintFetches = dbrHintFetches;
+ this.checkStoreOnly = checkStoreOnly;
+ }
+
+ /**
+ * Plan returned by {@link #buildSchedulePlan(long, boolean, ClientContext, boolean)}.
+ *
+ * The plan indicates whether a datastore check should be registered immediately, whether the
+ * caller should conclude the round, and whether store-only checking has completed. The flags are
+ * deliberately simple and are interpreted by the caller to decide the next scheduling step.
+ */
+ static final class SchedulePlan {
+ /** Whether to register a datastore check immediately. */
+ boolean registerNow;
+
+ /** Whether the caller should stop scheduling and conclude the round. */
+ boolean bye;
+
+ /** Whether store-only checking has completed and should be finalized. */
+ boolean completeCheckingStore;
+
+ /** Creates an empty plan; fields default to {@code false}. */
+ SchedulePlan() {}
+ }
+
+ /**
+ * Builds the next scheduling plan for the current polling round.
+ *
+ * The method records the latest observed edition value, ensures polling attempts are scheduled
+ * when no attempts are running, and determines whether datastore checks should be registered
+ * immediately. When DBR hint fetches are in progress, it may defer scheduling until those hints
+ * are complete. In store-only mode, the returned plan can indicate that checking is complete once
+ * outstanding datastore checks finish.
+ *
+ * @param lookedUp latest edition value observed before scheduling; may be negative for unknown
+ * @param startedDBRs whether DBR hint fetches have already started for this round
+ * @param context client context used to schedule new polling attempts; must be non-null
+ * @param firstLoop whether the current scheduling cycle is the first loop of the round
+ * @return a schedule plan describing the next action the caller should take
+ */
+ synchronized SchedulePlan buildSchedulePlan(
+ long lookedUp, boolean startedDBRs, ClientContext context, boolean firstLoop) {
+ boolean registerNow = false;
+ boolean completeCheckingStore;
+ valueAtSchedule = Math.max(lookedUp + 1, valueAtSchedule);
+ if ((!checkStoreOnly)
+ && !attempts.hasPendingAttempts()
+ && !attempts.hasRunningAttempts()
+ && attempts.hasNoPollingAttempts()) {
+ attempts.addNewAttempts(lookedUp, context, firstLoop);
+ }
+ started = true;
+ if (lookedUp <= 0 && startedDBRs) {
+ scheduleAfterDBRsDone = true;
+ } else if ((!scheduleAfterDBRsDone) || !dbrHintFetches.hasOutstanding()) {
+ registerNow = !storeChecks.fillKeysWatching(lookedUp, context);
+ }
+ completeCheckingStore =
+ checkStoreOnly && scheduleAfterDBRsDone && !storeChecks.isStoreCheckRunning();
+ SchedulePlan plan = new SchedulePlan();
+ plan.registerNow = registerNow;
+ plan.bye = false;
+ plan.completeCheckingStore = completeCheckingStore;
+ return plan;
+ }
+
+ /**
+ * Returns whether a scheduling cycle has been started.
+ *
+ * @return {@code true} once a scheduling plan has been built for this coordinator
+ */
+ synchronized boolean isStarted() {
+ return started;
+ }
+
+ /** Resets the started flag so the next call treats the cycle as not yet started. */
+ synchronized void resetStarted() {
+ started = false;
+ }
+
+ /**
+ * Updates whether scheduling should wait for DBR hint fetches to complete.
+ *
+ * @param value {@code true} to defer scheduling until DBR hint fetches finish
+ */
+ synchronized void setScheduleAfterDBRsDone(boolean value) {
+ scheduleAfterDBRsDone = value;
+ }
+
+ /**
+ * Returns whether scheduling is currently deferred until DBR hint fetches finish.
+ *
+ * @return {@code true} when scheduling should wait for DBR hint completion
+ */
+ synchronized boolean scheduleAfterDBRsDone() {
+ return scheduleAfterDBRsDone;
+ }
+
+ /**
+ * Returns the latest value captured at schedule time.
+ *
+ * @return the last {@code lookedUp} value recorded when building a schedule plan
+ */
+ synchronized long valueAtSchedule() {
+ return valueAtSchedule;
+ }
+}
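The intended consumption of buildSchedulePlan is visible in the USKFetcher.schedule() hunk later in this diff; the sketch below restates that loop in isolation. It assumes it lives inside the fetcher (registerAttempts and finishSuccess are the fetcher's own methods) and is illustrative rather than the actual implementation.

    // Sketch only: one scheduling step driven by the coordinator's plan.
    void driveOneRound(
        USKSchedulingCoordinator coordinator,
        long lookedUp,
        boolean startedDBRs,
        ClientContext context,
        boolean firstLoop) {
      USKSchedulingCoordinator.SchedulePlan plan =
          coordinator.buildSchedulePlan(lookedUp, startedDBRs, context, firstLoop);
      if (plan.bye) {
        return; // the round was cancelled or already completed
      }
      if (plan.registerNow) {
        registerAttempts(context); // no DBR gate: register the datastore check now
      } else if (plan.completeCheckingStore) {
        finishSuccess(context); // store-only round with nothing left outstanding
      }
      // Otherwise wait: a running store check or DBR hint fetch will call back.
    }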
diff --git a/src/main/java/network/crypta/client/async/USKStoreCheckCoordinator.java b/src/main/java/network/crypta/client/async/USKStoreCheckCoordinator.java
new file mode 100644
index 0000000000..5a38049287
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKStoreCheckCoordinator.java
@@ -0,0 +1,572 @@
+package network.crypta.client.async;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import network.crypta.client.FetchContext;
+import network.crypta.keys.Key;
+import network.crypta.keys.USK;
+import network.crypta.node.SendableGet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Coordinates datastore checking and store-check request lifecycle for USK polling.
+ *
+ * This coordinator owns the lifecycle for datastore-only checks that precede or complement
+ * polling attempts. It registers store checkers with the scheduler, monitors their completion, and
+ * then decides whether to start polling attempts or conclude in store-only mode. The coordinator is
+ * constructed with shared dependencies and a callback interface that bridges back into the owning
+ * fetcher when scheduling and completion decisions are made.
+ *
+ * The class is mutable and synchronizes around its running checker state to prevent concurrent
+ * registration. Callers generally invoke {@link #fillKeysWatching(long, ClientContext)} and {@link
+ * #preRegisterStoreChecker(USKStoreCheckerGetter, USKStoreChecker, ClientContext, boolean)} from
+ * scheduler threads. The design favors correctness and safe cancellation over aggressive
+ * parallelism; only one store check may run at a time, and callers must respect cancellation flags
+ * supplied via {@link USKStoreCheckCallbacks}.
+ */
+
+ /**
+ * Parameters used to configure {@link USKStoreCheckCoordinator}.
+ *
+ * This bundle captures the stable collaborators required to schedule store checks. It is
+ * constructed via the nested {@link Builder} to keep constructor signatures small and encourage
+ * explicit configuration.
+ */
+ static final class Params {
+ /** Key watch set that supplies datastore checkers. */
+ private final USKKeyWatchSet watchingKeys;
+
+ /** Attempt manager that schedules polling attempts after store checks. */
+ private final USKAttemptManager attempts;
+
+ /** Parent requester used for network scheduling and priority decisions. */
+ private final ClientRequester parent;
+
+ /** Whether the fetcher should perform store-only checks without network activity. */
+ private final boolean checkStoreOnly;
+
+ /** Manager used to query the latest known slot values. */
+ private final USKManager uskManager;
+
+ /** Base USK that is being checked. */
+ private final USK origUSK;
+
+ /** Callback interface used to notify the owning fetcher. */
+ private final USKStoreCheckCallbacks callbacks;
+
+ /** Whether store checks should run with real-time bias. */
+ private final boolean realTimeFlag;
+
+ /**
+ * Creates a parameter bundle from the provided builder.
+ *
+ * @param builder builder that supplies all required fields
+ */
+ private Params(Builder builder) {
+ this.watchingKeys = builder.watchingKeys;
+ this.attempts = builder.attempts;
+ this.parent = builder.parent;
+ this.checkStoreOnly = builder.checkStoreOnly;
+ this.uskManager = builder.uskManager;
+ this.origUSK = builder.origUSK;
+ this.callbacks = builder.callbacks;
+ this.realTimeFlag = builder.realTimeFlag;
+ }
+
+ /**
+ * Returns a new builder for assembling {@link Params}.
+ *
+ * @return a fresh builder instance with unset fields
+ */
+ static Builder builder() {
+ return new Builder();
+ }
+
+ /**
+ * Builder for {@link Params}.
+ *
+ * Each setter returns the builder to allow chaining. Call {@link #build()} once all fields
+ * are configured.
+ */
+ static final class Builder {
+ /** Key watch set that supplies datastore checkers. */
+ private USKKeyWatchSet watchingKeys;
+
+ /** Attempt manager that schedules polling attempts after store checks. */
+ private USKAttemptManager attempts;
+
+ /** Parent requester used for network scheduling and priority decisions. */
+ private ClientRequester parent;
+
+ /** Whether the fetcher should perform store-only checks without network activity. */
+ private boolean checkStoreOnly;
+
+ /** Manager used to query the latest known slot values. */
+ private USKManager uskManager;
+
+ /** Base USK that is being checked. */
+ private USK origUSK;
+
+ /** Callback interface used to notify the owning fetcher. */
+ private USKStoreCheckCallbacks callbacks;
+
+ /** Whether store checks should run with real-time bias. */
+ private boolean realTimeFlag;
+
+ /** Creates a new builder with unset fields. */
+ Builder() {}
+
+ /**
+ * Sets the key watch set used to derive datastore checkers.
+ *
+ * @param watchingKeys watch set used to build store checkers; must be non-null
+ * @return this builder for method chaining
+ */
+ Builder watchingKeys(USKKeyWatchSet watchingKeys) {
+ this.watchingKeys = watchingKeys;
+ return this;
+ }
+
+ /**
+ * Sets the attempt manager used to schedule polling attempts.
+ *
+ * @param attempts attempt manager to be updated after store checks; must be non-null
+ * @return this builder for method chaining
+ */
+ Builder attempts(USKAttemptManager attempts) {
+ this.attempts = attempts;
+ return this;
+ }
+
+ /**
+ * Sets the parent requester used for scheduling decisions.
+ *
+ * @param parent requester used to schedule network activity; must be non-null
+ * @return this builder for method chaining
+ */
+ Builder parent(ClientRequester parent) {
+ this.parent = parent;
+ return this;
+ }
+
+ /**
+ * Sets whether the coordinator should only check the store.
+ *
+ * @param checkStoreOnly {@code true} to avoid network fetches and only check the store
+ * @return this builder for method chaining
+ */
+ Builder checkStoreOnly(boolean checkStoreOnly) {
+ this.checkStoreOnly = checkStoreOnly;
+ return this;
+ }
+
+ /**
+ * Sets the USK manager used to query the latest known slot.
+ *
+ * @param uskManager manager used to look up slot values; must be non-null
+ * @return this builder for method chaining
+ */
+ Builder uskManager(USKManager uskManager) {
+ this.uskManager = uskManager;
+ return this;
+ }
+
+ /**
+ * Sets the base USK being checked.
+ *
+ * @param origUSK base USK to check; must be non-null
+ * @return this builder for method chaining
+ */
+ Builder origUSK(USK origUSK) {
+ this.origUSK = origUSK;
+ return this;
+ }
+
+ /**
+ * Sets the callbacks used to notify the owning fetcher.
+ *
+ * @param callbacks callback interface for completion and scheduling events; must be non-null
+ * @return this builder for method chaining
+ */
+ Builder callbacks(USKStoreCheckCallbacks callbacks) {
+ this.callbacks = callbacks;
+ return this;
+ }
+
+ /**
+ * Sets whether scheduling should use real-time bias.
+ *
+ * @param realTimeFlag {@code true} to prefer real-time scheduling priorities
+ * @return this builder for method chaining
+ */
+ Builder realTimeFlag(boolean realTimeFlag) {
+ this.realTimeFlag = realTimeFlag;
+ return this;
+ }
+
+ /**
+ * Builds the {@link Params} instance from the configured fields.
+ *
+ * @return an immutable parameter bundle for the coordinator
+ */
+ Params build() {
+ return new Params(this);
+ }
+ }
+ }
+
+ /**
+ * Creates a coordinator using a parameter bundle.
+ *
+ * The parameter bundle should contain fully initialized collaborators that remain valid for
+ * the lifetime of the coordinator.
+ *
+ * @param params parameter bundle with collaborators and scheduling flags; must be non-null
+ */
+ USKStoreCheckCoordinator(Params params) {
+ this.watchingKeys = params.watchingKeys;
+ this.attempts = params.attempts;
+ this.parent = params.parent;
+ this.checkStoreOnly = params.checkStoreOnly;
+ this.uskManager = params.uskManager;
+ this.origUSK = params.origUSK;
+ this.callbacks = params.callbacks;
+ this.realTimeFlag = params.realTimeFlag;
+ }
+
+ /**
+ * Starts or continues datastore checking for watched keys.
+ *
+ * The coordinator ensures only one store checker is active at a time. If a checker is already
+ * running, the method returns {@code true} to indicate that no new registration was performed. If
+ * there are no datastore checkers to run, it returns {@code false} to signal that no store check
+ * is required.
+ *
+ * @param ed latest known edition used to seed datastore checks
+ * @param context client context used to register the store checker; must not be null
+ * @return {@code true} when a store check is already running or was started; {@code false} when
+ * no store check is required
+ */
+ @SuppressWarnings("BooleanMethodIsAlwaysInverted")
+ boolean fillKeysWatching(long ed, ClientContext context) {
+ synchronized (this) {
+ // Do not run a new one until this one has finished.
+ // USKStoreCheckerGetter itself will automatically call back to fillKeysWatching,
+ // so there is no chance of losing it.
+ if (runningStoreChecker != null) return true;
+ USKStoreChecker checker = buildStoreChecker(ed);
+ if (checker == null) {
+ if (LOG.isDebugEnabled()) LOG.debug("No datastore checker");
+ return false;
+ }
+
+ runningStoreChecker = new USKStoreCheckerGetter(this, callbacks, parent, checker);
+ }
+ try {
+ context
+ .getSskFetchScheduler(realTimeFlag)
+ .register(null, new SendableGet[] {runningStoreChecker}, false, null, false);
+ } catch (Exception t) {
+ USKStoreCheckerGetter storeChecker;
+ synchronized (this) {
+ storeChecker = runningStoreChecker;
+ runningStoreChecker = null;
+ }
+ LOG.error("Unable to start: {}", t, t);
+ if (storeChecker != null) {
+ try {
+ storeChecker.unregister(context, storeChecker.getPriorityClass());
+ } catch (Exception _) {
+ // Ignore, hopefully it's already unregistered
+ }
+ }
+ }
+ if (LOG.isDebugEnabled()) LOG.debug("Registered {} for {}", runningStoreChecker, callbacks);
+ return true;
+ }
+
+ /**
+ * Completes registration after a datastore checker finishes its pre-registration phase.
+ *
+ * The method unregisters the checker, marks it complete, then schedules any pending attempts
+ * based on the datastore results. When running in store-only mode, it may immediately conclude
+ * the round after DBR handling.
+ *
+ * @param storeChecker active store checker getter instance; must not be null
+ * @param checker datastore checker wrapper used to mark completion; must not be null
+ * @param context client context used for scheduling and callbacks; must not be null
+ * @param toNetwork whether the scheduler intended a network send for the checker
+ * @return {@code toNetwork} to preserve scheduler semantics; never sends network requests here
+ */
+ @SuppressWarnings("java:S3516")
+ boolean preRegisterStoreChecker(
+ USKStoreCheckerGetter storeChecker,
+ USKStoreChecker checker,
+ ClientContext context,
+ boolean toNetwork) {
+ if (callbacks.isCancelled()) {
+ storeChecker.unregister(context, storeChecker.getPriorityClass());
+ synchronized (this) {
+ runningStoreChecker = null;
+ }
+ if (LOG.isDebugEnabled())
+ LOG.debug("StoreChecker preRegister aborted: fetcher cancelled/completed");
+ // Returning toNetwork cancels the network send when the scheduler planned one;
+ // the value is ignored by the scheduler when toNetwork == false.
+ return toNetwork;
+ }
+
+ storeChecker.unregister(context, storeChecker.getPriorityClass());
+
+ USKAttempt[] attemptsToStart;
+ synchronized (this) {
+ runningStoreChecker = null;
+ // Note: optionally start USKAttempts only when a datastore check shows no progress.
+ attemptsToStart = attempts.snapshotAttemptsToStart();
+ attempts.clearAttemptsToStart();
+ if (callbacks.isCancelled()) attemptsToStart = new USKAttempt[0];
+ }
+
+ checker.checked();
+
+ if (LOG.isDebugEnabled())
+ LOG.debug(
+ "Checked datastore, finishing registration for {} checkers for {}",
+ attemptsToStart.length,
+ origUSK);
+
+ if (attemptsToStart.length > 0) {
+ parent.toNetwork(context);
+ callbacks.notifySendingToNetwork(context);
+ }
+
+ callbacks.processAttemptsAfterStoreCheck(attemptsToStart, context);
+
+ long lastEd = uskManager.lookupLatestSlot(origUSK);
+ if (!fillKeysWatching(lastEd, context) && checkStoreOnly) {
+ if (LOG.isDebugEnabled()) LOG.debug("Just checking store, terminating {} ...", callbacks);
+ if (callbacks.shouldDeferUntilDBRs()) {
+ callbacks.setScheduleAfterDBRsDone(true);
+ } else {
+ callbacks.finishSuccess(context);
+ }
+ }
+
+ // The store checker never sends network requests itself; the returned value is
+ // ignored by the scheduler when toNetwork == false.
+ return toNetwork;
+ }
+
+ /**
+ * Returns whether a store check is currently running.
+ *
+ * @return {@code true} if a store checker getter is active, otherwise {@code false}
+ */
+ boolean isStoreCheckRunning() {
+ synchronized (this) {
+ return runningStoreChecker != null;
+ }
+ }
+
+ /**
+ * Cancels any running store checker and unregisters it from the scheduler.
+ *
+ * If no checker is running, the method is a no-op.
+ *
+ * @param context client context used to unregister the checker; must not be null
+ */
+ void cancelStoreChecker(ClientContext context) {
+ USKStoreCheckerGetter checker;
+ synchronized (this) {
+ checker = runningStoreChecker;
+ runningStoreChecker = null;
+ }
+ if (checker != null) {
+ checker.unregister(context, checker.getPriorityClass());
+ }
+ }
+
+ /**
+ * Builds a store checker for the given edition.
+ *
+ * @param ed edition used to select datastore sub-checkers
+ * @return a store checker instance, or {@code null} if no checks are required
+ */
+ private USKStoreChecker buildStoreChecker(long ed) {
+ List
+
+ /**
+ * This helper merges keys from multiple sources and forwards completion notifications back to
+ * the underlying sub-checkers.
+ */
+ final class USKStoreChecker {
+
+ /** Sub-checkers contributing keys to a query in the datastore. */
+ final USKKeyWatchSet.KeyList.StoreSubChecker[] checkers;
+
+ /**
+ * Creates a store checker from a list of sub-checkers.
+ *
+ * @param c sub-checkers that contribute keys; must not be null
+ */
+ public USKStoreChecker(List
diff --git a/src/main/java/network/crypta/client/async/USKStoreCheckerGetter.java b/src/main/java/network/crypta/client/async/USKStoreCheckerGetter.java
- * This getter is created by {@link USKFetcher} when it wants to cheaply answer the question
- * "does the datastore already contain any likely next editions?" before attempting any network
- * fetch. It exposes a set of candidate {@link Key}s via {@link #listKeys()} and relies on the
- * surrounding request machinery to perform local checks only; it does not select a single key to
- * send, and it does not initiate network traffic itself.
+ * This getter is created by {@link USKStoreCheckCoordinator} when it wants to cheaply answer the
+ * question "does the datastore already contain any likely next editions?" before attempting any
+ * network fetch. It exposes a set of candidate {@link Key}s via {@link #listKeys()} and relies on
+ * the surrounding request machinery to perform local checks only; it does not select a single key
+ * to send, and it does not initiate network traffic itself.
*
* Lifecycle-wise, the instance is intended to be single-shot: {@link #preRegister(ClientContext,
- * boolean)} delegates to {@link USKFetcher#preRegisterStoreChecker(USKStoreCheckerGetter,
- * USKFetcher.USKStoreChecker, ClientContext, boolean)} and then permanently marks the request as
- * done so that subsequent scheduling treats it as canceled. This keeps the store-check wiring
+ * boolean)} delegates to {@link
+ * USKStoreCheckCoordinator#preRegisterStoreChecker(USKStoreCheckerGetter,
+ * USKStoreCheckCoordinator.USKStoreChecker, ClientContext, boolean)} and then permanently marks the
+ * request as done so that later scheduling treats it as canceled. This keeps the store-check wiring
* separate from {@code USKFetcher}'s polling logic, reducing coupling and making the probe behavior
* explicit.
*
* This class does not perform its own synchronization; it assumes the threading model used by
- * the request scheduler and the owning {@link USKFetcher}.
+ * the request scheduler and the owning {@link USKStoreCheckCoordinator}.
*
- * The instance delegates most behavior to {@code fetcher} and {@code checker} and is designed
- * to be short-lived: once {@link #preRegister(ClientContext, boolean)} completes, the getter
- * marks itself done so that the scheduler stops considering it for further work.
+ * The instance delegates most behavior to {@code coordinator} and {@code checker} and is
+ * designed to be short-lived: once {@link #preRegister(ClientContext, boolean)} completes, the
+ * getter marks itself done so that the scheduler stops considering it for further work.
*
- * @param fetcher owning {@link USKFetcher} that supplies context and policy.
- * @param parent request owner used for scheduling and real-time flag.
+ * @param coordinator store-check coordinator for lifecycle events.
+ * @param callbacks fetcher-level callbacks used for context and state.
+ * @param owner request owner used for scheduling and the real-time flag.
* @param checker candidate-key provider used for datastore probing decisions.
*/
USKStoreCheckerGetter(
- USKFetcher fetcher, ClientRequester parent, USKFetcher.USKStoreChecker checker) {
- super(parent, parent.realTimeFlag());
- this.fetcher = fetcher;
+ USKStoreCheckCoordinator coordinator,
+ USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks,
+ ClientRequester owner,
+ USKStoreCheckCoordinator.USKStoreChecker checker) {
+ super(owner, owner.realTimeFlag());
+ this.coordinator = coordinator;
+ this.callbacks = callbacks;
+ this.owner = owner;
this.checker = checker;
}
@@ -74,14 +87,14 @@ final class USKStoreCheckerGetter extends SendableGet {
*
* This implementation reuses the context configured on the owning {@link USKFetcher} and
* returns the exact instance stored on the fetcher (no defensive copy). Sharing the context keeps
- * datastore behavior and fetch-policy settings consistent between the probe and any subsequent
- * USK polling actions.
+ * datastore behavior and fetch-policy settings consistent between the probe and any later USK
+ * polling actions.
*
* @return the fetch context to use for store checks, shared with the owning fetcher.
*/
@Override
public FetchContext getContext() {
- return fetcher.ctx;
+ return callbacks.fetcherContext();
}
/**
@@ -120,10 +133,11 @@ public ClientKey getKey(SendableRequestItem token) {
/**
* Lists the candidate datastore keys to probe for likely USK editions.
*
- * The returned set is determined by {@link USKFetcher.USKStoreChecker} and represents the
- * editions that the owning {@link USKFetcher} considers plausible next steps. The scheduler uses
- * this list for local store checking only; this getter never turns these keys into network
- * requests directly. This method returns the array provided by the checker without copying it.
+ * The returned set is determined by {@link USKStoreCheckCoordinator.USKStoreChecker} and
+ * represents the editions that the owning {@link USKFetcher} considers plausible next steps. The
+ * scheduler uses this list for local store checking only; this getter never turns these keys into
+ * network requests directly. This method returns the array provided by the checker without
+ * copying it.
*
* @return an array of candidate {@link Key} instances to probe; may be empty.
*/
@@ -136,8 +150,8 @@ public Key[] listKeys() {
* Handles a failure for this getter.
*
* Failures are treated as non-fatal for the local store-check probe. The higher-level {@link
- * USKFetcher} logic decides how to proceed (for example, whether to attempt a network fetch), so
- * this callback intentionally performs no action.
+ * USKStoreCheckCoordinator} logic decides how to proceed (for example, whether to attempt a
+ * network fetch), so this callback intentionally performs no action.
*
* The parameters are accepted to satisfy the {@link SendableGet} contract but are otherwise
* ignored.
@@ -154,10 +168,11 @@ public void onFailure(LowLevelGetException e, SendableRequestItem token, ClientC
/**
* Registers this getter with the scheduler, delegating the actual work to the owning fetcher.
*
- * This method forwards to {@link USKFetcher#preRegisterStoreChecker(USKStoreCheckerGetter,
- * USKFetcher.USKStoreChecker, ClientContext, boolean)} and then marks the request as done in a
- * {@code finally} block so that {@link #isCancelled()} returns {@code true} afterward. It is
- * intended to run once per instance as part of a single store-check pass.
+ * This method forwards to {@link
+ * USKStoreCheckCoordinator#preRegisterStoreChecker(USKStoreCheckerGetter,
+ * USKStoreCheckCoordinator.USKStoreChecker, ClientContext, boolean)} and then marks the request
+ * as done in a {@code finally} block so that {@link #isCancelled()} returns {@code true}
+ * afterward. It is intended to run once per instance as part of a single store-check pass.
*
* @param context client context used during registration; must not be null.
* @param toNetwork whether the scheduler is attempting a network registration; forwarded as-is.
@@ -166,18 +181,18 @@ public void onFailure(LowLevelGetException e, SendableRequestItem token, ClientC
@Override
public boolean preRegister(ClientContext context, boolean toNetwork) {
try {
- return fetcher.preRegisterStoreChecker(this, checker, context, toNetwork);
+ return coordinator.preRegisterStoreChecker(this, checker, context, toNetwork);
} finally {
done = true;
}
}
/**
- * Selects a key to send based on local-fetching state.
+ * Selects a key to send based on the local-fetching state.
*
* This getter never selects a network-sendable key. It exists only to drive local store
* checking via {@link #listKeys()}, and the input parameters are unused. Returning {@code null}
 * prevents any attempt to schedule a network send for this helper request. As a result, the
* scheduler sees no sendable work from this getter.
*
* @param keys keys currently being fetched locally; ignored by this implementation.
@@ -201,7 +216,7 @@ public SendableRequestItem chooseKey(KeysFetchingLocally keys, ClientContext con
*/
@Override
public long countAllKeys(ClientContext context) {
- return fetcher.countKeys();
+ return callbacks.fetcher().countKeys();
}
/**
@@ -225,14 +240,14 @@ public long countSendableKeys(ClientContext context) {
*
* Although this getter does not perform network I/O, it still participates in the same
* scheduling and accounting paths as other requests. Selecting the client based on the real-time
- * flag of the owning {@code parent} keeps the probe aligned with the rest of the USK polling
- * workflow and ensures it is attributed to the correct request queue.
+ * flag of the owning requester keeps the probe aligned with the rest of the USK polling workflow
+ * and ensures it is attributed to the correct request queue.
*
- * @return the request client matching the parent's real-time scheduling mode.
+ * @return the request client matching the owner's real-time scheduling mode.
*/
@Override
public RequestClient getClient() {
- return parent.realTimeFlag() ? USKManager.rcRT : USKManager.rcBulk;
+ return owner.realTimeFlag() ? USKManager.rcRT : USKManager.rcBulk;
}
/**
@@ -240,14 +255,14 @@ public RequestClient getClient() {
*
* The request machinery uses this link to attribute accounting and cancellation. This getter
* is a helper object and does not represent an independent client request, so it returns the
- * parent requester supplied at construction time. Callers should treat the returned requester as
- * the authoritative owner of this probe.
+ * owner requester supplied at construction time. Callers should treat the returned requester as
+ * the authoritative owner of this probe and its scheduling.
*
- * @return the parent requester that owns this store-check probe.
+ * @return the owner requester that owns this store-check probe.
*/
@Override
public ClientRequester getClientRequest() {
- return parent;
+ return owner;
}
/**
@@ -261,7 +276,7 @@ public ClientRequester getClientRequest() {
*/
@Override
public short getPriorityClass() {
- return fetcher.getPriorityClass();
+ return callbacks.fetcher().getPriorityClass();
}
/**
@@ -276,7 +291,7 @@ public short getPriorityClass() {
*/
@Override
public boolean isCancelled() {
- return done || fetcher.isCancelled();
+ return done || callbacks.isCancelled();
}
/**
@@ -285,7 +300,7 @@ public boolean isCancelled() {
* USK datastore lookups are performed using SSK-derived keys, so this getter always reports
* {@code true} to match the underlying key type expectations of the request machinery. This
* classification can influence request routing, accounting, and key-handling behavior. It has no
 * side effects and does not vary per instance.
*
* @return {@code true}, as this getter operates on SSK-derived keys.
*/
@@ -323,6 +338,6 @@ public long getWakeupTime(ClientContext context, long now) {
*/
@Override
protected ClientGetState getClientGetState() {
- return fetcher;
+ return callbacks.fetcher();
}
}
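Taken together with the coordinator above, the store-check flow is: build the coordinator, call fillKeysWatching, and let the registered USKStoreCheckerGetter call back into preRegisterStoreChecker. The following is a minimal wiring sketch using only the API shown in this diff; the collaborator variables (watchSet, attemptManager, requester, storeCheckCallbacks, lastKnownEdition, and so on) are assumed to come from the owning USKFetcher and are not part of this change.

    // Sketch only: assemble the coordinator via the Params builder and kick off
    // a datastore check for the last known edition.
    USKStoreCheckCoordinator.Params params =
        USKStoreCheckCoordinator.Params.builder()
            .watchingKeys(watchSet)
            .attempts(attemptManager)
            .parent(requester)
            .checkStoreOnly(false)
            .uskManager(uskManager)
            .origUSK(usk)
            .callbacks(storeCheckCallbacks)
            .realTimeFlag(false)
            .build();
    USKStoreCheckCoordinator coordinator = new USKStoreCheckCoordinator(params);

    // false means no datastore check was needed, so network attempts may be
    // registered immediately; true means a check is running (or just started)
    // and completion will arrive through USKStoreCheckCallbacks.
    boolean checking = coordinator.fillKeysWatching(lastKnownEdition, context);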
diff --git a/src/main/java/network/crypta/client/async/USKSubscriberRegistry.java b/src/main/java/network/crypta/client/async/USKSubscriberRegistry.java
new file mode 100644
index 0000000000..e9ede283c9
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKSubscriberRegistry.java
@@ -0,0 +1,210 @@
+package network.crypta.client.async;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import network.crypta.keys.USK;
+
+/**
+ * Tracks USK subscribers, edition hints, and polling priority preferences.
+ *
+ * The registry maintains a set of {@link USKCallback} subscribers and their associated edition
+ * hints. It updates the {@link USKKeyWatchSet} with subscriber hints, recalculates polling
+ * priorities through {@link USKPriorityPolicy}, and exposes snapshot views of registered
+ * subscribers. Callers generally use it when adding or removing subscribers from a {@link
+ * USKFetcher} so that the polling attempts remain aligned with the most recent subscriber state.
+ *
+ * The registry is mutable and synchronizes around the subscriber state. It does not synchronize
+ * accesses to the {@link USKKeyWatchSet} or {@link USKManager}; those collaborators are expected to
+ * be thread-safe or externally synchronized. Hint updates and priority changes are applied in a
+ * predictable sequence: update the registry, refresh priorities, and then update watching keys with
+ * the latest hint snapshot.
+ */
+
+ /**
+ * Creates a subscriber registry for a USK fetcher.
+ *
+ * The registry holds the dependencies needed to update watch keys and compute polling
+ * priorities. It assumes the {@code attempts} manager and {@code uskManager} remain valid for the
+ * lifetime of the owning fetcher.
+ *
+ * @param watchingKeys watch set updated with subscriber hints; must be non-null
+ * @param uskManager manager used to query latest slot values; must be non-null
+ * @param attempts attempt manager used by the priority policy; must be non-null
+ * @param origUSK base USK that anchors hint and lookup calculations; must be non-null
+ */
+ USKSubscriberRegistry(
+ USKKeyWatchSet watchingKeys, USKManager uskManager, USKAttemptManager attempts, USK origUSK) {
+ this.watchingKeys = watchingKeys;
+ this.uskManager = uskManager;
+ this.priorityPolicy = new USKPriorityPolicy(attempts);
+ this.origUSK = origUSK;
+ }
+
+ /**
+ * Adds a subscriber and updates polling priorities and watch hints.
+ *
+ * The subscriber and its hint are stored, then the priority policy is refreshed using the
+ * provided fetcher callbacks. Finally, the updated hint set is pushed to the watch set so that
+ * future polling attempts can incorporate the new hint values.
+ *
+ * @param cb subscriber callback to register; must be non-null
+ * @param hint edition hint provided by the subscriber
+ * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null
+ * @param fetcherName human-readable fetcher identifier used for debug logging
+ */
+ void addSubscriber(
+ USKCallback cb, long hint, USKFetcherCallback[] fetcherCallbacks, String fetcherName) {
+ Long[] hints;
+ synchronized (this) {
+ subscribers.add(cb);
+ subscriberHints.put(cb, hint);
+ hints = subscriberHints.values().toArray(new Long[0]);
+ }
+ updatePriorities(fetcherCallbacks, fetcherName);
+ watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK));
+ }
+
+ /**
+ * Removes a subscriber and updates polling priorities and watch hints.
+ *
+ * The subscriber and its hint are removed, priorities are refreshed using the provided fetcher
+ * callbacks, and the remaining hint set is propagated to the watch set. The method is safe to
+ * call even if the subscriber was not registered.
+ *
+ * @param cb subscriber callback to remove; must be non-null
+ * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null
+ * @param fetcherName human-readable fetcher identifier used for debug logging
+ */
+ void removeSubscriber(USKCallback cb, USKFetcherCallback[] fetcherCallbacks, String fetcherName) {
+ Long[] hints;
+ synchronized (this) {
+ subscribers.remove(cb);
+ subscriberHints.remove(cb);
+ hints = subscriberHints.values().toArray(new Long[0]);
+ }
+ updatePriorities(fetcherCallbacks, fetcherName);
+ watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK));
+ }
+
+ /**
+ * Removes a subscriber without updating polling priorities.
+ *
+ * This is used when the caller is already managing priority changes elsewhere. The method
+ * still updates the watch set with the remaining hint values.
+ *
+ * @param cb subscriber callback to remove; must be non-null
+ */
+ void removeCallback(USKCallback cb) {
+ Long[] hints;
+ synchronized (this) {
+ subscribers.remove(cb);
+ subscriberHints.remove(cb);
+ hints = subscriberHints.values().toArray(new Long[0]);
+ }
+ watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK));
+ }
+
+ /**
+ * Returns whether any subscribers are registered.
+ *
+ * @return {@code true} if at least one subscriber is present
+ */
+ boolean hasSubscribers() {
+ synchronized (this) {
+ return !subscribers.isEmpty();
+ }
+ }
+
+ /**
+ * Returns whether any fetcher callbacks are present.
+ *
+ * @param fetcherCallbacks fetcher callbacks to evaluate; must not be null
+ * @return {@code true} when the array contains at least one callback
+ */
+ boolean hasCallbacks(USKFetcherCallback[] fetcherCallbacks) {
+ return fetcherCallbacks.length != 0;
+ }
+
+ /**
+ * Refreshes priorities and returns the current progress polling priority.
+ *
+ * The method recalculates polling priorities using the provided fetcher callbacks and then
+ * returns the progress priority, allowing callers to use the updated value immediately.
+ *
+ * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null
+ * @param fetcherName human-readable fetcher identifier used for debug logging
+ * @return the updated progress polling priority class
+ */
+ short refreshAndGetProgressPollPriority(
+ USKFetcherCallback[] fetcherCallbacks, String fetcherName) {
+ updatePriorities(fetcherCallbacks, fetcherName);
+ return progressPriority();
+ }
+
+ /**
+ * Returns the current progress polling priority class.
+ *
+ * @return progress polling priority derived from subscriber preferences
+ */
+ short progressPriority() {
+ return priorityPolicy.progressPriority();
+ }
+
+ /**
+ * Returns the current normal polling priority class.
+ *
+ * @return normal polling priority derived from subscriber preferences
+ */
+ short normalPriority() {
+ return priorityPolicy.normalPriority();
+ }
+
+ /**
+ * Returns a snapshot of registered subscribers.
+ *
+ * @return an array snapshot of subscribers; may be empty but never null
+ */
+ USKCallback[] snapshotSubscribers() {
+ synchronized (this) {
+ return subscribers.toArray(new USKCallback[0]);
+ }
+ }
+
+ /**
+ * Updates polling priorities using the current subscriber snapshot.
+ *
+ * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null
+ * @param fetcherName human-readable fetcher identifier used for debug logging
+ */
+ void updatePriorities(USKFetcherCallback[] fetcherCallbacks, String fetcherName) {
+ USKCallback[] localCallbacks;
+ synchronized (this) {
+ localCallbacks = subscribers.toArray(new USKCallback[0]);
+ }
+ priorityPolicy.updatePriorities(localCallbacks, fetcherCallbacks, fetcherName);
+ }
+}
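A short usage sketch for the registry, as the owning fetcher would call it; subscriber, usk, and fetcherCallbacks are illustrative placeholders for state the fetcher already holds (fetcherCallbacks corresponds to the snapshotCallbacks() helper in the USKFetcher hunk later in this diff).

    // Register a subscriber with an edition hint; this refreshes priorities and
    // pushes the updated hint snapshot into the key watch set.
    registry.addSubscriber(subscriber, 42L, fetcherCallbacks, "USKFetcher for " + usk);

    // Recompute priorities after a callback change and read the results back.
    short progress =
        registry.refreshAndGetProgressPollPriority(fetcherCallbacks, "USKFetcher for " + usk);
    short normal = registry.normalPriority();

    // Dropping the subscriber updates the watch set with the remaining hints.
    registry.removeSubscriber(subscriber, fetcherCallbacks, "USKFetcher for " + usk);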
diff --git a/src/main/java/network/crypta/client/async/USKSuccessPlanner.java b/src/main/java/network/crypta/client/async/USKSuccessPlanner.java
new file mode 100644
index 0000000000..560167922b
--- /dev/null
+++ b/src/main/java/network/crypta/client/async/USKSuccessPlanner.java
@@ -0,0 +1,134 @@
+package network.crypta.client.async;
+
+import java.util.List;
+import network.crypta.keys.ClientSSKBlock;
+
+/**
+ * Builds plan objects for handling successful or discovered USK editions.
+ *
+ * This helper centralizes the decision-making data needed when a polling attempt succeeds or
+ * discovers a newer edition. Callers use it to construct immutable-looking plan objects that carry
+ * flags about whether to decode data, which attempts should be canceled, and whether a store check
+ * should be registered immediately. The planner does not execute any actions itself; it simply
+ * prepares structured data for the owning {@link USKFetcher} or related coordinators.
+ *
+ * The class is stateless and thread-safe, and it may be reused freely across scheduling cycles.
+ * Plan instances are mutable data holders and are typically short-lived, created for a single
+ * scheduling decision, and then discarded.
+ */
+
+ /**
+ * Plan describing how to handle a successfully fetched edition.
+ *
+ * The plan records whether to decode data, the current latest edition value, and whether
+ * registration should happen immediately. It also includes any polling attempts that should be
+ * terminated after successful handling is completed.
+ */
+ static final class SuccessPlan {
+ /** Whether the caller should decode the associated data block. */
+ boolean decode;
+
+ /** Current latest edition value after applying the successful result. */
+ long curLatest;
+
+ /** Whether the caller should register follow-up work immediately. */
+ boolean registerNow;
+
+ /** Attempts that should be canceled after the success is processed. */
+ List
+
+ /**
+ * The plan records whether to decode data, whether a store check should be registered
+ * immediately, and which polling attempts should be terminated after handling the discovery.
+ */
+ static final class FoundPlan {
+ /** Whether the caller should decode the associated data block. */
+ boolean decode;
+
+ /** Attempts that should be canceled after the discovery is processed. */
+ List
+
+ /**
+ * The returned plan aggregates the caller's decision flags and the list of attempts that
+ * should be terminated after success handling. The method does not validate the inputs; it simply
+ * packages them for downstream consumers.
+ *
+ * @param decode whether the success path should decode the returned data block
+ * @param curLatest latest edition value after applying the successful fetch
+ * @param registerNow whether follow-up registration should occur immediately
+ * @param killAttempts polling attempts to cancel after success handling; may be empty but not
+ * null
+ * @return a success plan populated with the provided values
+ */
+ SuccessPlan createSuccessPlan(
+ boolean decode, long curLatest, boolean registerNow, List
+
+ /**
+ * The returned plan captures decode and registration choices along with any polling attempts
+ * that should be terminated after the discovery is processed.
+ *
+ * @param decode whether the discovery path should decode the returned data block
+ * @param registerNow whether follow-up registration should occur immediately
+ * @param killAttempts polling attempts to cancel after handling the discovery; may be empty but
+ * not null
+ * @return a found plan populated with the provided values
+ */
+ FoundPlan createFoundPlan(boolean decode, boolean registerNow, List
+
+ /**
+ * The decision is based on the current latest edition value, the last known edition, and
+ * whether the caller has requested a no-update path without a data block. A {@code null} block is
+ * treated as non-decodable when {@code dontUpdate} is set.
+ *
+ * @param curLatest current latest edition value tracked by the caller
+ * @param lastEd last known edition value to compare against
+ * @param dontUpdate whether the caller is explicitly avoiding updates
+ * @param block decoded block candidate; may be null when only metadata is available
+ * @return {@code true} when the result is eligible for decoding
+ */
+ static boolean shouldDecode(
+ long curLatest, long lastEd, boolean dontUpdate, ClientSSKBlock block) {
+ return curLatest >= lastEd && !(dontUpdate && block == null);
+ }
+}
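The decode gate above is the only piece of logic in the planner; the rest is plain data packaging. Below is a small sketch of how a caller would apply it, where curLatest, dontUpdate, and block stand in for state the fetcher already tracks; none of the surrounding variables are part of this change.

    // Sketch only: decide whether a successful or discovered edition is decoded.
    long lastEd = uskManager.lookupLatestSlot(origUSK);
    if (USKSuccessPlanner.shouldDecode(curLatest, lastEd, dontUpdate, block)) {
      // The edition is not older than the last known slot, and either a block is
      // present or the caller did not request the no-update path: decode it.
    }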
diff --git a/src/test/java/network/crypta/client/async/USKAttemptManagerTest.java b/src/test/java/network/crypta/client/async/USKAttemptManagerTest.java
new file mode 100644
index 0000000000..ac261dd6f6
--- /dev/null
+++ b/src/test/java/network/crypta/client/async/USKAttemptManagerTest.java
@@ -0,0 +1,617 @@
+package network.crypta.client.async;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.net.MalformedURLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.stream.Collectors;
+import network.crypta.client.ArchiveManager;
+import network.crypta.client.FetchContext;
+import network.crypta.client.FetchContextOptions;
+import network.crypta.client.InsertContext;
+import network.crypta.client.InsertContextOptions;
+import network.crypta.client.events.SimpleEventProducer;
+import network.crypta.client.filter.LinkFilterExceptionProvider;
+import network.crypta.clients.fcp.PersistentRequestRoot;
+import network.crypta.config.Config;
+import network.crypta.crypt.MasterSecret;
+import network.crypta.crypt.RandomSource;
+import network.crypta.keys.ClientSSK;
+import network.crypta.keys.Key;
+import network.crypta.keys.KeyBlock;
+import network.crypta.keys.NodeSSK;
+import network.crypta.keys.USK;
+import network.crypta.node.ClientContextResources;
+import network.crypta.node.RequestClient;
+import network.crypta.support.MemoryLimitedJobRunner;
+import network.crypta.support.PriorityAwareExecutor;
+import network.crypta.support.Ticker;
+import network.crypta.support.api.LockableRandomAccessBufferFactory;
+import network.crypta.support.io.FileRandomAccessBufferFactory;
+import network.crypta.support.io.FilenameGenerator;
+import network.crypta.support.io.PersistentFileTracker;
+import network.crypta.support.io.PersistentTempBucketFactory;
+import network.crypta.support.io.TempBucketFactory;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+@SuppressWarnings("java:S100")
+class USKAttemptManagerTest {
+
+ private static final RequestClient TRANSIENT_CLIENT =
+ new RequestClient() {
+ @Override
+ public boolean persistent() {
+ return false;
+ }
+
+ @Override
+ public boolean realTimeFlag() {
+ return false;
+ }
+ };
+
+ private static final class DirectExecutor implements PriorityAwareExecutor {
+ @Override
+ public void execute(Runnable job) {
+ job.run();
+ }
+
+ @Override
+ public void execute(Runnable job, String jobName) {
+ job.run();
+ }
+
+ @Override
+ public void execute(Runnable job, String jobName, boolean fromTicker) {
+ job.run();
+ }
+
+ @Override
+ public int[] waitingThreads() {
+ return new int[0];
+ }
+
+ @Override
+ public int[] runningThreads() {
+ return new int[0];
+ }
+
+ @Override
+ public int getWaitingThreadsCount() {
+ return 0;
+ }
+ }
+
+ private static final class DirectTicker implements Ticker {
+ private final PriorityAwareExecutor executor = new DirectExecutor();
+
+ @Override
+ public void queueTimedJob(Runnable job, long offset) {
+ job.run();
+ }
+
+ @Override
+ public void queueTimedJob(
+ Runnable job, String name, long offset, boolean runOnTickerAnyway, boolean noDupes) {
+ job.run();
+ }
+
+ @Override
+ public PriorityAwareExecutor getExecutor() {
+ return executor;
+ }
+
+ @Override
+ public void removeQueuedJob(Runnable job) {
+ // no-op
+ }
+
+ @Override
+ public void queueTimedJobAbsolute(
+ Runnable runner, String name, long time, boolean runOnTickerAnyway, boolean noDupes) {
+ runner.run();
+ }
+ }
+
+ private static final class TestRequester extends ClientRequester {
+ private final ClientBaseCallback callback;
+ private final network.crypta.keys.FreenetURI uri;
+ private int toNetworkCalls;
+ private boolean cancelled;
+
+ private TestRequester(network.crypta.keys.FreenetURI uri, RequestClient client) {
+ super((short) 1, client);
+ this.uri = uri;
+ this.callback =
+ new ClientBaseCallback() {
+ @Override
+ public void onResume(ClientContext context) {
+ // no-op
+ }
+
+ @Override
+ public RequestClient getRequestClient() {
+ return client;
+ }
+ };
+ }
+
+ @Override
+ public void onTransition(
+ ClientGetState oldState, ClientGetState newState, ClientContext context) {
+ // no-op
+ }
+
+ @Override
+ public void cancel(ClientContext context) {
+ cancelled = true;
+ }
+
+ @Override
+ public network.crypta.keys.FreenetURI getURI() {
+ return uri;
+ }
+
+ @Override
+ public boolean isFinished() {
+ return cancelled;
+ }
+
+ @Override
+ protected void innerNotifyClients(ClientContext context) {
+ // no-op
+ }
+
+ @Override
+ protected void innerToNetwork(ClientContext context) {
+ toNetworkCalls++;
+ }
+
+ @Override
+ protected ClientBaseCallback getCallback() {
+ return callback;
+ }
+
+ int toNetworkCalls() {
+ return toNetworkCalls;
+ }
+ }
+
+ private static FetchContext newFetchContext() {
+ return new FetchContext(
+ FetchContextOptions.builder()
+ .limits(16 * 1024L, 16 * 1024L, 4096)
+ .archiveLimits(1, 0, 0, true)
+ .retryLimits(0, 0, 2)
+ .splitfileLimits(true, 0, 0)
+ .behavior(false, false, false)
+ .clientOptions(new SimpleEventProducer(), true, false)
+ .filterOverrides(null, null, null)
+ .build());
+ }
+
+ private static InsertContext newInsertContext() {
+ return new InsertContext(
+ InsertContextOptions.builder()
+ .retryLimits(0, 0)
+ .splitfileSegmentLimits(0, 0)
+ .clientOptions(new SimpleEventProducer(), true, false, false)
+ .compressorDescriptor(null)
+ .redundancy(0, 0)
+ .compatibility(InsertContext.CompatibilityMode.COMPAT_CURRENT)
+ .build());
+ }
+
+ private static ClientContext minimalContext(USKManager uskManager, RandomSource randomSource) {
+ return new ClientContext(
+ 1L,
+ new ClientContextRuntime(
+ mock(ClientLayerPersister.class),
+ new DirectExecutor(),
+ mock(MemoryLimitedJobRunner.class),
+ new DirectTicker(),
+ randomSource,
+ new Random(123),
+ mock(MasterSecret.class)),
+ new ClientContextStorageFactories(
+ mock(PersistentTempBucketFactory.class),
+ mock(TempBucketFactory.class),
+ mock(PersistentFileTracker.class),
+ mock(FilenameGenerator.class),
+ mock(FilenameGenerator.class),
+ mock(FileRandomAccessBufferFactory.class),
+ mock(FileRandomAccessBufferFactory.class)),
+ new ClientContextRafFactories(
+ mock(LockableRandomAccessBufferFactory.class),
+ mock(LockableRandomAccessBufferFactory.class)),
+ new ClientContextServices(
+ new ClientContextResources(mock(ArchiveManager.class), mock(HealingQueue.class)),
+ uskManager,
+ mock(network.crypta.support.compress.RealCompressor.class),
+ mock(DatastoreChecker.class),
+ mock(PersistentRequestRoot.class),
+ mock(LinkFilterExceptionProvider.class)),
+ new ClientContextDefaults(newFetchContext(), newInsertContext(), mock(Config.class)));
+ }
+
+ private static USK newUSK() throws MalformedURLException {
+ byte[] pubKeyHash = new byte[NodeSSK.PUBKEY_HASH_SIZE];
+ byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH];
+ byte[] extras =
+ new byte[] {
+ NodeSSK.SSK_VERSION, 0, Key.ALGO_AES_PCFB_256_SHA256, 0, (byte) KeyBlock.HASH_SHA256
+ };
+ return new USK(pubKeyHash, cryptoKey, extras, "site", 0L);
+ }
+
+ private static USKKeyWatchSet.Lookup lookup(USK usk, long edition, boolean ignoreStore) {
+ USKKeyWatchSet.Lookup lookup = new USKKeyWatchSet.Lookup();
+ lookup.val = edition;
+ lookup.key = usk.getSSK(edition);
+ lookup.ignoreStore = ignoreStore;
+ lookup.label = "test";
+ return lookup;
+ }
+
+ private static USKAttemptManager newManager(
+ USKAttemptCallbacks callbacks,
+ USKManager uskManager,
+ USKKeyWatchSet watchingKeys,
+ boolean checkStoreOnly,
+ boolean keepLastData,
+ USK usk,
+ ClientRequester parent) {
+ USKAttemptContext attemptContext =
+ new USKAttemptContext(callbacks, usk, newFetchContext(), newFetchContext(), parent, false);
+ return new USKAttemptManager(
+ attemptContext, uskManager, watchingKeys, checkStoreOnly, keepLastData);
+ }
+
+ @Test
+ void cancelBefore_whenRunningAndPollingBeforeCutoff_removesAndReturns() throws Exception {
+ USKManager uskManager = mock(USKManager.class);
+ USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class);
+ USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class);
+ USK usk = newUSK();
+ TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT);
+ ClientContext context = minimalContext(uskManager, mock(RandomSource.class));
+
+ USKKeyWatchSet.Lookup runningOld = lookup(usk, 1L, false);
+ USKKeyWatchSet.Lookup runningNew = lookup(usk, 4L, false);
+ USKKeyWatchSet.Lookup pollingOld = lookup(usk, 2L, true);
+ USKKeyWatchSet.Lookup pollingNew = lookup(usk, 5L, true);
+ USKKeyWatchSet.ToFetch plan =
+ new USKKeyWatchSet.ToFetch(
+ Arrays.asList(runningOld, runningNew), Arrays.asList(pollingOld, pollingNew));
+
+ when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false);
+ when(watchingKeys.getEditionsToFetch(
+ anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean()))
+ .thenReturn(plan);
+
+ USKAttemptManager manager =
+ newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent);
+
+ manager.addNewAttempts(0L, context, true);
+ manager.clearAttemptsToStart();
+
+ List
+ *
+ */
+final class USKCompletionCoordinator {
+ /** Logger for callback completion errors. */
+ private static final Logger LOG = LoggerFactory.getLogger(USKCompletionCoordinator.class);
+
+ /** Completion handler that performs decoding and retained-data management. */
+ private final USKCompletionHandler completionHandler;
+
+ /** Manager used to unsubscribe and record completion. */
+ private final USKManager uskManager;
+
+ /** Base USK used for slot lookups and found-edition callbacks. */
+ private final USK origUSK;
+
+ /** Requester used for decoding and scheduling context. */
+ private final ClientRequester parent;
+
+ /** Whether cleanup should use real-time scheduling queues. */
+ private final boolean realTimeFlag;
+
+ /**
+ * Creates a completion coordinator for a USK fetcher.
+ *
+ *
+ *
+ *
+ * @see USKCompletionCoordinator
+ * @see USKFetcher
+ */
+final class USKCompletionHandler {
+ private static final Logger LOG = LoggerFactory.getLogger(USKCompletionHandler.class);
+
+ /** Last successfully fetched data bucket, retained when keepLastData is enabled. */
+ private Bucket lastRequestData;
+
+ /** Compression codec used for the last fetched data payload. */
+ private short lastCompressionCodec;
+
+ /** Whether the last fetched block represented metadata rather than raw data. */
+ private boolean lastWasMetadata;
+
+ private final boolean keepLastData;
+
+ /**
+ * Creates a handler that may optionally retain the most recently decoded payload.
+ *
+ *
- *
*
- * {@code
+ * {@code
+ * // Example: register a callback before scheduling
+ * fetcher.addCallback(callback);
+ * }
*/
@SuppressWarnings("UnusedReturnValue")
public boolean addCallback(USKFetcherCallback cb) {
@@ -147,259 +141,32 @@ public boolean addCallback(USKFetcherCallback cb) {
// DBR (date-hint) fetching is handled by USKDateHintFetches.
- /**
- * Tracks a single edition probe, including its checker state and polling metadata.
- *
- * {@code
+ * {@code
+ * // Example: schedule immediately after construction
+ * fetcher.schedule(context);
+ * }
+ *
* @param context client context that provides schedulers, timing, and factories required to run
- * the discovery loop; must not be {@code null}
+ * the discovery loop; must be non-null
*/
@Override
public void schedule(ClientContext context) {
@@ -1256,7 +714,12 @@ public void schedule(ClientContext context) {
uskManager.subscribe(origUSK, this, false, parent.getClient());
boolean startedDBRs = dbrHintFetches.maybeStart(context);
long lookedUp = uskManager.lookupLatestSlot(origUSK);
- SchedulePlan plan = buildSchedulePlan(lookedUp, startedDBRs, context);
+ if (shouldAbortSchedule()) return;
+ USKSchedulingCoordinator.SchedulePlan plan = buildSchedulePlan(lookedUp, startedDBRs, context);
+ if (plan == null) return;
+ synchronized (this) {
+ plan.bye = cancelled || completed;
+ }
if (plan.registerNow) registerAttempts(context);
else if (plan.completeCheckingStore) {
this.finishSuccess(context);
@@ -1280,71 +743,30 @@ private boolean shouldAbortSchedule() {
}
}
+ private USKFetcherCallback[] snapshotCallbacks() {
+ synchronized (this) {
+ return callbacks.toArray(new USKFetcherCallback[0]);
+ }
+ }
+
/**
* Builds a plan describing how to proceed with scheduling for this round.
*
*
+ *
+ */
+final class USKKeyWatchSet {
+ /** Default number of edition slots probed per lookup window. */
+ static final int WATCH_KEYS = 50;
+
+ /** Logger for watch-set diagnostics and trace output. */
+ private static final Logger LOG = LoggerFactory.getLogger(USKKeyWatchSet.class);
+
+ /** USK whose editions are being monitored and expanded into SSK lookups. */
+ private final USK origUSK;
+
+ /** Minimum number of failed edition probes to schedule beyond {@code lookedUp}. */
+ private final int origMinFailures;
+
+ /** Whether new lookups should be scheduled as background polls instead of immediate fetches. */
+ private final boolean backgroundPoll;
+
+ // Common for the whole USK
+ /** Public key hash for the USK namespace being tracked. */
+ private final byte[] pubKeyHash;
+
+ /** Crypto algorithm identifier for derived SSKs. */
+ private final byte cryptoAlgorithm;
+
+ // List of slots since the USKManager's current last known good edition.
+ /** Key list anchored at the last known good slot. */
+ private final KeyList fromLastKnownSlot;
+
+ /** Per-subscriber key lists keyed by the hinted edition. */
+ private final TreeMap
+ *
+ */
+final class USKPollingRound {
+ /** Logger for debugging and lifecycle diagnostics. */
+ private static final Logger LOG = LoggerFactory.getLogger(USKPollingRound.class);
+
+ /** Coordinates and tracks in-flight polling attempts. */
+ private final USKAttemptManager attempts;
+
+ /** Runs datastore check cycles before scheduling attempts. */
+ private final USKStoreCheckCoordinator storeChecks;
+
+ /** Tracks date-based hint fetches that gate polling completion. */
+ private final USKDateHintFetches dbrHintFetches;
+
+ /** Provides a stable snapshot of subscribed callbacks. */
+ private final USKSubscriberRegistry subscribers;
+
+ /** Manager used to query the latest known slots. */
+ private final USKManager uskManager;
+
+ /** Base USK that is being polled by this round. */
+ private final USK origUSK;
+
+ /** Indicates whether scheduling is biased for real-time activity. */
+ private final boolean realTimeFlag;
+
+ /** Baseline sleep duration restored when progress is detected, in milliseconds. */
+ private final long origSleepTime;
+
+ /** Maximum sleep duration allowed during backoff, in milliseconds. */
+ private final long maxSleepTime;
+
+ /** Current sleep duration used for the next backoff interval, in milliseconds. */
+ private long sleepTime;
+
+ /** Tracks whether the round has completed its initial loop. */
+ private boolean firstLoop;
+
+ /**
+ * Creates a polling round helper for a single fetcher cycle.
+ *
+ *
+ *
+ */
+final class USKPriorityPolicy {
+ /** Logger for priority updates and trace diagnostics. */
+ private static final Logger LOG = LoggerFactory.getLogger(USKPriorityPolicy.class);
+
+ /** Default polling priority for normal background checks. */
+ private static final short DEFAULT_NORMAL_POLL_PRIORITY = RequestStarter.PREFETCH_PRIORITY_CLASS;
+
+ /** Default polling priority for progress-oriented checks. */
+ private static final short DEFAULT_PROGRESS_POLL_PRIORITY = RequestStarter.UPDATE_PRIORITY_CLASS;
+
+ /** Current polling priority for normal background checks. */
+ private short normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY;
+
+ /** Current polling priority for progress-oriented checks. */
+ private short progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY;
+
+ /** Attempt manager that consumes polling priorities. */
+ private final USKAttemptManager attempts;
+
+ /**
+ * Creates a priority policy bound to a specific attempt manager.
+ *
+ *
+ *
+ */
+final class USKSchedulingCoordinator {
+ /** Attempt manager used to schedule or inspect polling attempts. */
+ private final USKAttemptManager attempts;
+
+ /** Coordinator responsible for datastore store checks. */
+ private final USKStoreCheckCoordinator storeChecks;
+
+ /** DBR hint fetch tracker used to decide when to defer scheduling. */
+ private final USKDateHintFetches dbrHintFetches;
+
+ /** Whether the owning fetcher should operate in store-only mode. */
+ private final boolean checkStoreOnly;
+
+ /** Latest known edition value captured when the current scheduling cycle was built. */
+ private long valueAtSchedule;
+
+ /** Tracks whether the coordinator has started at least one scheduling cycle. */
+ private boolean started;
+
+ /** Tracks whether scheduling must wait until DBR hint fetches finish. */
+ private boolean scheduleAfterDBRsDone;
+
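+ // Hedged sketch of the deferral implied by scheduleAfterDBRsDone: if date-based (DBR) hint fetches
+ // are still outstanding when a scheduling cycle is requested, remember the request and replay it
+ // once the hints complete. The helper below is illustrative only; the real class drives this from
+ // its scheduling entry points via the dbrHintFetches tracker.
+ private boolean deferUntilHintsDoneSketch(boolean hintFetchesOutstanding) {
+ if (!hintFetchesOutstanding) {
+ return false; // nothing to wait for; the cycle can be built immediately
+ }
+ scheduleAfterDBRsDone = true; // replay the scheduling request when the hint fetches finish
+ return true;
+ }
+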
+ /**
+ * Creates a scheduling coordinator for a USK polling round.
+ *
+ *
+ *
+ */
+final class USKStoreCheckCoordinator {
+ /** Logger for store-check lifecycle events and diagnostics. */
+ private static final Logger LOG = LoggerFactory.getLogger(USKStoreCheckCoordinator.class);
+
+ /** Active store checker getter, or {@code null} when no store scan is running. */
+ private USKStoreCheckerGetter runningStoreChecker;
+
+ /** Watched key set used to derive datastore checks. */
+ private final USKKeyWatchSet watchingKeys;
+
+ /** Attempt manager used to schedule polling attempts after store checks. */
+ private final USKAttemptManager attempts;
+
+ /** Parent requester used for scheduling and network accounting. */
+ private final ClientRequester parent;
+
+ /** Whether this coordinator should avoid network fetches and only check the store. */
+ private final boolean checkStoreOnly;
+
+ /** USK manager used to query the latest known slot. */
+ private final USKManager uskManager;
+
+ /** Base USK being checked for datastore availability. */
+ private final USK origUSK;
+
+ /** Callback interface used to bridge to the owning fetcher. */
+ private final USKStoreCheckCallbacks callbacks;
+
+ /** Whether store checks should run with real-time scheduling bias. */
+ private final boolean realTimeFlag;
+
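+ // Hedged sketch of the sequencing captured by these fields: at most one datastore scan
+ // (runningStoreChecker) is active at a time, and network attempts are only scheduled once it has
+ // finished, never in store-only mode. Illustrative only; the real flow runs through
+ // USKStoreCheckerGetter and the callbacks interface rather than this helper.
+ private synchronized boolean mayScheduleNetworkAttemptsSketch() {
+ if (runningStoreChecker != null) {
+ return false; // wait for the current datastore scan to complete first
+ }
+ return !checkStoreOnly; // store-only rounds never fall through to network fetches
+ }
+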
+ /**
+ * Parameters used to configure {@link USKStoreCheckCoordinator}.
+ *
+ *
*
*
- * @see USKFetcher
- * @see USKFetcher.USKStoreChecker
+ * @see USKStoreCheckCoordinator
+ * @see USKStoreCheckCoordinator.USKStoreChecker
*/
final class USKStoreCheckerGetter extends SendableGet {
- /** Owning fetcher that provides context, policy, and accounting for this probe. */
- private final transient USKFetcher fetcher;
+ /** Coordinator for store-check lifecycle and callbacks. */
+ private final transient USKStoreCheckCoordinator coordinator;
- /** Candidate-key provider used to enumerate likely USK edition datastore keys. */
- private final transient USKFetcher.USKStoreChecker checker;
+ /** Callbacks for fetcher-level state needed by the store check. */
+ private final transient USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks;
+
+ /** Candidate-key provider used to list likely USK edition datastore keys. */
+ private final transient USKStoreCheckCoordinator.USKStoreChecker checker;
+
+ /** Client requester supplied at construction and passed to the superclass. */
+ private final ClientRequester owner;
/**
* Tracks whether {@link #preRegister(ClientContext, boolean)} has run and this request is
@@ -54,18 +61,24 @@ final class USKStoreCheckerGetter extends SendableGet {
/**
* Creates a new local-only store-check getter for a single USK polling pass.
*
- *
+ *
+ */
+final class USKSubscriberRegistry {
+ /** Live subscriber set used for callback updates and snapshots. */
+ private final HashSet
+ *
+ */
+final class USKSuccessPlanner {
+ /** Creates a stateless planner instance. */
+ USKSuccessPlanner() {}
+
+ /**
+ * Plan describing how to handle a successful fetch.
+ *
+ *