diff --git a/src/main/java/network/crypta/client/async/USKAttempt.java b/src/main/java/network/crypta/client/async/USKAttempt.java new file mode 100644 index 0000000000..34fdf1e06b --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttempt.java @@ -0,0 +1,242 @@ +package network.crypta.client.async; + +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.USK; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tracks a single edition probe, including its checker state and polling metadata. + * + *

Each attempt owns a {@link USKChecker} that performs the actual request and reports completion + * through {@link USKCheckerCallback}. The attempt records whether it has succeeded, failed (DNF), + * or been canceled, and it exposes scheduling hooks used by the owning fetcher. The attempt also + * tracks whether it has ever entered finite cooldown so that polling rounds can determine when a + * round is finished for now. + * + *

The class is mutable and relies on synchronization for checker state updates. Callers usually + * treat each attempt as part of a larger scheduling loop, invoking {@link #schedule(ClientContext)} + * and reacting to callbacks from the checker. Instances are short-lived and are replaced as polling + * rounds advance. + * + *
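A minimal sketch of how an owning fetcher might drive one attempt, assuming an already-built attemptContext, a lookup taken from the watch set, and a ClientContext named context (all supplied by the fetcher; names are illustrative):

// Sketch only: a one-off probe; passing true for "forever" creates a long-lived polling attempt.
USKAttempt attempt = new USKAttempt(attemptContext, lookup, false);
attempt.schedule(context); // hands the wrapped USKChecker to the scheduler
// Completion is reported through USKCheckerCallback and forwarded to USKAttemptCallbacks;
// polling rounds later consult attempt.everInCooldown() to decide when a round is done for now.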

+ */ +public final class USKAttempt implements USKCheckerCallback { + /** Logger for attempt scheduling diagnostics. */ + private static final Logger LOG = LoggerFactory.getLogger(USKAttempt.class); + + /** Literal used in attempt descriptions to keep log formatting consistent. */ + private static final String FOR_LITERAL = " for "; + + /** Edition number. */ + long number; + + /** Attempt to fetch that edition number (or null if the fetch has finished). */ + USKChecker checker; + + /** Successful fetch? */ + boolean succeeded; + + /** DNF? */ + boolean dnf; + + /** Whether this attempt has been explicitly canceled. */ + boolean cancelled; + + /** The lookup descriptor associated with this attempt. */ + final USKKeyWatchSet.Lookup lookup; + + /** Whether this attempt is a long-lived polling attempt. */ + final boolean forever; + + /** Whether this attempt has ever entered finite cooldown. */ + private boolean everInCooldown; + + /** Whether cancellation has already been reported to callbacks. */ + private boolean cancelNotified; + + /** Callback target for attempt lifecycle events. */ + private final USKAttemptCallbacks callbacks; + + /** Base USK used for logging and manager lookups. */ + private final USK origUSK; + + /** Parent requester that supplies priority and scheduling policy. */ + private final ClientRequester parent; + + /** + * Creates a new attempt for the provided lookup descriptor. + * + *

The constructor wires the checker used to probe the target edition and initializes the + * attempt state for scheduling. When {@code forever} is {@code true}, the checker is created for + * a long-lived polling attempt; otherwise it represents a one-off probe that will retire after + * completion. + * + * @param attemptContext shared configuration for attempt construction + * @param lookup descriptor containing edition and key information + * @param forever {@code true} to create a polling attempt; {@code false} for a one-off probe + */ + USKAttempt(USKAttemptContext attemptContext, USKKeyWatchSet.Lookup lookup, boolean forever) { + this.callbacks = attemptContext.callbacks(); + this.origUSK = attemptContext.origUSK(); + this.parent = attemptContext.parent(); + this.lookup = lookup; + this.number = lookup.val; + this.succeeded = false; + this.dnf = false; + this.forever = forever; + this.checker = + new USKChecker( + this, + lookup.key, + forever ? -1 : attemptContext.ctx().maxUSKRetries, + lookup.ignoreStore ? attemptContext.ctxNoStore() : attemptContext.ctx(), + attemptContext.parent(), + attemptContext.realTimeFlag()); + } + + @Override + public void onDNF(ClientContext context) { + synchronized (this) { + checker = null; + dnf = true; + } + callbacks.onDNF(this, context); + } + + @Override + public void onSuccess(ClientSSKBlock block, ClientContext context) { + synchronized (this) { + checker = null; + succeeded = true; + } + callbacks.onSuccess(this, false, block, context); + } + + @Override + public void onFatalAuthorError(ClientContext context) { + synchronized (this) { + checker = null; + } + // Counts as success except it doesn't update + callbacks.onSuccess(this, true, null, context); + } + + @Override + public void onNetworkError(ClientContext context) { + synchronized (this) { + checker = null; + } + // Treat network error as DNF for scheduling purposes + callbacks.onDNF(this, context); + } + + @Override + public void onCancelled(ClientContext context) { + synchronized (this) { + checker = null; + if (cancelNotified) return; + cancelNotified = true; + } + callbacks.onCancelled(this, context); + } + + /** + * Cancels this attempt and propagates cancellation to the checker if present. + * + * @param context client context used to cancel scheduling; must not be null + */ + public void cancel(ClientContext context) { + cancelled = true; + USKChecker c; + synchronized (this) { + c = checker; + } + if (c != null) { + c.cancel(context); + } + onCancelled(context); + } + + /** + * Schedules this attempt with its checker if still active. + * + * @param context client context used to schedule the checker; must not be null + */ + public void schedule(ClientContext context) { + USKChecker c; + synchronized (this) { + c = checker; + } + if (c == null) { + if (LOG.isDebugEnabled()) LOG.debug("Checker == null in schedule() for {}", this); + } else { + assert (!c.persistent()); + c.schedule(context); + } + } + + @Override + public String toString() { + return "USKAttempt for " + + number + + FOR_LITERAL + + origUSK.getURI() + + (forever ? " (forever)" : ""); + } + + @Override + public short getPriority() { + if (callbacks.isBackgroundPoll()) { + synchronized (this) { + if (forever) { + if (!everInCooldown) { + // Boost the priority initially, so that finding the first edition takes precedence + // over ongoing polling after we're fairly sure we're not going to find anything. 
+ // The ongoing polling keeps the ULPRs up to date so that we will get told quickly, + // but if we are overloaded, we won't be able to keep up regardless. + return callbacks.getProgressPollPriority(); + } else { + return callbacks.getNormalPollPriority(); + } + } else { + // If !forever, this is a random-probe. + // It's not that important. + return callbacks.getNormalPollPriority(); + } + } + } + return parent.getPriorityClass(); + } + + @Override + public void onEnterFiniteCooldown(ClientContext context) { + synchronized (this) { + everInCooldown = true; + } + callbacks.onEnterFiniteCooldown(context); + } + + /** + * Reports whether this attempt has ever entered a finite cooldown. + * + * @return {@code true} if the attempt has cooled down at least once + */ + public synchronized boolean everInCooldown() { + return everInCooldown; + } + + /** Refreshes cached poll parameters on the underlying checker, if active. */ + public void reloadPollParameters() { + USKChecker c; + synchronized (this) { + c = checker; + } + if (c == null) return; + c.onChangedFetchContext(); + } +} diff --git a/src/main/java/network/crypta/client/async/USKAttemptCallbacks.java b/src/main/java/network/crypta/client/async/USKAttemptCallbacks.java new file mode 100644 index 0000000000..6a5fa40f93 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttemptCallbacks.java @@ -0,0 +1,101 @@ +package network.crypta.client.async; + +import java.util.Random; +import network.crypta.keys.ClientSSKBlock; + +/** + * Callback interface for {@link USKAttempt} lifecycle events. + * + *

Implementations receive completion and scheduling signals from polling attempts. These hooks + * allow the owning fetcher to react to success, DNF, cancellation, and cooldown transitions while + * providing priority information used by the scheduler. The callbacks are intentionally minimal and + * are expected to be fast, as they are invoked on scheduling or network threads. + * + *

The interface is stateful in the sense that implementations can depend on the owning fetcher + * state, but callers should treat each method as a synchronous notification. No concurrency + * guarantees are enforced beyond what the caller provides, so implementations should provide their + * own synchronization if they mutate shared state. + * + *
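For illustration only, a minimal implementor of this interface might look like the sketch below; the class name, priority values, and bookkeeping are hypothetical (the real implementor in this change is USKFetcher itself):

// Hypothetical same-package implementor; imports elided.
final class ExamplePollCallbacks implements USKAttemptCallbacks {
  private long lastFound = -1; // guarded by this instance's monitor

  @Override public synchronized void onSuccess(
      USKAttempt attempt, boolean dontUpdate, ClientSSKBlock block, ClientContext context) {
    if (!dontUpdate) lastFound = Math.max(lastFound, attempt.number);
  }
  @Override public synchronized void onDNF(USKAttempt attempt, ClientContext context) {
    // a real implementation would count consecutive DNFs and decide whether the round is done
  }
  @Override public void onCancelled(USKAttempt attempt, ClientContext context) {}
  @Override public void onEnterFiniteCooldown(ClientContext context) {}
  @Override public boolean isBackgroundPoll() { return true; }
  @Override public short getProgressPollPriority() { return 1; } // placeholder priority class
  @Override public short getNormalPollPriority() { return 3; } // placeholder priority class
  @Override public boolean shouldAddRandomEditions(Random random, boolean firstLoop) {
    return !firstLoop && random.nextInt(4) == 0; // arbitrary sampling policy for the sketch
  }
}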

+ */ +interface USKAttemptCallbacks { + /** + * Notifies that an attempt resulted in a DNF outcome. + * + *

Implementations may record the failure, reschedule work, or update the UI state. The attempt + * is already marked as complete when this callback runs. + * + * @param attempt attempt that reported the DNF result; never null + * @param context client context associated with the attempt; must not be null + */ + void onDNF(USKAttempt attempt, ClientContext context); + + /** + * Notifies that an attempt succeeded. + * + *

The callback receives the decoded block if available and a flag indicating that the success + * should not update internal edition tracking. Implementations typically decide whether to decode + * or propagate data based on these inputs. + * + * @param attempt attempt that reported success; never null + * @param dontUpdate whether the success should avoid updating edition tracking + * @param block decoded block returned by the attempt; may be null + * @param context client context associated with the attempt; must not be null + */ + void onSuccess( + USKAttempt attempt, boolean dontUpdate, ClientSSKBlock block, ClientContext context); + + /** + * Notifies that an attempt was canceled. + * + *

This callback is invoked after the attempt has been marked canceled and any checker has been + * shut down. + * + * @param attempt attempt that was canceled; never null + * @param context client context associated with the attempt; must not be null + */ + void onCancelled(USKAttempt attempt, ClientContext context); + + /** + * Notifies that an attempt entered a finite cooldown period. + * + *

This signal is used to determine when a polling round can be treated as finished for now. + * + * @param context client context associated with the attempt; must not be null + */ + void onEnterFiniteCooldown(ClientContext context); + + /** + * Indicates whether the owning fetcher is running background polling. + * + * @return {@code true} when background polling is active + */ + boolean isBackgroundPoll(); + + /** + * Returns the polling priority used while making progress on a round. + * + * @return priority class for progress-oriented polling + */ + short getProgressPollPriority(); + + /** + * Returns the polling priority used during steady-state background polling. + * + * @return priority class for normal background polling + */ + short getNormalPollPriority(); + + /** + * Determines whether random editions should be added during polling. + * + * @param random random source used to sample candidates; must not be null + * @param firstLoop whether the round is in its initial loop + * @return {@code true} to schedule random editions, otherwise {@code false} + */ + boolean shouldAddRandomEditions(Random random, boolean firstLoop); +} diff --git a/src/main/java/network/crypta/client/async/USKAttemptContext.java b/src/main/java/network/crypta/client/async/USKAttemptContext.java new file mode 100644 index 0000000000..771efc6b91 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttemptContext.java @@ -0,0 +1,25 @@ +package network.crypta.client.async; + +import network.crypta.client.FetchContext; +import network.crypta.keys.USK; + +/** + * Shared configuration for creating {@link USKAttempt} instances. + * + *

This bundles the stable dependencies required to spawn attempt checkers so callers can reuse a + * single parameter object when scheduling multiple attempts. + * + * @param callbacks owning callback handler for lifecycle events + * @param origUSK base USK used for logging + * @param ctx base fetch context for scheduling + * @param ctxNoStore no-store fetch context for probes that bypass the store + * @param parent parent requester providing scheduling policy + * @param realTimeFlag whether to use real-time scheduling for the checker + */ +record USKAttemptContext( + USKAttemptCallbacks callbacks, + USK origUSK, + FetchContext ctx, + FetchContext ctxNoStore, + ClientRequester parent, + boolean realTimeFlag) {} diff --git a/src/main/java/network/crypta/client/async/USKAttemptManager.java b/src/main/java/network/crypta/client/async/USKAttemptManager.java new file mode 100644 index 0000000000..a898bb3c26 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKAttemptManager.java @@ -0,0 +1,484 @@ +package network.crypta.client.async; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Manages USK attempt lifecycle, staging, and scheduling. + * + *
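A sketch of how the USKAttemptContext record above is built once and reused; the variable names mirror the fetcher's fields and are illustrative:

// Built once by the owning fetcher, then shared across every attempt it creates.
USKAttemptContext attemptContext =
    new USKAttemptContext(callbacks, origUSK, ctx, ctxNoStore, parent, realTimeFlag);
USKAttempt probe = new USKAttempt(attemptContext, lookup, false);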

This helper owns the attempt maps and the mechanics for adding, cancelling, and registering + * probe attempts. It delegates scheduling callbacks to the owning {@link USKFetcher} through the + * {@link USKAttemptCallbacks} interface. The manager tracks both short-lived random-probe attempts + * and long-lived polling attempts, ensuring that duplicate editions are not scheduled twice. It + * also coordinates the transition from datastore checks to network scheduling. + * + *

The class is mutable and synchronizes access to attempt collections. Callers typically invoke + * it from scheduler threads and should avoid holding external locks to prevent deadlocks. It + * prefers deterministic, ordered behavior by using {@link TreeMap} for edition-keyed attempts and + * by snapshotting collections before scheduling network work. + * + *
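A rough sketch of one scheduling round as the owning fetcher might drive this manager; manager, curLatest, firstLoop, hasLastData, and suggestedEdition are illustrative names, not fields of this class:

// 1. Drop attempts made obsolete by the newest known edition; cancel them outside the lock.
List<USKAttempt> obsolete = manager.cancelBefore(curLatest);
manager.finishCancelBefore(obsolete, context);
// 2. Plan and stage the next probes, then hand them to the scheduler.
manager.addNewAttempts(curLatest, context, firstLoop);
manager.registerAttempts(
    new USKAttemptManager.USKAttemptRegistrationParams(context, hasLastData, suggestedEdition));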

+ */ +final class USKAttemptManager { + /** Logger for attempt scheduling diagnostics. */ + private static final Logger LOG = LoggerFactory.getLogger(USKAttemptManager.class); + + /** Literal used in attempt descriptions to keep log formatting consistent. */ + private static final String FOR_LITERAL = " for "; + + /** Attempt context shared across all created attempts. */ + private final USKAttemptContext attemptContext; + + /** Manager used to resolve the latest known slot for comparisons. */ + private final USKManager uskManager; + + /** Watch set used to plan which editions should be probed. */ + private final USKKeyWatchSet watchingKeys; + + /** Whether attempts should be suppressed because this is a store-only mode. */ + private final boolean checkStoreOnly; + + /** Whether the fetcher should keep the last data when probing newer editions. */ + private final boolean keepLastData; + + /** Attempts staged for immediate scheduling on the next registration cycle. */ + private final ArrayList<USKAttempt> attemptsToStart = new ArrayList<>(); + + /** Active random-probe attempts keyed by edition number. */ + private final TreeMap<Long, USKAttempt> runningAttempts = new TreeMap<>(); + + /** Polling attempts keyed by edition number for background tracking. */ + private final TreeMap<Long, USKAttempt> pollingAttempts = new TreeMap<>(); + + /** + * Creates a manager for USK attempts. + * + *

The manager holds the context and collaborators required to build and schedule attempts. It + * assumes the provided dependencies remain valid for the lifetime of the owning fetcher. + * + * @param attemptContext shared configuration used for new attempt construction; must be non-null + * @param uskManager manager used to query the latest slots; must be non-null + * @param watchingKeys watch set used to plan fetch and poll editions; must be non-null + * @param checkStoreOnly whether to suppress network attempts and only check the store + * @param keepLastData whether to retain the last data when scheduling new attempts + */ + USKAttemptManager( + USKAttemptContext attemptContext, + USKManager uskManager, + USKKeyWatchSet watchingKeys, + boolean checkStoreOnly, + boolean keepLastData) { + this.attemptContext = attemptContext; + this.uskManager = uskManager; + this.watchingKeys = watchingKeys; + this.checkStoreOnly = checkStoreOnly; + this.keepLastData = keepLastData; + } + + /** + * Cancels attempts for editions older than the current latest value. + * + *

The method removes attempts from the internal maps and returns a list of attempts that + * should be canceled by the caller. It does not perform cancellation itself so that callers can + * decide when to propagate the cancellation on their own thread. + * + * @param curLatest latest edition value used as a cutoff for cancellation + * @return list of attempts to cancel, or {@code null} when none were removed + */ + List<USKAttempt> cancelBefore(long curLatest) { + List<USKAttempt> v = null; + int count = 0; + synchronized (this) { + for (Iterator<USKAttempt> i = runningAttempts.values().iterator(); i.hasNext(); ) { + USKAttempt att = i.next(); + if (att.number < curLatest) { + if (v == null) v = new ArrayList<>(runningAttempts.size() - count); + v.add(att); + i.remove(); + } + count++; + } + for (Iterator<Map.Entry<Long, USKAttempt>> i = pollingAttempts.entrySet().iterator(); + i.hasNext(); ) { + Map.Entry<Long, USKAttempt> entry = i.next(); + if (entry.getKey() < curLatest) { + if (v == null) v = new ArrayList<>(Math.max(1, pollingAttempts.size() - count)); + v.add(entry.getValue()); + i.remove(); + } + else break; // TreeMap is ordered. + } + } + return v; + } + + /** + * Cancels the provided attempts by invoking {@link USKAttempt#cancel(ClientContext)}. + * + * @param attempts attempts returned by {@link #cancelBefore(long)}; may be null + * @param context client context used for cancellation; must not be null + */ + void finishCancelBefore(List<USKAttempt> attempts, ClientContext context) { + if (attempts == null) return; + for (USKAttempt att : attempts) { + att.cancel(context); + } + } + + /** + * Plans and stages new attempts for the next scheduling cycle. + * + *

The method consults the watch set to determine which editions should be polled or fetched + * and stages the resulting attempts in {@link #attemptsToStart}. Duplicate editions are filtered + * out, and no attempts are created when running in store-only mode. + * + * @param curLatest latest edition value used to seed scheduling decisions + * @param context client context providing randomness and scheduling information + * @param firstLoop whether this is the first scheduling loop in the round + */ + void addNewAttempts(long curLatest, ClientContext context, boolean firstLoop) { + USKKeyWatchSet.ToFetch list = + watchingKeys.getEditionsToFetch( + curLatest, + context.random, + getRunningFetchEditions(), + shouldAddRandomEditions(context, firstLoop), + firstLoop); + USKKeyWatchSet.Lookup[] toPoll = list.poll; + USKKeyWatchSet.Lookup[] toFetch = list.fetch; + synchronized (this) { + for (USKKeyWatchSet.Lookup lookup : toPoll) { + if (LOG.isTraceEnabled()) LOG.trace("Polling {} for {}", lookup, attemptContext.origUSK()); + USKAttempt attempt = add(lookup, true); + if (attempt != null) attemptsToStart.add(attempt); + } + for (USKKeyWatchSet.Lookup lookup : toFetch) { + if (LOG.isDebugEnabled()) + LOG.debug("Adding checker for edition {} for {}", lookup, attemptContext.origUSK()); + USKAttempt attempt = add(lookup, false); + if (attempt != null) attemptsToStart.add(attempt); + } + } + } + + /** + * Returns whether random editions should be added during scheduling. + * + * @param context client context providing randomness for selection + * @param firstLoop whether this is the first scheduling loop in the round + * @return {@code true} if random editions should be added, otherwise {@code false} + */ + boolean shouldAddRandomEditions(ClientContext context, boolean firstLoop) { + return attemptContext.callbacks().shouldAddRandomEditions(context.random, firstLoop); + } + + /** + * Adds a new attempt for the given lookup descriptor. + * + *

This method enforces duplicate checks and stores the attempt in the appropriate map based on + * whether it is a polling attempt. It returns {@code null} when the attempt is suppressed (for + * example, in store-only mode or when a duplicate is detected). + * + * @param lookup descriptor containing the edition to probe + * @param forever whether the attempt should be treated as a polling attempt + * @return the created attempt, or {@code null} when no attempt was added + */ + private synchronized USKAttempt add(USKKeyWatchSet.Lookup lookup, boolean forever) { + long edition = lookup.val; + if (lookup.val < 0) + throw new IllegalArgumentException( + "Can't check <0" + FOR_LITERAL + lookup.val + " on " + attemptContext.origUSK()); + if (checkStoreOnly) return null; + if (LOG.isDebugEnabled()) + LOG.debug("Adding USKAttempt for {} for {}", edition, attemptContext.origUSK()); + if (isDuplicateAttempt(forever, edition)) return null; + USKAttempt attempt = new USKAttempt(attemptContext, lookup, forever); + if (forever) pollingAttempts.put(edition, attempt); + else { + runningAttempts.put(edition, attempt); + } + if (LOG.isDebugEnabled()) LOG.debug("Added {} for {}", attempt, attemptContext.origUSK()); + return attempt; + } + + /** + * Returns whether an attempt already exists for the given edition. + * + * @param forever whether the attempt is a polling attempt + * @param edition edition number to check for duplicates + * @return {@code true} if a duplicate attempt is already present + */ + private synchronized boolean isDuplicateAttempt(boolean forever, long edition) { + if (forever) { + if (pollingAttempts.containsKey(edition)) { + if (LOG.isDebugEnabled()) + LOG.debug("Already polling edition: {} for {}", edition, attemptContext.origUSK()); + return true; + } + } else { + if (runningAttempts.containsKey(edition)) { + if (LOG.isDebugEnabled()) + LOG.debug("Returning because already running for {}", attemptContext.origUSK().getURI()); + + return true; + } + } + return false; + } + + /** + * Returns whether any random-probe attempts are running. + * + * @return {@code true} if there are active running attempts + */ + synchronized boolean hasRunningAttempts() { + return !runningAttempts.isEmpty(); + } + + /** + * Returns whether any polling attempts are registered. + * + * @return {@code true} if no polling attempts are registered + */ + synchronized boolean hasNoPollingAttempts() { + return pollingAttempts.isEmpty(); + } + + /** + * Returns a snapshot of polling attempts. + * + * @return array of polling attempts; may be empty but never null + */ + synchronized USKAttempt[] snapshotPollingAttempts() { + return pollingAttempts.values().toArray(new USKAttempt[0]); + } + + /** + * Returns a snapshot of running attempts. + * + * @return array of running attempts; may be empty but never null + */ + synchronized USKAttempt[] snapshotRunningAttempts() { + return runningAttempts.values().toArray(new USKAttempt[0]); + } + + /** + * Returns a snapshot of attempts staged for registration. + * + * @return array of attempts staged to start; may be empty but never null + */ + synchronized USKAttempt[] snapshotAttemptsToStart() { + return attemptsToStart.toArray(new USKAttempt[0]); + } + + /** + * Returns whether any attempts are staged for registration. + * + * @return {@code true} when staged attempts are available + */ + synchronized boolean hasPendingAttempts() { + return !attemptsToStart.isEmpty(); + } + + /** Clears the staged attempts list. 
*/ + synchronized void clearAttemptsToStart() { + attemptsToStart.clear(); + } + + /** Clears all attempt collections, removing staged, running, and polling attempts. */ + synchronized void clearAllAttempts() { + attemptsToStart.clear(); + runningAttempts.clear(); + pollingAttempts.clear(); + } + + /** + * Removes a running attempt by edition. + * + * @param edition edition number to remove + */ + synchronized void removeRunningAttempt(long edition) { + runningAttempts.remove(edition); + } + + /** + * Removes a polling attempt by edition. + * + * @param edition edition number to remove + */ + synchronized void removePollingAttempt(long edition) { + pollingAttempts.remove(edition); + } + + /** + * Returns the count of running attempts. + * + * @return number of running attempts + */ + @SuppressWarnings("unused") + synchronized int runningAttemptCount() { + return runningAttempts.size(); + } + + /** + * Returns the count of polling attempts. + * + * @return number of polling attempts + */ + @SuppressWarnings("unused") + synchronized int pollingAttemptCount() { + return pollingAttempts.size(); + } + + /** + * Returns a human-readable description of running attempts. + * + * @return description string containing edition numbers and flags + */ + synchronized String runningAttemptsDescription() { + StringBuilder sb = new StringBuilder(); + boolean first = true; + for (USKAttempt attempt : runningAttempts.values()) { + if (!first) sb.append(", "); + first = false; + sb.append(attempt.number); + if (attempt.cancelled) sb.append("(cancelled)"); + if (attempt.succeeded) sb.append("(succeeded)"); + } + return sb.toString(); + } + + /** + * Returns lookup descriptors for currently running fetch editions. + * + * @return list of lookup descriptors associated with running or polling attempts + */ + synchronized List getRunningFetchEditions() { + List ret = new ArrayList<>(); + for (USKAttempt attempt : runningAttempts.values()) { + if (!ret.contains(attempt.lookup)) ret.add(attempt.lookup); + } + for (USKAttempt attempt : pollingAttempts.values()) { + if (!ret.contains(attempt.lookup)) ret.add(attempt.lookup); + } + return ret; + } + + /** + * Registers staged attempts with the scheduler. + * + *

The method drains the staged attempt list, notifies the parent requester when network work + * is about to start, and schedules each attempt if it is still newer than the latest known slot. + * Attempts that are already obsolete are removed from the internal maps. + * + * @param params registration parameters containing context and edition tracking information + */ + void registerAttempts(USKAttemptRegistrationParams params) { + USKAttempt[] attempts; + int runningCount; + int pollingCount; + synchronized (this) { + attempts = attemptsToStart.toArray(new USKAttempt[0]); + attemptsToStart.clear(); + runningCount = runningAttempts.size(); + pollingCount = pollingAttempts.size(); + } + + if (attempts.length > 0) attemptContext.parent().toNetwork(params.context()); + if (LOG.isDebugEnabled()) + LOG.debug( + "Registering {} USKChecker's for {} running={} polling={}", + attempts.length, + attemptContext.origUSK(), + runningCount, + pollingCount); + for (USKAttempt attempt : attempts) { + long lastEd = uskManager.lookupLatestSlot(attemptContext.origUSK()); + + if (keepLastData && !params.hasLastRequestData() && lastEd == params.suggestedEdition()) + lastEd--; + + if (attempt == null) continue; + if (attempt.number > lastEd) attempt.schedule(params.context()); + else { + removeRunningAttempt(attempt.number); + removePollingAttempt(attempt.number); + } + } + } + + /** + * Processes attempts after a datastore store check completes. + * + *

This method mirrors {@link #registerAttempts(USKAttemptRegistrationParams)} but operates on + * a provided attempt array after a store check completes. It schedules attempts that remain newer + * than the latest known slot and removes those that are already obsolete. + * + * @param params registration parameters containing context and edition tracking information + * @param attempts attempts to schedule after the store check; may be empty but not null + */ + void processAttemptsAfterStoreCheck(USKAttemptRegistrationParams params, USKAttempt[] attempts) { + for (USKAttempt attempt : attempts) { + long lastEd = uskManager.lookupLatestSlot(attemptContext.origUSK()); + if (keepLastData && !params.hasLastRequestData() && lastEd == params.suggestedEdition()) + lastEd--; + if (attempt == null) continue; + if (attempt.number > lastEd) attempt.schedule(params.context()); + else { + removeRunningAttempt(attempt.number); + removePollingAttempt(attempt.number); + } + } + } + + /** + * Notes that an attempt succeeded and removes it from running attempts. + * + * @param attempt attempt that succeeded; may be null + */ + @SuppressWarnings("unused") + void noteAttemptSuccess(USKAttempt attempt) { + if (attempt == null) return; + removeRunningAttempt(attempt.number); + } + + /** + * Notes that an attempt was canceled and removes it from running attempts. + * + * @param attempt attempt that was canceled; may be null + */ + @SuppressWarnings("unused") + void noteAttemptCancelled(USKAttempt attempt) { + if (attempt == null) return; + if (LOG.isDebugEnabled()) + LOG.debug("Attempt {} cancelled for {}", attempt.number, attemptContext.origUSK()); + removeRunningAttempt(attempt.number); + } + + /** Refreshes poll parameters on all polling attempts. */ + void reloadPollParameters() { + USKAttempt[] pollers; + synchronized (this) { + pollers = pollingAttempts.values().toArray(new USKAttempt[0]); + } + for (USKAttempt attempt : pollers) attempt.reloadPollParameters(); + } + + /** + * Registration parameters used when scheduling or processing attempts. + * + * @param context client context for scheduling callbacks and networking + * @param hasLastRequestData whether the fetcher has retained the last request data + * @param suggestedEdition edition value suggested by the original USK + */ + record USKAttemptRegistrationParams( + ClientContext context, boolean hasLastRequestData, long suggestedEdition) {} +} diff --git a/src/main/java/network/crypta/client/async/USKCompletionCoordinator.java b/src/main/java/network/crypta/client/async/USKCompletionCoordinator.java new file mode 100644 index 0000000000..e33f9081c9 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKCompletionCoordinator.java @@ -0,0 +1,198 @@ +package network.crypta.client.async; + +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.USK; +import network.crypta.support.api.Bucket; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Coordinates completion callbacks and retained data handling for USK fetchers. + * + *

This helper wraps a {@link USKCompletionHandler} to decode data, retain the most recent + * payload, and deliver completion callbacks when a polling cycle finishes. It owns references to + * the manager, original USK, and requester so it can unregister and emit callbacks consistently. + * Callers typically invoke it when a fetcher is finished or canceled, and the coordinator handles + * cleanup of scheduler state and subscriber notification. + * + *
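A sketch of the terminal path as the owning fetcher might invoke it, assuming the fetcher has already snapshotted its callback array into callbackSnapshot:

// Terminal success path: optionally retain the payload, then unsubscribe, clean up, and notify.
coordinator.applyDecodedData(true, block, context);
coordinator.completeCallbacks(context, fetcher, callbackSnapshot);
// Cancellation path (alternative): callbacks only hear onCancelled.
// coordinator.finishCancelled(context, callbackSnapshot);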

The class is mutable but relies on the caller for synchronization; it performs no internal + * locking beyond the underlying collaborators. It also keeps track of real-time scheduling bias to + * interact with the correct scheduler queue when cleaning up pending keys. + * + *

    + *
  • Decodes and applies data based on completion decisions. + *
  • Exposes retained-data accessors for completion logic. + *
  • Handles unsubscribe and callback delivery on completion. + *
+ */ +final class USKCompletionCoordinator { + /** Logger for callback completion errors. */ + private static final Logger LOG = LoggerFactory.getLogger(USKCompletionCoordinator.class); + + /** Completion handler that performs decoding and retained-data management. */ + private final USKCompletionHandler completionHandler; + + /** Manager used to unsubscribe and record completion. */ + private final USKManager uskManager; + + /** Base USK used for slot lookups and found-edition callbacks. */ + private final USK origUSK; + + /** Requester used for decoding and scheduling context. */ + private final ClientRequester parent; + + /** Whether cleanup should use real-time scheduling queues. */ + private final boolean realTimeFlag; + + /** + * Creates a completion coordinator for a USK fetcher. + * + *

The coordinator depends on collaborators that are expected to remain valid for the life of + * the fetcher. The {@code parent} and {@code realTimeFlag} are used to align cleanup operations + * with the same scheduling bias as the fetcher itself. + * + * @param completionHandler handler that decodes and stores retained data; must be non-null + * @param uskManager manager used to unsubscribe and track completion; must be non-null + * @param origUSK base USK used for lookups and callback payloads; must be non-null + * @param parent requester used for decode context and scheduling; must be non-null + * @param realTimeFlag whether cleanup should use real-time scheduling queues + */ + USKCompletionCoordinator( + USKCompletionHandler completionHandler, + USKManager uskManager, + USK origUSK, + ClientRequester parent, + boolean realTimeFlag) { + this.completionHandler = completionHandler; + this.uskManager = uskManager; + this.origUSK = origUSK; + this.parent = parent; + this.realTimeFlag = realTimeFlag; + } + + /** + * Decodes and applies a data block when decoding is requested. + * + *

If {@code decode} is {@code false}, the method returns immediately. Otherwise, it delegates + * to {@link USKCompletionHandler#decodeBlockIfNeeded(boolean, ClientSSKBlock, ClientContext, + * ClientRequester)} to produce a decoded bucket and then applies the decoded data to the + * completion handler. + * + * @param decode whether decoding should be performed + * @param block block to decode; may be null when only metadata is available + * @param context client context used for decoding; must not be null + */ + void applyDecodedData(boolean decode, ClientSSKBlock block, ClientContext context) { + if (!decode) return; + Bucket decoded = completionHandler.decodeBlockIfNeeded(true, block, context, parent); + completionHandler.applyDecodedData(true, block, decoded); + } + + /** + * Applies decoded data for a discovered edition. + * + *

This delegates to the completion handler to parse or store the supplied data payload and + * metadata flags. + * + * @param decode whether the payload should be decoded + * @param metadata whether the payload represents metadata rather than raw content + * @param codec compression codec identifier associated with the payload + * @param data raw payload bytes; may be null when data is unavailable + * @param context client context used for decoding; must not be null + */ + void applyFoundDecodedData( + boolean decode, boolean metadata, short codec, byte[] data, ClientContext context) { + completionHandler.applyFoundDecodedData(decode, metadata, codec, data, context); + } + + /** + * Releases retained data bytes, if any. + * + * @return retained data bytes, or {@code null} when none are stored + */ + @SuppressWarnings("unused") + byte[] releaseLastDataBytes() { + return completionHandler.releaseLastDataBytes(); + } + + /** + * Returns the compression codec used by the retained data. + * + * @return codec identifier for the last retained data + */ + @SuppressWarnings("unused") + short lastCompressionCodec() { + return completionHandler.lastCompressionCodec(); + } + + /** + * Returns whether the retained data represents metadata. + * + * @return {@code true} if the retained data is metadata + */ + @SuppressWarnings("unused") + boolean lastWasMetadata() { + return completionHandler.lastWasMetadata(); + } + + /** + * Returns whether retained data from the last request is available. + * + * @return {@code true} if retained data is present + */ + boolean hasLastRequestData() { + return completionHandler.hasLastRequestData(); + } + + /** Clears any retained data from the last request. */ + void clearLastRequestData() { + completionHandler.clearLastRequestData(); + } + + /** + * Completes callbacks and cleans up fetcher state. + * + *

The method unsubscribes the fetcher, removes pending keys from the scheduler, and delivers + * completion callbacks with the latest known edition and retained data. Exceptions thrown by + * callbacks are caught and logged so that remaining callbacks still receive notifications. + * + * @param context client context used for scheduling and callback payloads + * @param fetcher fetcher instance being completed; must not be null + * @param callbacks callback array to notify; may be empty but not null + */ + void completeCallbacks( + ClientContext context, USKFetcher fetcher, USKFetcherCallback[] callbacks) { + uskManager.unsubscribe(origUSK, fetcher); + uskManager.onFinished(fetcher); + context + .getSskFetchScheduler(realTimeFlag) + .schedTransient + .removePendingKeys((KeyListener) fetcher); + long ed = uskManager.lookupLatestSlot(origUSK); + byte[] data = completionHandler.releaseLastDataBytes(); + short codec = completionHandler.lastCompressionCodec(); + boolean metadata = completionHandler.lastWasMetadata(); + for (USKFetcherCallback c : callbacks) { + try { + if (ed == -1) c.onFailure(context); + else + c.onFoundEdition( + new USKFoundEdition( + ed, origUSK.copy(ed), context, metadata, codec, data, false, false)); + } catch (Exception e) { + LOG.error( + "An exception occurred while dealing with a callback:{}\n{}", c, e.getMessage(), e); + } + } + } + + /** + * Notifies callbacks that the fetcher was canceled. + * + * @param context client context supplied to cancellation callbacks + * @param callbacks callback array to notify; may be empty but not null + */ + void finishCancelled(ClientContext context, USKFetcherCallback[] callbacks) { + for (USKFetcherCallback c : callbacks) c.onCancelled(context); + } +} diff --git a/src/main/java/network/crypta/client/async/USKCompletionHandler.java b/src/main/java/network/crypta/client/async/USKCompletionHandler.java new file mode 100644 index 0000000000..2e22918fae --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKCompletionHandler.java @@ -0,0 +1,247 @@ +package network.crypta.client.async; + +import java.io.IOException; +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.KeyDecodeException; +import network.crypta.support.api.Bucket; +import network.crypta.support.io.BucketTools; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tracks the most recently decoded USK payload and exposes it to completion callbacks. + * + *

This helper is used by USK fetch coordination to retain metadata about the last successful + * fetch and optionally hold on to the decoded data bucket. Callers feed decoded blocks or already + * decoded byte arrays into this instance, then later query or release the retained data when a + * fetcher completes. The handler is intentionally stateful: it keeps the last compression codec, + * whether the last block was metadata, and an optional data bucket controlled by {@code + * keepLastData}. + * + *
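A sketch of the retention flow with keepLastData enabled; block, context, and parent come from the surrounding fetch machinery and are assumed here:

USKCompletionHandler handler = new USKCompletionHandler(true); // retain decoded payloads
Bucket decoded = handler.decodeBlockIfNeeded(true, block, context, parent);
handler.applyDecodedData(true, block, decoded); // the handler now owns the bucket
// Later, at completion time, ownership of the bytes passes back to the caller:
byte[] payload = handler.releaseLastDataBytes(); // null means no payload; the bucket is freed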

All state mutations are synchronized on the instance to allow concurrent fetch activity. The + * class does not perform network I/O; it only records and releases data that has already been + * decoded. Callers must treat returned buckets and byte arrays as owned by the caller after + * retrieval. + * + *

    + *
  • Retain or discard decoded data depending on {@code keepLastData}. + *
  • Expose last-known codec and metadata flags for completion callbacks. + *
  • Release retained data safely when a fetcher terminates. + *
+ * + * @see USKCompletionCoordinator + * @see USKFetcher + */ +final class USKCompletionHandler { + private static final Logger LOG = LoggerFactory.getLogger(USKCompletionHandler.class); + + /** Last successfully fetched data bucket, retained when keepLastData is enabled. */ + private Bucket lastRequestData; + + /** Compression codec used for the last fetched data payload. */ + private short lastCompressionCodec; + + /** Whether the last fetched block represented metadata rather than raw data. */ + private boolean lastWasMetadata; + + private final boolean keepLastData; + + /** + * Creates a handler that may optionally retain the most recently decoded payload. + * + *

The {@code keepLastData} flag controls whether decoded data buckets are held so that + * completion callbacks can access them later. The handler does not decode any data on its own + * during construction; it only initializes the retention policy and starts with an empty state. + * + * @param keepLastData {@code true} to retain the last decoded bucket; {@code false} to discard + * decoded data after updating metadata flags and codec information. + */ + USKCompletionHandler(boolean keepLastData) { + this.keepLastData = keepLastData; + } + + /** + * Reports whether a retained data bucket is currently available. + * + *

The value reflects the last successful decoding that was retained. The result may change + * after {@link #applyDecodedData(boolean, ClientSSKBlock, Bucket)} or {@link + * #applyFoundDecodedData(boolean, boolean, short, byte[], ClientContext)} is called, or after + * {@link #releaseLastDataBytes()} frees the stored bucket. + * + * @return {@code true} if a bucket is currently stored; {@code false} otherwise. + */ + boolean hasLastRequestData() { + synchronized (this) { + return lastRequestData != null; + } + } + + /** + * Returns the compression codec recorded for the most recently applied block. + * + *

The codec is updated when decoded data is applied or when metadata is applied from a found + * edition. If no block has been applied yet, the codec remains at its default value of zero. + * + * @return the last compression codec recorded for a decoded block. + */ + short lastCompressionCodec() { + synchronized (this) { + return lastCompressionCodec; + } + } + + /** + * Returns whether the most recently applied block represented metadata. + * + *

This reflects the last known metadata flag from applied decoded data or from a found + * edition. Callers should interpret it in tandem with {@link #lastCompressionCodec()} when + * building completion callbacks. + * + * @return {@code true} if the last applied block was metadata; {@code false} otherwise. + */ + boolean lastWasMetadata() { + synchronized (this) { + return lastWasMetadata; + } + } + + /** + * Releases any retained data bucket and clears stored state. + * + *

This method frees the retained bucket if one exists and clears the stored reference so the + * bucket can be garbage collected. It does not modify codec or metadata flags, which are updated by + * later calls to {@link #applyDecodedData(boolean, ClientSSKBlock, Bucket)}. + */ + void clearLastRequestData() { + synchronized (this) { + if (lastRequestData != null) { + lastRequestData.free(); + } + lastRequestData = null; + } + } + + /** + * Decodes the provided block into a data bucket when decoding is requested. + * + *

This method is a small adapter that checks the decode flag and the availability of the + * block. If either condition is not met, it returns {@code null} without changing internal state. + * When decoding is performed, the returned bucket is owned by the caller and may be retained or + * freed based on {@link #applyDecodedData(boolean, ClientSSKBlock, Bucket)}. + * + * @param decode {@code true} to decode the provided block; {@code false} to skip decoding. + * @param block the block to decode, or {@code null} when no block is available. + * @param context client context used to get temporary bucket factories. + * @param parent requester providing persistence information for bucket allocation. + * @return a decoded data bucket, or {@code null} if decoding was skipped or failed. + */ + Bucket decodeBlockIfNeeded( + boolean decode, ClientSSKBlock block, ClientContext context, ClientRequester parent) { + if (!decode || block == null) return null; + return ClientSSKBlockDecoder.decode(block, context, parent.persistent()); + } + + /** + * Applies decoded data and updates the recorded metadata and codec state. + * + *

The method is synchronized to serialize state updates. When decoding is disabled, it is a + * no-op. If a block is supplied, the codec and metadata flags are taken from that block, and the + * data bucket is either retained or freed based on {@code keepLastData}. If the block is {@code + * null}, codec and metadata flags are reset and any retained bucket is cleared. + * + * @param decode {@code true} to apply the block information; {@code false} to skip updates. + * @param block the decoded block, or {@code null} to clear codec and metadata state. + * @param data the decoded data bucket, or {@code null} when no payload is available. + */ + void applyDecodedData(boolean decode, ClientSSKBlock block, Bucket data) { + synchronized (this) { + if (!decode) return; + if (block != null) { + lastCompressionCodec = block.getCompressionCodec(); + lastWasMetadata = block.isMetadata(); + if (keepLastData) { + if (lastRequestData != null) lastRequestData.free(); + lastRequestData = data; + } else if (data != null) { + data.free(); + } + } else { + lastCompressionCodec = -1; + lastWasMetadata = false; + lastRequestData = null; + } + } + } + + /** + * Applies already decoded data and records metadata/codec values. + * + *

This variant is used when a decoded byte array is already available, such as when data is + * supplied by a higher-level cache. If {@code keepLastData} is enabled, the byte array is wrapped + * into an immutable bucket for retention. If decoding is disabled, no changes are made. + * + * @param decode {@code true} to apply the provided metadata and data; {@code false} to skip. + * @param metadata {@code true} when the payload represents metadata rather than raw data. + * @param codec compression codec identifier associated with the decoded payload. + * @param data decoded data bytes; must not be mutated by the caller after passing here. + * @param context client context providing the temporary bucket factory for retention. + */ + void applyFoundDecodedData( + boolean decode, boolean metadata, short codec, byte[] data, ClientContext context) { + synchronized (this) { + if (!decode) return; + lastCompressionCodec = codec; + lastWasMetadata = metadata; + if (keepLastData) { + // Note: converting bucket to byte[] and back is inefficient + if (lastRequestData != null) lastRequestData.free(); + try { + lastRequestData = BucketTools.makeImmutableBucket(context.tempBucketFactory, data); + } catch (IOException e) { + LOG.error("Caught {}", e, e); + } + } + } + } + + /** + * Releases retained data as a byte array and clears the stored bucket. + * + *

If no data is retained, this returns {@code null} to preserve the "no payload" signal used + * by downstream callbacks. The caller owns the returned byte array. The retained bucket is always + * freed, even if conversion fails, ensuring the handler does not retain buffers longer than + * needed. + * + * @return the retained data bytes, or {@code null} when no data is stored + */ + @SuppressWarnings("java:S1168") + byte[] releaseLastDataBytes() { + synchronized (this) { + if (lastRequestData == null) return null; + try { + return BucketTools.toByteArray(lastRequestData); + } catch (IOException e) { + LOG.error("Unable to turn lastRequestData into byte[]: caught I/O exception: {}", e, e); + return null; + } finally { + lastRequestData.free(); + lastRequestData = null; + } + } + } + + private static final class ClientSSKBlockDecoder { + private ClientSSKBlockDecoder() {} + + private static Bucket decode(ClientSSKBlock block, ClientContext context, boolean persistent) { + try { + return block.decode(context.getBucketFactory(persistent), 1025 /* it's an SSK */, true); + } catch (KeyDecodeException _) { + return null; + } catch (IOException e) { + LOG.error("Decode failed due to I/O error: {}", e.getMessage(), e); + return null; + } + } + } +} diff --git a/src/main/java/network/crypta/client/async/USKFetcher.java b/src/main/java/network/crypta/client/async/USKFetcher.java index 5652d493bf..a904f985e1 100644 --- a/src/main/java/network/crypta/client/async/USKFetcher.java +++ b/src/main/java/network/crypta/client/async/USKFetcher.java @@ -1,97 +1,76 @@ package network.crypta.client.async; -import java.io.IOException; -import java.lang.ref.WeakReference; import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; -import java.util.Map; -import java.util.Map.Entry; import java.util.Random; -import java.util.TreeMap; -import java.util.TreeSet; import network.crypta.client.FetchContext; -import network.crypta.keys.ClientSSK; import network.crypta.keys.ClientSSKBlock; import network.crypta.keys.FreenetURI; import network.crypta.keys.Key; import network.crypta.keys.KeyBlock; -import network.crypta.keys.KeyDecodeException; import network.crypta.keys.NodeSSK; -import network.crypta.keys.SSKBlock; -import network.crypta.keys.SSKVerifyException; import network.crypta.keys.USK; -import network.crypta.node.RequestStarter; import network.crypta.node.SendableGet; -import network.crypta.support.RemoveRangeArrayList; -import network.crypta.support.api.Bucket; -import network.crypta.support.io.BucketTools; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Coordinates discovery and fetching of editions for a {@link USK}. + * Coordinates discovery, polling, and optional data retrieval for a {@link USK} namespace. * - *

USKs (Unique SSKs) advance over time; this class drives the polling and discovery loop that - * identifies the latest available edition and optionally retrieves its data. It combines - * datastore-prechecks, targeted slot checks, and Date-Based Request (DBR) hint fetches to balance - * latency and load. The fetcher can run once for a specific request or continue in background - * polling mode to track updates over time. + *

This fetcher drives a USK discovery round by consulting the datastore, scheduling edition + * probes, and applying Date-Based Request (DBR) hints to narrow toward the latest available slot. + * Callers typically construct one instance per USK, register callbacks or subscribers, and invoke + * {@link #schedule(ClientContext)} to begin work. The instance may complete a single round or + * continue background polling; it cooperates with {@link USKManager} and scheduler infrastructure + * so network I/O stays in scheduler-managed tasks rather than in this class. * - *
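A minimal caller-side sketch of that flow; construction and the surrounding USKManager wiring are elided because they are outside this hunk:

// Register interest, then start the round; results arrive via USKFetcherCallback.
fetcher.addCallback(callback); // the callback's priority hint feeds the polling priority
fetcher.schedule(context); // kicks off store checks, DBR hints, and edition probes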

Lifecycle and behavior: + *

The internal state centers on mutable polling bookkeeping: in-flight attempts, a watch window, + * the last attempted edition, and optional retained payload data. The fetcher respects a minimum + * failure threshold before declaring a round finished and may reschedule with backoff when + * configured. These invariants let callers treat each round as a bounded probe of the USK space. + * + *

Concurrency is handled with synchronized sections guarding shared fields such as completion + * flags and watch lists. Cancellation or completion is terminal and makes later schedule requests + * no-ops, and the fetcher is not persistent across restarts. * *

    - *
  • At most one {@code USKFetcher} is active per USK, and it registers itself with the {@code - * USKManager} to receive discovery events such as newly found slots. - *
  • Subscribers and callbacks do not receive data directly from this class but influence - * whether to continue polling and at which priority, enabling interactive workloads to - * promote progress checks. - *
  • Scheduling begins with datastore checks and DBR hint fetches, then probes multiple nearby - * editions. Four consecutive DNFs with no later pending work typically conclude a round. - *
  • When running with background polling, the fetcher increases its sleep between rounds unless - * progress is made, and can be re-armed after cancellation. + *
  • Collects subscriber hints and updates polling priorities for interactive workloads. + *
  • Coordinates attempt lifecycle, including store checks, DBR hints, and probe rounds. + *
  • Reports progress and completion results to registered callbacks. + *
  • Supports background polling with backoff when configured by options. *
* - *

Threading and state: instances are mutable and use fine-grained synchronization around shared - * fields to coordinate scheduling and callbacks. Cancellation short-circuits pending work and marks - * the instance as finished. This class is not persistent; persistence of intent is tracked by - * {@code USKFetcherTag} which recreates fetchers on startup as needed. - * * @see USKManager * @see USK + * @see USKDateHintFetches */ -public class USKFetcher implements ClientGetState, USKCallback, HasKeyListener, KeyListener { +public class USKFetcher + implements ClientGetState, USKCallback, HasKeyListener, KeyListener, USKAttemptCallbacks { /** Logger for polling, scheduling, and hint-processing diagnostics. */ private static final Logger LOG = LoggerFactory.getLogger(USKFetcher.class); - /** Literal used in attempt descriptions to keep log formatting consistent. */ - private static final String FOR_LITERAL = " for "; - - /** USK manager */ + /** Manager that owns known slot state and subscription coordination. */ private final USKManager uskManager; - /** The USK to fetch */ + /** Base USK namespace from which edition keys are derived. */ private final USK origUSK; - /** Callbacks */ + /** Registered completion callbacks for this fetch cycle. */ private final List callbacks; - /** Fetcher context */ + /** Base fetch context for normal network and store checks. */ final FetchContext ctx; - /** Fetcher context ignoring store */ + /** Context configured to bypass the datastore for probe attempts. */ final FetchContext ctxNoStore; - /** Fetcher context for DBR hint fetches */ + /** Specialized context for Date-Based Request hint fetches. */ final FetchContext ctxDBR; - /** Finished? */ + /** Whether this fetch cycle completed successfully or with failure. */ private boolean completed; - /** Cancelled? */ + /** Whether cancellation has been requested and further work should stop. */ private boolean cancelled; /** Whether this instance only checks the local store and avoids network fetches. */ @@ -100,40 +79,55 @@ public class USKFetcher implements ClientGetState, USKCallback, HasKeyListener, /** Parent requester that owns this fetcher and its scheduling priority. */ final ClientRequester parent; - // We keep the data from the last (highest number) request. - /** Last successfully fetched data bucket, retained when {@link #keepLastData} is enabled. */ - private Bucket lastRequestData; + /** Structure tracking which keys we want. */ + private final USKKeyWatchSet watchingKeys; + + /** Attempt lifecycle manager for polling and probe attempts. */ + private final USKAttemptManager attempts; - /** Compression codec used for the last fetched data payload. */ - private short lastCompressionCodec; + /** Coordinates datastore store checks. */ + private final USKStoreCheckCoordinator storeChecks; - /** Whether the last fetched block represented metadata rather than raw data. */ - private boolean lastWasMetadata; + /** Tracks subscribers and priority selection. */ + private final USKSubscriberRegistry subscriberRegistry; - /** Structure tracking which keys we want. */ - private final USKWatchingKeys watchingKeys; + /** Handles data retention and completion callbacks. */ + private final USKCompletionCoordinator completionCoordinator; - /** Attempts staged for immediate scheduling on the next registration cycle. */ - private final ArrayList attemptsToStart; + /** Builds plans for handling success and found editions. 
*/ + private final USKSuccessPlanner successPlanner; - /** Maximum number of keys to watch per polling round before pruning. */ - private static final int WATCH_KEYS = 50; + /** Coordinates scheduling state for a polling round. */ + private final USKSchedulingCoordinator schedulingCoordinator; + + /** Manages polling round completion and backoff. */ + private final USKPollingRound pollingRound; /** - * Registers a fetcher-level callback. + * Registers a fetcher-level callback to observe completion results. + * + *

Callbacks are invoked when a polling round reaches a terminal outcome or when a single-shot + * fetch completes. They receive {@code onFoundEdition(...)} at most once per lifecycle unless + * background polling is enabled, in which case the callback may not be notified for long periods. + * This method also affects dynamic scheduling because callback priority hints are folded into the + * polling priority calculation and can bias progress checks for interactive users. * - *

Callbacks are notified when the overall USK fetch cycle completes. Unless background polling - * is enabled, they receive {@code onFoundEdition(...)} at most once when the final decision for - * the current cycle is known. Callbacks also participate in determining the dynamic polling - * priority via {@link #updatePriorities()} so interactive callers can promote progress checks. + *

The call is thread-safe and idempotent with respect to completed instances. Adding callbacks + * after completion has no effect and returns {@code false} without side effects. Callback + * instances are expected to remain valid for the life of the fetcher and may be called from + * scheduler threads rather than the caller's thread. The method does not trigger scheduling on + * its own, but it does update priorities immediately after the callback is stored. * - *

Note: When continuous background polling is enabled, consider whether registering a callback - * is appropriate, as the cycle may not reach a terminal state for long periods. + *

Preconditions are minimal: the callback must be non-null and should tolerate invocation on + * internal threads. Postconditions are limited to registration and priority refresh; the caller + * should not expect immediate network activity as a result of this call. * - * @param cb the callback to add; must remain valid for the lifetime of this fetch cycle; {@code - * null} is not permitted - * @return {@code true} when the callback was added successfully; {@code false} when the fetcher - * has already completed and no further callbacks are accepted + * @param cb callback instance to register; must be non-null and long-lived + * @return {@code true} when accepted; {@code false} if already completed + *

+   * <pre>{@code
+   * // Example: register a callback before scheduling
+   * fetcher.addCallback(callback);
+   * }</pre>
*/ @SuppressWarnings("UnusedReturnValue") public boolean addCallback(USKFetcherCallback cb) { @@ -147,259 +141,32 @@ public boolean addCallback(USKFetcherCallback cb) { // DBR (date-hint) fetching is handled by USKDateHintFetches. - /** - * Tracks a single edition probe, including its checker state and polling metadata. - * - *

Each attempt owns a {@link USKChecker} that performs the actual request and reports - * completion through {@link USKCheckerCallback}. The attempt records whether it has succeeded, - * failed (DNF), or been canceled, and it exposes scheduling hooks used by the outer fetcher. - */ - class USKAttempt implements USKCheckerCallback { - /** Edition number */ - long number; - - /** Attempt to fetch that edition number (or null if the fetch has finished) */ - USKChecker checker; - - /** Successful fetch? */ - boolean succeeded; - - /** DNF? */ - boolean dnf; - - /** Whether this attempt has been explicitly canceled. */ - boolean cancelled; - - /** Lookup descriptor associated with this attempt. */ - final Lookup lookup; - - /** Whether this attempt is a long-lived polling attempt. */ - final boolean forever; - - /** Whether this attempt has ever entered finite cooldown. */ - private boolean everInCooldown; - - /** - * Creates a new attempt for the provided lookup descriptor. - * - * @param l lookup descriptor containing edition and key information; must not be null - * @param forever {@code true} to create a polling attempt; {@code false} for a one-off probe - */ - private USKAttempt(Lookup l, boolean forever) { - this.lookup = l; - this.number = l.val; - this.succeeded = false; - this.dnf = false; - this.forever = forever; - this.checker = - new USKChecker( - this, - l.key, - forever ? -1 : ctx.maxUSKRetries, - l.ignoreStore ? ctxNoStore : ctx, - parent, - realTimeFlag); - } - - @Override - public void onDNF(ClientContext context) { - synchronized (this) { - checker = null; - dnf = true; - } - USKFetcher.this.onDNF(this, context); - } - - @Override - public void onSuccess(ClientSSKBlock block, ClientContext context) { - synchronized (this) { - checker = null; - succeeded = true; - } - USKFetcher.this.onSuccess(this, false, block, context); - } - - @Override - public void onFatalAuthorError(ClientContext context) { - synchronized (this) { - checker = null; - } - // Counts as success except it doesn't update - USKFetcher.this.onSuccess(this, true, null, context); - } - - @Override - public void onNetworkError(ClientContext context) { - synchronized (this) { - checker = null; - } - // Treat network error as DNF for scheduling purposes - USKFetcher.this.onDNF(this, context); - } - - @Override - public void onCancelled(ClientContext context) { - synchronized (this) { - checker = null; - } - USKFetcher.this.onCancelled(this, context); - } - - /** - * Cancels this attempt and propagates cancellation to the checker if present. - * - * @param context client context used to cancel scheduling; must not be null - */ - public void cancel(ClientContext context) { - cancelled = true; - USKChecker c; - synchronized (this) { - c = checker; - } - if (c != null) c.cancel(context); - onCancelled(context); - } - - /** - * Schedules this attempt with its checker if still active. - * - * @param context client context used to schedule the checker; must not be null - */ - public void schedule(ClientContext context) { - USKChecker c; - synchronized (this) { - c = checker; - } - if (c == null) { - if (LOG.isDebugEnabled()) LOG.debug("Checker == null in schedule() for {}", this); - } else { - assert (!c.persistent()); - c.schedule(context); - } - } - - @Override - public String toString() { - return "USKAttempt for " - + number - + FOR_LITERAL - + origUSK.getURI() - + FOR_LITERAL - + USKFetcher.this - + (forever ? 
" (forever)" : ""); - } - - @Override - public short getPriority() { - if (backgroundPoll) { - synchronized (this) { - if (forever) { - if (!everInCooldown) { - // Boost the priority initially, so that finding the first edition takes precedence - // over ongoing polling after we're fairly sure we're not going to find anything. - // The ongoing polling keeps the ULPRs up to date so that we will get told quickly, - // but if we are overloaded we won't be able to keep up regardless. - return progressPollPriority; - } else { - return normalPollPriority; - } - } else { - // If !forever, this is a random-probe. - // It's not that important. - return normalPollPriority; - } - } - } - return parent.getPriorityClass(); - } - - @Override - public void onEnterFiniteCooldown(ClientContext context) { - synchronized (this) { - everInCooldown = true; - } - USKFetcher.this.onCheckEnteredFiniteCooldown(context); - } - - /** - * Reports whether this attempt has ever entered a finite cooldown. - * - * @return {@code true} if the attempt has cooled down at least once - */ - public synchronized boolean everInCooldown() { - return everInCooldown; - } - - /** Refreshes cached poll parameters on the underlying checker, if active. */ - public void reloadPollParameters() { - USKChecker c; - synchronized (this) { - c = checker; - } - if (c == null) return; - c.onChangedFetchContext(); - } - } - /** Helper for Date-Based Request (DBR) hint scheduling and parsing. */ private final USKDateHintFetches dbrHintFetches; - /** Active random-probe attempts keyed by edition number. */ - private final TreeMap runningAttempts = new TreeMap<>(); - - /** Polling attempts keyed by edition number for background tracking. */ - private final TreeMap pollingAttempts = new TreeMap<>(); - /** Highest edition number fetched or attempted during this cycle. */ private long lastFetchedEdition; - /** Minimum failures to tolerate before concluding a round. */ + /** Minimum consecutive failures tolerated before a polling round concludes. */ final long origMinFailures; - /** Whether this is the first polling loop after construction. */ - boolean firstLoop; - /** Initial sleep interval between polling rounds, in milliseconds. */ static final long ORIG_SLEEP_TIME = 30L * 60 * 1000; /** Maximum sleep interval between polling rounds, in milliseconds. */ static final long MAX_SLEEP_TIME = 24L * 60 * 60 * 1000; - /** Current sleep interval between polling rounds, in milliseconds. */ - long sleepTime = ORIG_SLEEP_TIME; - - /** Edition value captured when scheduling a round to detect progress. */ - private long valueAtSchedule; - - /** Keep going forever? */ + /** Whether this fetcher continues polling after the first successful round. */ private final boolean backgroundPoll; - /** Keep the last fetched data? */ + /** Whether the most recently fetched payload should be retained in memory. */ final boolean keepLastData; - /** Whether scheduling has begun for the current polling cycle. */ - private boolean started; - /** Whether this fetcher uses real-time scheduling policies. */ private final boolean realTimeFlag; - /** Default polling priority for normal background checks. */ - private static final short DEFAULT_NORMAL_POLL_PRIORITY = RequestStarter.PREFETCH_PRIORITY_CLASS; - - /** Current polling priority for normal background checks. */ - private short normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY; - - /** Default polling priority for progress-oriented checks. 
*/ - private static final short DEFAULT_PROGRESS_POLL_PRIORITY = RequestStarter.UPDATE_PRIORITY_CLASS; - - /** Current polling priority for progress-oriented checks. */ - private short progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY; - - /** Whether a scheduling attempt is deferred until DBR hints complete. */ - private boolean scheduleAfterDBRsDone; - // Options flags for constructor to reduce parameter count - /** Option flag to enable background polling. */ + /** Option flag to enable background polling beyond the first round. */ static final int OPT_POLL_FOREVER = 1; /** Option flag to retain the last fetched data in memory. */ @@ -415,15 +182,20 @@ public void reloadPollParameters() { *

The constructor wires the primary and DBR-specific {@link FetchContext} instances, captures * the parent requester, and seeds the initial watch list using the last known slot from {@link * USKManager}. It does not start network work; callers must invoke {@link - * #schedule(ClientContext)} or {@link #schedule(long, ClientContext)} to begin a cycle. - * - * @param origUSK base USK to probe for editions; must not be null - * @param manager manager used to look up and update known slots; must not be null - * @param ctx base fetch context used for normal and no-store checks; must not be null - * @param requester parent requester that supplies priority and persistence flags; must not be - * null - * @param minFailures minimum number of DNFs tolerated before concluding a round; non-negative - * values are expected + * #schedule(ClientContext)} or {@link #schedule(long, ClientContext)} to begin a cycle. The + * resulting instance is mutable and designed to be used by scheduling threads; it is not + * persistent across restarts. + * + *
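+   * <p>A minimal usage sketch, assuming the caller already holds a {@code USK}, a {@code
+   * USKManager}, a base {@link FetchContext}, and a {@code ClientRequester}, with parameters in
+   * the order documented below (names are illustrative only):
+   *
+   * <pre>{@code
+   * // Hypothetical construction of a background poller that retains the last payload.
+   * USKFetcher fetcher =
+   *     new USKFetcher(usk, manager, fetchCtx, requester, 3, OPT_POLL_FOREVER | OPT_KEEP_LAST_DATA);
+   * fetcher.schedule(context); // no network work starts until schedule() is called
+   * }</pre>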

Configuration flags in {@code options} can enable background polling, retain the most recent + * payload, or restrict work to datastore checks. Invalid combinations are not explicitly + * rejected, so callers should supply only supported flags. + * + * @param origUSK base USK to probe for editions; must be non-null and valid + * @param manager manager used to look up and update known slots; must be non-null and shared + * @param ctx base fetch context used for normal and no-store checks; must be non-null + * @param requester parent requester that supplies priority and persistence flags; must be + * non-null + * @param minFailures minimum DNFs tolerated before concluding a round; non-negative values only * @param options bitmask of {@code OPT_*} flags controlling polling and storage behavior * @throws IllegalArgumentException if {@code minFailures} exceeds the internal watch limit */ @@ -438,16 +210,15 @@ public void reloadPollParameters() { this.origUSK = origUSK; this.uskManager = manager; this.origMinFailures = minFailures; - if (origMinFailures > WATCH_KEYS) throw new IllegalArgumentException(); - firstLoop = true; + if (origMinFailures > USKKeyWatchSet.WATCH_KEYS) throw new IllegalArgumentException(); callbacks = new ArrayList<>(); - subscribers = new HashSet<>(); lastFetchedEdition = -1; this.realTimeFlag = parent.realTimeFlag(); this.backgroundPoll = (options & OPT_POLL_FOREVER) != 0; this.keepLastData = (options & OPT_KEEP_LAST_DATA) != 0; this.checkStoreOnly = (options & OPT_CHECK_STORE_ONLY) != 0; ctxDBR = new FetchContext(ctx, FetchContext.IDENTICAL_MASK, true, null); + if (ctx.getFollowRedirects()) { this.ctx = new FetchContext(ctx, FetchContext.IDENTICAL_MASK, true, null); this.ctx.setFollowRedirects(false); @@ -468,11 +239,54 @@ public void reloadPollParameters() { } if (checkStoreOnly && LOG.isDebugEnabled()) LOG.debug("Just checking store on {}", this); // origUSK is a hint. We *do* want to check the edition given. - // Whereas latestSlot we've definitely fetched, we don't want to re-check. + // Whereas the latestSlot we've definitely fetched, we don't want to re-check. 
watchingKeys = - new USKWatchingKeys(origUSK, Math.max(0, uskManager.lookupLatestSlot(origUSK) + 1)); - attemptsToStart = new ArrayList<>(); + new USKKeyWatchSet( + origUSK, + Math.max(0, uskManager.lookupLatestSlot(origUSK) + 1), + minFailures, + backgroundPoll); dbrHintFetches = new USKDateHintFetches(this, uskManager, origUSK, this.ctx, ctxDBR, parent); + attempts = + new USKAttemptManager( + new USKAttemptContext(this, origUSK, this.ctx, ctxNoStore, parent, realTimeFlag), + uskManager, + watchingKeys, + checkStoreOnly, + keepLastData); + subscriberRegistry = new USKSubscriberRegistry(watchingKeys, uskManager, attempts, origUSK); + completionCoordinator = + new USKCompletionCoordinator( + new USKCompletionHandler(keepLastData), uskManager, origUSK, parent, realTimeFlag); + successPlanner = new USKSuccessPlanner(); + storeChecks = + new USKStoreCheckCoordinator( + USKStoreCheckCoordinator.Params.builder() + .watchingKeys(watchingKeys) + .attempts(attempts) + .parent(parent) + .checkStoreOnly(checkStoreOnly) + .uskManager(uskManager) + .origUSK(origUSK) + .callbacks(new StoreCheckCallbacks()) + .realTimeFlag(realTimeFlag) + .build()); + schedulingCoordinator = + new USKSchedulingCoordinator(attempts, storeChecks, dbrHintFetches, checkStoreOnly); + pollingRound = + new USKPollingRound( + new USKPollingRoundContext( + attempts, + storeChecks, + dbrHintFetches, + subscriberRegistry, + uskManager, + origUSK, + realTimeFlag), + ORIG_SLEEP_TIME, + true, + ORIG_SLEEP_TIME, + MAX_SLEEP_TIME); } /** @@ -480,143 +294,49 @@ public void reloadPollParameters() { * *

If the main scheduling path was waiting for DBR results, this method triggers the next * scheduling step. It also checks whether the current polling round can be considered finished - * for now and notifies progress callbacks. + * for now and notifies progress callbacks. The method is safe to call from scheduler threads and + * performs no blocking work beyond scheduling follow-up tasks. + * + *

Calling this method multiple times is safe; repeated invocations simply re-evaluate the + * scheduling state and may become no-ops if the poll round has already advanced. No exceptions + * are thrown, and the only side effects are scheduling decisions and progress checks. * - * @param context the client context used for scheduling follow-up work; must not be {@code null} + * @param context client context used to schedule follow-up work; must be non-null */ public void onDBRsFinished(ClientContext context) { - boolean needSchedule = false; + boolean needSchedule; synchronized (this) { - if (scheduleAfterDBRsDone) needSchedule = true; // Note: additional conditions may apply. + needSchedule = schedulingCoordinator.scheduleAfterDBRsDone(); } if (needSchedule) schedule(context); - checkFinishedForNow(context); + pollingRound.checkFinishedForNow(context, cancelled, completed); } /** * Notifies that a USK slot check entered a finite cooldown. * - *

This is used as a progress signal during a polling round to determine whether the round can - * be considered finished for now when all active checks have cooled down at least once. + *

This acts as a progress signal during a polling round. When all active checks have cooled + * down at least once, the round can be treated as finished for now and progress callbacks may be + * invoked. The method is a lightweight hook and does not trigger network I/O itself. * - * @param context client context used to perform completion checks; must not be {@code null} + * @param context client context used to perform completion checks; must be non-null */ - public void onCheckEnteredFiniteCooldown(ClientContext context) { + @Override + public void onEnterFiniteCooldown(ClientContext context) { checkFinishedForNow(context); } /** * Evaluates whether the current polling round can be treated as finished. * - *

The method consults {@link #resolvePollingAttemptsIfAllChecksDone()} and verifies that all - * polling attempts have entered a finite cooldown at least once. When those conditions hold, it - * emits the round-finished callback to interested subscribers. + *

The method consults {@link USKPollingRound} and verifies that all polling attempts have + * entered a finite cooldown at least once. When those conditions hold, it emits the + * round-finished callback to interested subscribers. * * @param context client context used to notify progress callbacks; must not be null */ private void checkFinishedForNow(ClientContext context) { - PollingResolution res = resolvePollingAttemptsIfAllChecksDone(); - if (!res.ready) return; - for (USKAttempt a : res.attempts) { - // All the polling attempts currently running must have entered cooldown once. - // I.e. they must have done all their fetches at least once. - // If we check whether they are *currently* in cooldown, then under heavy USK load (the common - // case!), we can see them overlapping and never notify finished. - if (!a.everInCooldown()) { - if (LOG.isDebugEnabled()) - LOG.debug( - "Not finished because polling attempt {} never entered cooldown on {}", a, this); - return; - } - } - notifyFinishedForNow(context); - } - - /** - * Captures whether a polling round can be considered complete and which attempts remain. - * - *

The resolution is used to decide when to notify progress callbacks and to gate scheduling - * decisions that depend on the completion of store checks, random probes, and DBR hints. - */ - private static final class PollingResolution { - /** Whether the polling round is ready to be considered finished for now. */ - final boolean ready; - - /** Snapshot of active polling attempts at resolution time. */ - final USKAttempt[] attempts; - - /** - * Creates a resolution result for the current polling round. - * - * @param ready whether all checks are complete for the current round - * @param attempts snapshot of polling attempts to examine for cooldown state - */ - PollingResolution(boolean ready, USKAttempt[] attempts) { - this.ready = ready; - this.attempts = attempts; - } - } - - /** - * Determines whether all checks for the polling round have completed. - * - *

The method verifies that there are no running store checks, random probes, or outstanding - * DBR hints. It also ensures that polling attempts exist before reporting completion. When any of - * these conditions is not met, it returns a resolution marked not ready. - * - * @return a resolution object indicating readiness and the current polling attempts - */ - private PollingResolution resolvePollingAttemptsIfAllChecksDone() { - synchronized (this) { - if (cancelled || completed) return new PollingResolution(false, new USKAttempt[0]); - if (runningStoreChecker != null) { - if (LOG.isDebugEnabled()) - LOG.debug("Not finished because still running store checker on {}", this); - return new PollingResolution(false, new USKAttempt[0]); // Still checking the store - } - if (!runningAttempts.isEmpty()) { - if (LOG.isDebugEnabled()) - LOG.debug("Not finished because running attempts (random probes) on {}", this); - return new PollingResolution(false, new USKAttempt[0]); // Still running - } - if (pollingAttempts.isEmpty()) { - if (LOG.isDebugEnabled()) - LOG.debug("Not finished because no polling attempts (not started???) on {}", this); - return new PollingResolution(false, new USKAttempt[0]); // Not started yet - } - if (dbrHintFetches.hasOutstanding()) { - if (LOG.isDebugEnabled()) - LOG.debug("Not finished because still waiting for DBR attempts on {}", this); - return new PollingResolution(false, new USKAttempt[0]); // DBRs - } - return new PollingResolution(true, pollingAttempts.values().toArray(new USKAttempt[0])); - } - } - - /** - * Notifies {@link USKProgressCallback} subscribers that a polling round has completed. - * - *

The notification is best-effort: if the fetcher has been canceled or completed, the method - * returns without invoking callbacks. The notification does not imply that the USK has advanced, - * only that a round of polling work has reached a stable point. - * - * @param context client context forwarded to progress callbacks; must not be null - */ - private void notifyFinishedForNow(ClientContext context) { - if (LOG.isDebugEnabled()) - LOG.debug( - "Notifying finished for now on {} for {}{}", - this, - origUSK, - this.realTimeFlag ? " (realtime)" : " (bulk)"); - USKCallback[] toCheck; - synchronized (this) { - if (cancelled || completed) return; - toCheck = subscribers.toArray(new USKCallback[0]); - } - for (USKCallback cb : toCheck) { - if (cb instanceof USKProgressCallback callback) callback.onRoundFinished(context); - } + pollingRound.checkFinishedForNow(context, cancelled, completed); } // moved into USKStoreCheckerGetter to satisfy S3398 @@ -625,55 +345,46 @@ private void notifyFinishedForNow(ClientContext context) { * Handles a "data not found" result from an attempt and advances completion logic. * *

The method updates tracking structures, records the last fetched edition, and determines - * whether a polling round should be concluded. It treats the DNF as a non-fatal result that - * influences scheduling decisions rather than an immediate failure. + * whether a polling round should be concluded. A DNF is treated as non-fatal and is used only to + * drive scheduling decisions; it does not terminate the fetcher unless other completion criteria + * are met. This method is safe to call from worker threads used by individual attempts. + * + *

DNFs may occur during datastore checks or network probes; the handler treats both sources + * the same and only examines attempt state, never the payload. The method does not throw and + * performs no blocking I/O, so callers can invoke it directly from scheduling callbacks. If the + * last running attempt reports DNF, the method may trigger completion for the current polling + * round. * - * @param att attempt that reported DNF; must not be null - * @param context client context used for follow-up scheduling; must not be null + * @param att attempt that reported DNF; must be non-null and associated with this fetcher + * @param context client context used for follow-up scheduling; must be non-null */ - void onDNF(USKAttempt att, ClientContext context) { + @Override + public void onDNF(USKAttempt att, ClientContext context) { if (LOG.isDebugEnabled()) LOG.debug("DNF: {}", att); boolean finished = false; long curLatest = uskManager.lookupLatestSlot(origUSK); synchronized (this) { if (completed || cancelled) return; lastFetchedEdition = Math.max(lastFetchedEdition, att.number); - runningAttempts.remove(att.number); - if (runningAttempts.isEmpty()) { + attempts.removeRunningAttempt(att.number); + if (!attempts.hasRunningAttempts()) { if (LOG.isDebugEnabled()) LOG.debug( "latest: {}, last fetched: {}, curLatest+MIN_FAILURES: {}", curLatest, lastFetchedEdition, curLatest + origMinFailures); - if (started) { + if (schedulingCoordinator.isStarted()) { finished = true; } - } else if (LOG.isDebugEnabled()) LOG.debug("Remaining: {}", runningAttempts()); + } else if (LOG.isDebugEnabled()) + LOG.debug("Remaining: {}", attempts.runningAttemptsDescription()); } if (finished) { finishSuccess(context); } } - /** - * Builds a diagnostic string describing current running attempts. - * - * @return a comma-separated description of running attempts and their state flags - */ - private synchronized String runningAttempts() { - StringBuilder sb = new StringBuilder(); - boolean first = true; - for (USKAttempt a : runningAttempts.values()) { - if (!first) sb.append(", "); - first = false; - sb.append(a.number); - if (a.cancelled) sb.append("(cancelled)"); - if (a.succeeded) sb.append("(succeeded)"); - } - return sb.toString(); - } - /** * Completes the current round, either by rescheduling or by notifying callbacks. * @@ -702,34 +413,11 @@ private void finishSuccess(ClientContext context) { * @param context client context used to access randomness and scheduling; must not be null */ private void rescheduleBackgroundPoll(ClientContext context) { - long valAtEnd = uskManager.lookupLatestSlot(origUSK); - long end; - long now = System.currentTimeMillis(); - synchronized (this) { - started = false; // don't finish before have rescheduled - - // Find out when we should check next ('end'), in an increasing delay (unless we make - // progress). - long newSleepTime = sleepTime * 2; - if (newSleepTime > MAX_SLEEP_TIME) newSleepTime = MAX_SLEEP_TIME; - sleepTime = newSleepTime; - end = now + context.random.nextInt((int) sleepTime); - - if (valAtEnd > valueAtSchedule && valAtEnd > origUSK.suggestedEdition) { - // We have advanced; keep trying as if we just started. - // Only if we actually DO advance, not if we just confirm our suspicion (valueAtSchedule - // always starts at 0). 
- sleepTime = ORIG_SLEEP_TIME; - firstLoop = false; - end = now; - if (LOG.isDebugEnabled()) - LOG.debug("We have advanced: at start, {} at end, {}", valueAtSchedule, valAtEnd); - } - if (LOG.isDebugEnabled()) - LOG.debug("Sleep time is {} this sleep is {} for {}", sleepTime, end - now, this); - } - schedule(end - now, context); - checkFinishedForNow(context); + schedulingCoordinator.resetStarted(); + long delay = + pollingRound.rescheduleBackgroundPoll(context, schedulingCoordinator.valueAtSchedule()); + schedule(delay, context); + pollingRound.checkFinishedForNow(context, cancelled, completed); } /** @@ -748,56 +436,29 @@ private void completeCallbacks(ClientContext context) { completed = true; cb = callbacks.toArray(new USKFetcherCallback[0]); } - uskManager.unsubscribe(origUSK, this); - uskManager.onFinished(this); - context.getSskFetchScheduler(realTimeFlag).schedTransient.removePendingKeys((KeyListener) this); - long ed = uskManager.lookupLatestSlot(origUSK); - byte[] data; - synchronized (this) { - if (lastRequestData == null) data = null; - else { - try { - data = BucketTools.toByteArray(lastRequestData); - } catch (IOException e) { - LOG.error("Unable to turn lastRequestData into byte[]: caught I/O exception: {}", e, e); - data = null; - } - lastRequestData.free(); - } - } - for (USKFetcherCallback c : cb) { - try { - if (ed == -1) c.onFailure(context); - else - c.onFoundEdition( - new USKFoundEdition( - ed, - origUSK.copy(ed), - context, - lastWasMetadata, - lastCompressionCodec, - data, - false, - false)); - } catch (Exception e) { - LOG.error( - "An exception occured while dealing with a callback:{}\n{}", c, e.getMessage(), e); - } - } + completionCoordinator.completeCallbacks(context, this, cb); } /** * Handles a successful attempt using the attempt's edition as the current latest. * *

This is a convenience overload that forwards to the edition-aware handler and preserves the - * update flag. + * update flag. The method expects that the provided attempt originated from this fetcher; it does + * not perform deep validation beyond scheduling and tracking updates. + * + *

The outcome mirrors the full handler: scheduling decisions, decode choices, and manager +   * updates are derived from the attempt's edition and the current slot state. The call is safe +   * from worker threads and does not block beyond enqueuing follow-up work. Because this overload +   * reads the edition from the attempt itself, synthetic successes without an attempt should go +   * through the edition-aware handler instead.    *    * @param att attempt that completed successfully; may be null for synthetic successes    * @param dontUpdate whether to suppress updating the USK manager with this edition    * @param block block returned by the attempt, or {@code null} for metadata-only successes -   * @param context client context used for scheduling and storage; must not be null +   * @param context client context used for scheduling and storage; must be non-null    */ -  void onSuccess( +  @Override +  public void onSuccess(       USKAttempt att, boolean dontUpdate, ClientSSKBlock block, final ClientContext context) {     onSuccess(att, att.number, dontUpdate, block, context);   } @@ -807,13 +468,20 @@ void onSuccess(    *    *

The method prepares a success plan, cancels obsolete attempts, optionally decodes payload * data, and updates the USK manager unless suppressed. It may also register new attempts to - * continue probing near the current latest edition. + * continue probing near the current latest edition. When {@code dontUpdate} is {@code true}, the + * manager is left untouched but local bookkeeping and decode decisions still apply. + * + *

The method is idempotent with respect to repeated success notifications for the same + * edition; it only advances the latest slot when the reported edition exceeds the current known + * value. Callers should pass the same {@link ClientContext} used by related scheduling operations + * so that follow-up tasks are enqueued on consistent queues. If the fetcher is already completed + * or canceled, the success is ignored and no additional scheduling occurs. * * @param att attempt that completed successfully; may be null for synthetic successes - * @param curLatest edition number discovered by the attempt + * @param curLatest edition number discovered by the attempt; non-negative values are expected * @param dontUpdate whether to suppress updating the USK manager with this edition * @param block fetched block containing metadata or data; may be null for author errors - * @param context client context used for scheduling and storage; must not be null + * @param context client context used for scheduling and storage; must be non-null */ void onSuccess( USKAttempt att, @@ -825,14 +493,13 @@ void onSuccess( if (LOG.isDebugEnabled()) LOG.debug("Found edition {} for {} official is {} on {}", curLatest, origUSK, lastEd, this); - SuccessPlan plan = prepareSuccessPlan(att, curLatest, dontUpdate, block, context, lastEd); + USKSuccessPlanner.SuccessPlan plan = + prepareSuccessPlan(att, curLatest, dontUpdate, block, context, lastEd); if (plan == null) return; // finished or canceled - finishCancelBefore(plan.killAttempts, context); - - Bucket data = decodeBlockIfNeeded(plan.decode, block, context); + attempts.finishCancelBefore(plan.killAttempts, context); - applyDecodedData(plan.decode, block, data); + applyDecodedData(plan.decode, block, context); if (!dontUpdate) uskManager.updateSlot(origUSK, plan.curLatest, context); if (plan.registerNow) registerAttempts(context); @@ -844,72 +511,9 @@ void onSuccess( * @param decode whether decoding should be attempted for this block * @param block block to decode; may be null when decoding is not applicable * @param context client context used for bucket allocation; must not be null - * @return a decoded bucket, or {@code null} when decoding was skipped or failed - */ - private Bucket decodeBlockIfNeeded(boolean decode, ClientSSKBlock block, ClientContext context) { - if (!decode || block == null) return null; - return ClientSSKBlockDecoder.decode(block, context, parent.persistent()); - } - - /** - * Utility for decoding {@link ClientSSKBlock} instances into buckets. - * - *

Decoding errors are treated as non-fatal and reported via logging; the caller receives - * {@code null} when decoding fails or cannot be completed. - */ - private static final class ClientSSKBlockDecoder { - /** Utility class; not instantiable. */ - private ClientSSKBlockDecoder() {} - - /** - * Decodes the provided block using the context's bucket factory. - * - * @param block block to decode; must not be null - * @param context client context used to obtain bucket factories; must not be null - * @param persistent whether the resulting bucket should be persistent - * @return the decoded bucket, or {@code null} when decoding fails - */ - private static Bucket decode(ClientSSKBlock block, ClientContext context, boolean persistent) { - try { - return block.decode(context.getBucketFactory(persistent), 1025 /* it's an SSK */, true); - } catch (KeyDecodeException _) { - return null; - } catch (IOException e) { - LOG.error("An IOE occured while decoding: {}", e.getMessage(), e); - return null; - } - } - } - - /** - * Applies decoded payload data to the fetcher's retained state. - * - *

The method updates compression metadata and either retains or frees the decoded bucket based - * on {@link #keepLastData}. When decoding was not requested, the method returns without modifying - * state. - * - * @param decode whether decoding was requested for this block - * @param block block providing metadata such as compression codec; may be null - * @param data decoded bucket to retain or free; may be null */ - private void applyDecodedData(boolean decode, ClientSSKBlock block, Bucket data) { - synchronized (this) { - if (!decode) return; - if (block != null) { - lastCompressionCodec = block.getCompressionCodec(); - lastWasMetadata = block.isMetadata(); - if (keepLastData) { - if (lastRequestData != null) lastRequestData.free(); - lastRequestData = data; - } else if (data != null) { - data.free(); - } - } else { - lastCompressionCodec = -1; - lastWasMetadata = false; - lastRequestData = null; - } - } + private void applyDecodedData(boolean decode, ClientSSKBlock block, ClientContext context) { + completionCoordinator.applyDecodedData(decode, block, context); } /** @@ -927,7 +531,7 @@ private void applyDecodedData(boolean decode, ClientSSKBlock block, Bucket data) * @param lastEd last known edition from the manager at time of success * @return a success plan, or {@code null} if the fetcher is completed or canceled */ - private SuccessPlan prepareSuccessPlan( + private USKSuccessPlanner.SuccessPlan prepareSuccessPlan( USKAttempt att, long curLatest, boolean dontUpdate, @@ -938,116 +542,57 @@ private SuccessPlan prepareSuccessPlan( List killAttempts = null; boolean registerNow; synchronized (this) { - if (att != null) runningAttempts.remove(att.number); + if (att != null) attempts.removeRunningAttempt(att.number); if (completed || cancelled) { if (LOG.isDebugEnabled()) LOG.debug("Finished already: completed={} cancelled={}", completed, cancelled); return null; } - decode = shouldDecode(curLatest, lastEd, dontUpdate, block); + decode = USKSuccessPlanner.shouldDecode(curLatest, lastEd, dontUpdate, block); curLatest = Math.max(lastEd, curLatest); if (LOG.isDebugEnabled()) LOG.debug("Latest: {} in onSuccess", curLatest); if (!checkStoreOnly) { - killAttempts = cancelBefore(curLatest); - addNewAttempts(curLatest, context); + killAttempts = attempts.cancelBefore(curLatest); + attempts.addNewAttempts(curLatest, context, pollingRound.firstLoop()); } - if ((!scheduleAfterDBRsDone) || !dbrHintFetches.hasOutstanding()) + if ((!schedulingCoordinator.scheduleAfterDBRsDone()) || !dbrHintFetches.hasOutstanding()) registerNow = !fillKeysWatching(curLatest, context); else registerNow = false; } - SuccessPlan plan = new SuccessPlan(); - plan.decode = decode; - plan.curLatest = curLatest; - plan.registerNow = registerNow; - plan.killAttempts = killAttempts; - return plan; - } - - /** - * Determines whether a fetched block should be decoded into data. - * - * @param curLatest edition reported by the attempt - * @param lastEd last known edition at the time of processing - * @param dontUpdate whether the manager should be updated for this result - * @param block fetched block to evaluate; may be null - * @return {@code true} when decoding is required for this result - */ - private static boolean shouldDecode( - long curLatest, long lastEd, boolean dontUpdate, ClientSSKBlock block) { - return curLatest >= lastEd && !(dontUpdate && block == null); - } - - /** - * Adds new polling and random-probe attempts based on the current latest edition. - * - *

The method examines watched keys and subscriber hints to determine which editions should be - * fetched or polled next, and it schedules those attempts immediately. - * - * @param curLatest current latest edition used to seed new attempts - * @param context client context used to schedule new attempts; must not be null - */ - private void addNewAttempts(long curLatest, ClientContext context) { - USKWatchingKeys.ToFetch list = - watchingKeys.getEditionsToFetch( - curLatest, - context.random, - getRunningFetchEditions(), - shouldAddRandomEditions(context.random)); - Lookup[] toPoll = list.poll; - Lookup[] toFetch = list.fetch; - for (Lookup i : toPoll) { - if (LOG.isTraceEnabled()) LOG.trace("Polling {} for {}", i, this); - attemptsToStart.add(add(i, true)); - } - for (Lookup i : toFetch) { - if (LOG.isDebugEnabled()) LOG.debug("Adding checker for edition {} for {}", i, origUSK); - attemptsToStart.add(add(i, false)); - } - } - - /** - * Describes how to process a successful attempt. - * - *

The plan tells the caller whether to decode data, which attempts to cancel, and whether new - * attempts should be registered immediately. - */ - private static final class SuccessPlan { - /** Whether the payload should be decoded and retained. */ - boolean decode; - - /** Latest edition value to use for updates and scheduling. */ - long curLatest; - - /** Whether new attempts should be registered after processing. */ - boolean registerNow; - - /** Attempts that should be canceled because they are now obsolete. */ - List killAttempts; - - /** Creates an empty success plan. */ - SuccessPlan() {} + return successPlanner.createSuccessPlan(decode, curLatest, registerNow, killAttempts); } /** * Determines whether to add random edition probes during scheduling. * - * @param random random source used for probabilistic scheduling; must not be null + *

The decision is delegated to the DBR hint subsystem so that hint fetch outcomes influence + * how aggressively random probing is used. This avoids excessive random probes when hint-driven + * discovery already provides sufficient coverage. + * + * @param random random source used for probabilistic scheduling; must be non-null + * @param isFirstLoop whether this scheduling pass is the first loop after construction * @return {@code true} when random probes should be added for this round */ - private boolean shouldAddRandomEditions(Random random) { - return dbrHintFetches.shouldAddRandomEditions(random, firstLoop); + @Override + public boolean shouldAddRandomEditions(Random random, boolean isFirstLoop) { + return dbrHintFetches.shouldAddRandomEditions(random, isFirstLoop); } /** * Handles cancellation of an attempt and completes cancellation if needed. * - * @param att attempt that was canceled; must not be null - * @param context client context used for callback notifications; must not be null + *

The method removes the attempt from active tracking. If this was the last running attempt + * and the fetcher has already been marked as canceled, completion callbacks are fired. The call + * is safe from worker threads and performs no blocking I/O. + * + * @param att attempt that was canceled; must be non-null and associated with this fetcher + * @param context client context used for callback notifications; must be non-null */ - void onCancelled(USKAttempt att, ClientContext context) { + @Override + public void onCancelled(USKAttempt att, ClientContext context) { synchronized (this) { - runningAttempts.remove(att.number); - if (!runningAttempts.isEmpty()) return; + attempts.removeRunningAttempt(att.number); + if (attempts.hasRunningAttempts()) return; if (cancelled) finishCancelled(context); } @@ -1064,109 +609,7 @@ private void finishCancelled(ClientContext context) { completed = true; cb = callbacks.toArray(new USKFetcherCallback[0]); } - for (USKFetcherCallback c : cb) c.onCancelled(context); - } - - /** - * Removes attempts targeting editions below the provided threshold. - * - *

The returned list contains the canceled attempts so the caller may propagate cancellation. - * The method operates on polling attempts only and respects the ordering of the internal map. - * - * @param curLatest edition threshold; attempts below this edition are removed - * @return list of removed attempts, or {@code null} when no removals were necessary - */ - private List cancelBefore(long curLatest) { - List v = null; - int count = 0; - synchronized (this) { - for (Iterator i = runningAttempts.values().iterator(); i.hasNext(); ) { - USKAttempt att = i.next(); - if (att.number < curLatest) { - if (v == null) v = new ArrayList<>(runningAttempts.size() - count); - v.add(att); - i.remove(); - } - count++; - } - for (Iterator> i = pollingAttempts.entrySet().iterator(); - i.hasNext(); ) { - Map.Entry entry = i.next(); - if (entry.getKey() < curLatest) { - if (v == null) v = new ArrayList<>(Math.max(1, pollingAttempts.size() - count)); - v.add(entry.getValue()); - i.remove(); - } else break; // TreeMap is ordered. - } - } - return v; - } - - /** - * Cancels the provided attempts, if any. - * - * @param v list of attempts to cancel; may be null - * @param context client context used to propagate cancellation; must not be null - */ - private void finishCancelBefore(List v, ClientContext context) { - if (v != null) { - for (USKAttempt att : v) { - att.cancel(context); - } - } - } - - /** - * Adds a new {@link USKAttempt} for the requested edition. - * - *

The attempt is inserted into either the polling or running map depending on {@code forever}. - * The caller is responsible for calling {@link USKAttempt#schedule(ClientContext)} to actually - * enqueue the attempt. - * - * @param l lookup descriptor containing edition and key information; must not be null - * @param forever {@code true} to register as a polling attempt; {@code false} for a one-off probe - * @return the created attempt, or {@code null} when duplicates or invalid state prevent creation - * @throws IllegalArgumentException if the lookup edition is negative - */ - private synchronized USKAttempt add(Lookup l, boolean forever) { - long i = l.val; - if (l.val < 0) - throw new IllegalArgumentException( - "Can't check <0" + FOR_LITERAL + l.val + " on " + this + FOR_LITERAL + origUSK); - if (cancelled) return null; - if (checkStoreOnly) return null; - if (LOG.isDebugEnabled()) LOG.debug("Adding USKAttempt for {} for {}", i, origUSK.getURI()); - if (isDuplicateAttempt(forever, i)) return null; - USKAttempt a = new USKAttempt(l, forever); - if (forever) pollingAttempts.put(i, a); - else { - runningAttempts.put(i, a); - } - if (LOG.isDebugEnabled()) LOG.debug("Added {} for {}", a, origUSK); - return a; - } - - /** - * Checks whether an attempt for the given edition is already registered. - * - * @param forever {@code true} to check polling attempts; {@code false} to check running probes - * @param edition edition number to test for duplication - * @return {@code true} when an attempt already exists for the edition - */ - private boolean isDuplicateAttempt(boolean forever, long edition) { - if (forever) { - if (pollingAttempts.containsKey(edition)) { - if (LOG.isDebugEnabled()) LOG.debug("Already polling edition: {} for {}", edition, this); - return true; - } - } else { - if (runningAttempts.containsKey(edition)) { - if (LOG.isDebugEnabled()) - LOG.debug("Returning because already running for {}", origUSK.getURI()); - return true; - } - } - return false; + completionCoordinator.finishCancelled(context, cb); } /** @@ -1174,11 +617,10 @@ private boolean isDuplicateAttempt(boolean forever, long edition) { * *

The returned URI reflects the base USK namespace and does not change as editions advance. * Callers can use it for logging, diagnostics, or to derive edition-specific URIs via {@link - * USK#copy(long)}. The method performs no I/O and does not allocate new objects beyond the - * returned reference. + * USK#copy(long)}. The method performs no I/O and does not allocate new objects beyond returning + * the existing reference. * - * @return an immutable URI identifying the USK being fetched; callers must not modify the - * returned object + * @return immutable URI identifying the tracked USK; callers must not mutate it */ public FreenetURI getURI() { return origUSK.getURI(); @@ -1190,7 +632,7 @@ public FreenetURI getURI() { *

Returns {@code true} once the fetcher has been canceled or completed. After that point it no * longer schedules work, though background pollers may be re-armed by {@link * #schedule(ClientContext)} if applicable. This method is safe to call from any thread and - * provides a snapshot of state that may change immediately after return. + * provides a snapshot of the state that may change immediately after return. * * @return {@code true} if canceled or completed; otherwise {@code false} */ @@ -1222,9 +664,15 @@ public USK getOriginalUSK() { * from that context. The method is idempotent and safe to call repeatedly; if the fetcher has * already completed or been canceled, the scheduled task will effectively be a no-op. * + *
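+   * <p>For example, a hypothetical delayed re-poll (the delay is plain milliseconds):
+   *
+   * <pre>{@code
+   * // Ask for another scheduling pass roughly five minutes from now.
+   * fetcher.schedule(5L * 60 * 1000, context);
+   * }</pre>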

Delays are expressed in milliseconds and are interpreted relative to the caller's clock. + * This method does not validate whether the fetcher is currently registered; it simply forwards + * to the scheduler. Delayed scheduling preserves the same priority configuration that would be + * applied to an immediate call. The caller should avoid scheduling multiple delayed calls for the + * same instance unless intentional, as each call queues an independent timed job. + * * @param delay delay in milliseconds before scheduling; non-positive schedules immediately - * @param context client context used to reach the scheduler and timing facilities; must not be - * {@code null} + * @param context client context used to reach the scheduler and timing facilities; must be + * non-null */ public void schedule(long delay, final ClientContext context) { if (delay <= 0) { @@ -1244,8 +692,18 @@ public void schedule(long delay, final ClientContext context) { * ensure registration is in place. This method performs no blocking I/O directly; network work is * delegated to the schedulers. * + *

Callers should supply the same {@link ClientContext} used by related requests so scheduling + * occurs on the expected queues. The method is idempotent with respect to registration state, but + * it does not coalesce concurrent calls. If the request is configured for store-only checks, this + * method may resolve the round immediately after store checks are complete. + * + *

+   * <pre>{@code
+   * // Example: schedule immediately after construction
+   * fetcher.schedule(context);
+   * }</pre>
+ * * @param context client context that provides schedulers, timing, and factories required to run - * the discovery loop; must not be {@code null} + * the discovery loop; must be non-null */ @Override public void schedule(ClientContext context) { @@ -1256,7 +714,12 @@ public void schedule(ClientContext context) { uskManager.subscribe(origUSK, this, false, parent.getClient()); boolean startedDBRs = dbrHintFetches.maybeStart(context); long lookedUp = uskManager.lookupLatestSlot(origUSK); - SchedulePlan plan = buildSchedulePlan(lookedUp, startedDBRs, context); + if (shouldAbortSchedule()) return; + USKSchedulingCoordinator.SchedulePlan plan = buildSchedulePlan(lookedUp, startedDBRs, context); + if (plan == null) return; + synchronized (this) { + plan.bye = cancelled || completed; + } if (plan.registerNow) registerAttempts(context); else if (plan.completeCheckingStore) { this.finishSuccess(context); @@ -1280,71 +743,30 @@ private boolean shouldAbortSchedule() { } } + private USKFetcherCallback[] snapshotCallbacks() { + synchronized (this) { + return callbacks.toArray(new USKFetcherCallback[0]); + } + } + /** * Builds a plan describing how to proceed with scheduling for this round. * *

The plan determines whether attempts should be registered immediately, whether the fetcher * should exit early, and whether store-only checking can be considered complete. * - * @param lookedUp latest slot looked up in the manager + * @param lookedUp the latest slot looked up in the manager * @param startedDBRs whether DBR hint fetches were started for this round * @param context client context used for scheduling decisions; must not be null * @return a schedule plan describing next steps for the caller */ - private SchedulePlan buildSchedulePlan( + private USKSchedulingCoordinator.SchedulePlan buildSchedulePlan( long lookedUp, boolean startedDBRs, ClientContext context) { - boolean registerNow = false; - boolean bye; - boolean completeCheckingStore = false; synchronized (this) { - valueAtSchedule = Math.max(lookedUp + 1, valueAtSchedule); - bye = cancelled || completed; - if (!bye) { - // subscribe() above may have called onFoundEdition and thus added a load of stuff. If so, - // we don't need to do so here. - if ((!checkStoreOnly) - && attemptsToStart.isEmpty() - && runningAttempts.isEmpty() - && pollingAttempts.isEmpty()) { - addNewAttempts(lookedUp, context); - } - - started = true; - if (lookedUp <= 0 && startedDBRs) { - // If we don't know anything, do the DBRs first. - scheduleAfterDBRsDone = true; - } else if ((!scheduleAfterDBRsDone) || !dbrHintFetches.hasOutstanding()) { - registerNow = !fillKeysWatching(lookedUp, context); - } - completeCheckingStore = - checkStoreOnly && scheduleAfterDBRsDone && runningStoreChecker == null; - } + if (cancelled || completed) return null; } - SchedulePlan plan = new SchedulePlan(); - plan.registerNow = registerNow; - plan.bye = bye; - plan.completeCheckingStore = completeCheckingStore; - return plan; - } - - /** - * Captures the actions required to continue or conclude a scheduling pass. - * - *

This plan is computed under synchronization and then applied without holding locks to avoid - * long lock hold times. - */ - private static final class SchedulePlan { - /** Whether attempts should be registered immediately after planning. */ - boolean registerNow; - - /** Whether the scheduler should exit early due to cancellation or completion. */ - boolean bye; - - /** Whether store-only checking can be marked complete for this round. */ - boolean completeCheckingStore; - - /** Creates an empty schedule plan. */ - SchedulePlan() {} + return schedulingCoordinator.buildSchedulePlan( + lookedUp, startedDBRs, context, pollingRound.firstLoop()); } /** @@ -1352,81 +774,64 @@ private static final class SchedulePlan { * *

After cancellation the fetcher stops scheduling any further datastore checks, DBR hint * fetches, or edition probes, and it unsubscribes from the {@link USKManager}. In-flight attempts - * are canceled when possible and subsequent calls that would otherwise schedule work become - * no-ops. This method is idempotent; calling it more than once has no additional effect beyond - * logging. + * are canceled when possible, and later calls that would otherwise schedule work become no-ops. + * This method is idempotent; calling it more than once has no additional effect beyond logging. * *

Cancellation does not delete any previously obtained data. If background polling was * configured, it is disabled for the lifetime of this instance. A new {@code USKFetcher} must be * created to resume discovery. * - * @param context the client runtime context used to unregister listeners and cancel outstanding - * work; must not be {@code null} + *
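+   * <p>A teardown sketch ({@code fetcher} and {@code context} are assumed to come from the
+   * caller):
+   *
+   * <pre>{@code
+   * fetcher.cancel(context);
+   * fetcher.cancel(context); // idempotent: the second call only logs
+   * }</pre>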

Cancellation is synchronous with respect to internal bookkeeping but does not wait for + * external network operations to finish; those are aborted or left to complete asynchronously by + * the underlying schedulers. Any retained payload data is cleared, so later callbacks do not + * reuse stale buffers. + * + * @param context client runtime context used to unregister listeners and cancel outstanding work; + * must be non-null */ @Override public void cancel(ClientContext context) { if (LOG.isDebugEnabled()) LOG.debug("Cancelling {}", this); uskManager.unsubscribe(origUSK, this); context.getSskFetchScheduler(realTimeFlag).schedTransient.removePendingKeys((KeyListener) this); - USKAttempt[] attempts; + USKAttempt[] running; USKAttempt[] polling; uskManager.onFinished(this); - SendableGet storeChecker; - Bucket data; synchronized (this) { if (cancelled) LOG.error("Already cancelled {}", this); if (completed) LOG.error("Already completed {}", this); cancelled = true; - attempts = runningAttempts.values().toArray(new USKAttempt[0]); - polling = pollingAttempts.values().toArray(new USKAttempt[0]); - attemptsToStart.clear(); - runningAttempts.clear(); - pollingAttempts.clear(); - storeChecker = runningStoreChecker; - runningStoreChecker = null; - data = lastRequestData; - lastRequestData = null; + running = attempts.snapshotRunningAttempts(); + polling = attempts.snapshotPollingAttempts(); + attempts.clearAllAttempts(); } - for (USKAttempt attempt : attempts) attempt.cancel(context); + for (USKAttempt attempt : running) attempt.cancel(context); for (USKAttempt p : polling) p.cancel(context); dbrHintFetches.cancelAll(context); - if (storeChecker != null) - // Remove from the store checker queue. - storeChecker.unregister(context, storeChecker.getPriorityClass()); - if (data != null) data.free(); + storeChecks.cancelStoreChecker(context); + completionCoordinator.clearLastRequestData(); } - /** - * Set of interested USKCallbacks. Note that we don't actually send them any information - they - * are essentially placeholders, an alternative to a refcount. This could be replaced with a Bloom - * filter or whatever, we only need .exists and .count. - */ - final HashSet subscribers; - - /** Map from subscribers to hint editions. */ - final HashMap subscriberHints = new HashMap<>(); - /** * Adds a subscriber and its current edition hint. * - *

Subscribers are not directly notified by this class; instead they influence whether and how + *

This class does not directly notify subscribers; instead, they influence whether and how * aggressively the fetcher continues to probe for newer editions. Hints help bias the search and - * are folded into the key-watching window used for datastore checks and network probes. + * are folded into the key-watching window used for datastore checks and network probes. The call + * is thread-safe and does not trigger immediate network I/O. Repeated registrations of the same + * callback update its hint and priority contributions without creating duplicate entries. * - * @param cb the subscriber whose interest influences polling priority and continuation; must not - * be {@code null} - * @param hint the subscriber's best-known edition number; values less than or equal to the last - * looked-up slot are ignored; larger values expand the search window + *
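+   * <p>Example registration (the callback and edition hint are illustrative):
+   *
+   * <pre>{@code
+   * // The hint is the subscriber's best-known edition and can widen the watch window.
+   * fetcher.addSubscriber(myUskCallback, 17L);
+   * }</pre>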

The method only mutates subscription state; it does not schedule new attempts directly. Any + * new scheduling decisions will happen when priorities are recomputed or when the next scheduling + * pass runs. + * + * @param cb subscriber whose interest influences polling priority and continuation; must be + * non-null + * @param hint subscriber's best-known edition number; larger values expand the watch window */ public void addSubscriber(USKCallback cb, long hint) { - Long[] hints; - synchronized (this) { - subscribers.add(cb); - subscriberHints.put(cb, hint); - hints = subscriberHints.values().toArray(new Long[0]); - } - updatePriorities(); - watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK)); + USKFetcherCallback[] fetcherCallbacks = snapshotCallbacks(); + subscriberRegistry.addSubscriber(cb, hint, fetcherCallbacks, toString()); } /** @@ -1436,34 +841,7 @@ public void addSubscriber(USKCallback cb, long hint) { * selects the most urgent priorities among all interested parties. */ private void updatePriorities() { - Prio prio = initialPrio(); - USKCallback[] localCallbacks; - USKFetcherCallback[] fetcherCallbacks; - synchronized (this) { - localCallbacks = subscribers.toArray(new USKCallback[0]); - // Callbacks also determine the fetcher's priority. - // Otherwise, USKFetcherTag would have no way to tell us the priority we should run at. - fetcherCallbacks = callbacks.toArray(new USKFetcherCallback[0]); - } - if (noCallbacks(localCallbacks, fetcherCallbacks)) { - setDefaultPriorities(); - return; - } - - accumulatePriorities(localCallbacks, prio); - accumulatePriorities(fetcherCallbacks, prio); - - if (LOG.isDebugEnabled()) - LOG.debug( - "Updating priorities: normal={} progress={} for {} for {}", - prio.normal, - prio.progress, - this, - origUSK); - synchronized (this) { - normalPollPriority = prio.normal; - progressPollPriority = prio.progress; - } + subscriberRegistry.updatePriorities(snapshotCallbacks(), toString()); } /** @@ -1472,91 +850,7 @@ private void updatePriorities() { * @return priority class to use for progress-oriented polling */ short refreshAndGetProgressPollPriority() { - updatePriorities(); - return getPriorityClass(); - } - - /** Mutable container for derived polling priorities. */ - private static final class Prio { - /** Normal polling priority class. */ - short normal; - - /** Progress polling priority class. */ - short progress; - - /** Creates a priority container with unset values. */ - Prio() {} - } - - /** - * Creates a priority container initialized to the paused priority class. - * - * @return a new priority container with paused defaults - */ - private static Prio initialPrio() { - Prio p = new Prio(); - p.normal = RequestStarter.PAUSED_PRIORITY_CLASS; - p.progress = RequestStarter.PAUSED_PRIORITY_CLASS; - return p; - } - - /** - * Checks whether there are no callbacks influencing priority selection. - * - * @param localCallbacks subscriber callbacks to test - * @param fetcherCallbacks fetcher-level callbacks to test - * @return {@code true} when both callback arrays are empty - */ - private static boolean noCallbacks( - USKCallback[] localCallbacks, USKFetcherCallback[] fetcherCallbacks) { - return localCallbacks.length == 0 && fetcherCallbacks.length == 0; - } - - /** Restores default polling priorities for normal and progress polling. 
*/ - private void setDefaultPriorities() { - normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY; - progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY; - if (LOG.isDebugEnabled()) - LOG.debug( - "Updating priorities: normal = {} progress = {} for {} for {}", - normalPollPriority, - progressPollPriority, - this, - origUSK); - } - - /** - * Accumulates priority preferences from subscriber callbacks. - * - * @param cbs callbacks providing priority hints; must not be null - * @param prio mutable container to update with minimum priorities - */ - private void accumulatePriorities(USKCallback[] cbs, Prio prio) { - for (USKCallback cb : cbs) { - short n = cb.getPollingPriorityNormal(); - if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n); - if (n < prio.normal) prio.normal = n; - if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n); - short p = cb.getPollingPriorityProgress(); - if (p < prio.progress) prio.progress = p; - } - } - - /** - * Accumulates priority preferences from fetcher-level callbacks. - * - * @param cbs callbacks providing priority hints; must not be null - * @param prio mutable container to update with minimum priorities - */ - private void accumulatePriorities(USKFetcherCallback[] cbs, Prio prio) { - for (USKFetcherCallback cb : cbs) { - short n = cb.getPollingPriorityNormal(); - if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n); - if (n < prio.normal) prio.normal = n; - if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n); - short p = cb.getPollingPriorityProgress(); - if (p < prio.progress) prio.progress = p; - } + return subscriberRegistry.refreshAndGetProgressPollPriority(snapshotCallbacks(), toString()); } /** @@ -1570,8 +864,8 @@ private void accumulatePriorities(USKFetcherCallback[] cbs, Prio prio) { * * @return {@code true} when one or more subscribers are present; {@code false} when none remain */ - public synchronized boolean hasSubscribers() { - return !subscribers.isEmpty(); + public boolean hasSubscribers() { + return subscriberRegistry.hasSubscribers(); } /** @@ -1585,8 +879,8 @@ public synchronized boolean hasSubscribers() { * @return {@code true} when one or more callbacks are registered; otherwise {@code false} */ @SuppressWarnings("unused") - public synchronized boolean hasCallbacks() { - return !callbacks.isEmpty(); + public boolean hasCallbacks() { + return subscriberRegistry.hasCallbacks(snapshotCallbacks()); } /** @@ -1595,19 +889,13 @@ public synchronized boolean hasCallbacks() { *
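+ * A minimal removal sketch (illustrative only; {@code fetcher} is an existing {@code USKFetcher}
+ * and {@code cb} was previously registered via {@link #addSubscriber(USKCallback, long)}):
+ *
+ * <pre>{@code
+ * fetcher.removeSubscriber(cb);     // unknown callbacks are ignored; null is ignored as well
+ * if (!fetcher.hasSubscribers()) {
+ *   // illustrative follow-up: a caller might stop polling once nobody remains interested
+ * }
+ * }</pre>
+ *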

The subscriber will no longer influence polling priority or the set of editions watched in * the datastore. Removing a non-existent subscriber has no effect. The method also updates * internal hint tracking so that future scheduling reflects the reduced interest set, and it - * recalculates priorities based on remaining subscribers. + * recalculates priorities based on remaining subscribers. The call is thread-safe and does not + * block on network activity. * - * @param cb the subscriber to remove; {@code null} is ignored + * @param cb subscriber to remove; {@code null} is ignored */ public void removeSubscriber(USKCallback cb) { - Long[] hints; - synchronized (this) { - subscribers.remove(cb); - subscriberHints.remove(cb); - hints = subscriberHints.values().toArray(new Long[0]); - } - updatePriorities(); - watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK)); + subscriberRegistry.removeSubscriber(cb, snapshotCallbacks(), toString()); } /** @@ -1618,17 +906,11 @@ public void removeSubscriber(USKCallback cb) { * because those callbacks are tracked separately from subscriber callbacks. This behavior mirrors * legacy expectations where the same callback instance can be used in both roles. * - * @param cb the callback to remove; {@code null} is ignored + * @param cb callback to remove; {@code null} is ignored */ @SuppressWarnings("unused") public void removeCallback(USKCallback cb) { - Long[] hints; - synchronized (this) { - subscribers.remove(cb); - subscriberHints.remove(cb); - hints = subscriberHints.values().toArray(new Long[0]); - } - watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK)); + subscriberRegistry.removeCallback(cb); } /** @@ -1669,8 +951,8 @@ public short getPollingPriorityNormal() { *
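+ * Sketch of the supported refresh path (illustrative, package-internal; {@code fetcher} is an
+ * existing instance):
+ *
+ * <pre>{@code
+ * short prio = fetcher.refreshAndGetProgressPollPriority(); // recomputes, then returns the class
+ * }</pre>
+ *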

Not supported for this class: priority is determined by internal state and the current * progress polling class reported by {@link #getPriorityClass()}. This method is not expected to * be called by production code and will throw an exception if invoked; callers should consult - * {@link #refreshAndGetProgressPollPriority()} instead to refresh priorities and obtain the - * current value. + * {@link #refreshAndGetProgressPollPriority()} instead to refresh priorities and get the current + * value. * * @return never returns normally * @throws UnsupportedOperationException always, because this operation is unsupported here @@ -1681,25 +963,27 @@ public short getPollingPriorityProgress() { } /** - * {@inheritDoc} + * Reacts to a newly discovered USK edition. * *
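+ * Sketch of how the two notification flags are treated here (mirrors the guard below; purely
+ * illustrative):
+ *
+ * <pre>{@code
+ * // newKnownGood  newSlotToo   effect
+ * // true          false        ignored: no new slot was discovered, nothing to schedule
+ * // true          true         processed: a new slot was found (and is known good)
+ * // false         any          processed: slot bookkeeping and scheduling continue
+ * }</pre>
+ *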

When invoked with {@code newKnownGood == true} and {@code newSlotToo == false} the callback * is ignored because slot (edition) discovery is the only driver for follow-up work here. For - * other cases, the method updates the manager and continues the discovery loop as appropriate for - * the configured mode. + * other cases, the method updates internal bookkeeping, may cancel stale attempts, and continues + * the discovery loop as appropriate for the configured mode. This handler does not block; it + * schedules work via the same mechanisms as regular attempts. * - * @param foundEdition The payload describing the discovered edition and its metadata. + * @param foundEdition payload describing the discovered edition and its metadata; must be + * non-null */ @Override public void onFoundEdition(USKFoundEdition foundEdition) { if (foundEdition.newKnownGood() && !foundEdition.newSlotToo()) return; // Only interested in slots - // Because this is frequently run off-thread, it is actually possible that the looked up edition - // is not the same as the edition we are being notified of. - FoundPlan plan = + // Because this is frequently run off-thread, it is actually possible that the looked-up edition + // is different from the edition we are being notified of. + USKSuccessPlanner.FoundPlan plan = prepareFoundPlan(foundEdition.edition(), foundEdition.data(), foundEdition.context()); if (plan == null) return; - finishCancelBefore(plan.killAttempts, foundEdition.context()); + attempts.finishCancelBefore(plan.killAttempts, foundEdition.context()); if (plan.registerNow) registerAttempts(foundEdition.context()); applyFoundDecodedData( plan.decode, @@ -1720,7 +1004,8 @@ public void onFoundEdition(USKFoundEdition foundEdition) { * @param context client context used for scheduling decisions; must not be null * @return a plan describing how to apply the found edition */ - private FoundPlan prepareFoundPlan(long ed, byte[] data, ClientContext context) { + private USKSuccessPlanner.FoundPlan prepareFoundPlan( + long ed, byte[] data, ClientContext context) { final long lastEd = uskManager.lookupLatestSlot(origUSK); boolean decode; List killAttempts = null; @@ -1732,22 +1017,18 @@ private FoundPlan prepareFoundPlan(long ed, byte[] data, ClientContext context) if (LOG.isDebugEnabled()) LOG.debug("Latest: {} in onFoundEdition", ed); if (!checkStoreOnly) { - killAttempts = cancelBefore(ed); - addNewAttempts(ed, context); + killAttempts = attempts.cancelBefore(ed); + attempts.addNewAttempts(ed, context, pollingRound.firstLoop()); } - if ((!scheduleAfterDBRsDone) || !dbrHintFetches.hasOutstanding()) + if ((!schedulingCoordinator.scheduleAfterDBRsDone()) || !dbrHintFetches.hasOutstanding()) registerNow = !fillKeysWatching(ed, context); else registerNow = false; } - FoundPlan plan = new FoundPlan(); - plan.decode = decode; - plan.killAttempts = killAttempts; - plan.registerNow = registerNow; - return plan; + return successPlanner.createFoundPlan(decode, registerNow, killAttempts); } /** - * Applies decoded data from a found edition into retained state. + * Applies decoded data from a found edition into a retained state. * *

When {@code decode} is {@code true}, the method updates compression metadata and retains the * decoded data bucket if configured to keep the last data. @@ -1760,337 +1041,79 @@ private FoundPlan prepareFoundPlan(long ed, byte[] data, ClientContext context) */ private void applyFoundDecodedData( boolean decode, boolean metadata, short codec, byte[] data, ClientContext context) { - synchronized (this) { - if (!decode) return; - lastCompressionCodec = codec; - lastWasMetadata = metadata; - if (keepLastData) { - // Note: converting bucket to byte[] and back is inefficient - if (lastRequestData != null) lastRequestData.free(); - try { - lastRequestData = BucketTools.makeImmutableBucket(context.tempBucketFactory, data); - } catch (IOException e) { - LOG.error("Caught {}", e, e); - } - } - } + completionCoordinator.applyFoundDecodedData(decode, metadata, codec, data, context); } - /** Describes how to apply a found edition and update scheduling state. */ - private static final class FoundPlan { - /** Whether decoded data should be applied. */ - boolean decode; - - /** Attempts to cancel after accepting the found edition. */ - List killAttempts; - - /** Whether to register new attempts immediately. */ - boolean registerNow; - - /** Creates an empty found plan. */ - FoundPlan() {} - } - - /** - * Builds a list of lookup descriptors for currently running attempts. - * - * @return list of unique lookup descriptors from running and polling attempts - */ - private synchronized List getRunningFetchEditions() { - List ret = new ArrayList<>(); - for (USKAttempt a : runningAttempts.values()) { - if (!ret.contains(a.lookup)) ret.add(a.lookup); - } - for (USKAttempt a : pollingAttempts.values()) { - if (!ret.contains(a.lookup)) ret.add(a.lookup); - } - return ret; - } - - /** - * Registers all staged attempts with their schedulers. - * - * @param context client context used to schedule attempts; must not be null - */ - private void registerAttempts(ClientContext context) { - USKAttempt[] attempts; - synchronized (USKFetcher.this) { - if (cancelled || completed) return; - attempts = attemptsToStart.toArray(new USKAttempt[0]); - attemptsToStart.clear(); + private final class StoreCheckCallbacks + implements USKStoreCheckCoordinator.USKStoreCheckCallbacks { + @Override + public void finishSuccess(ClientContext context) { + USKFetcher.this.finishSuccess(context); } - if (attempts.length > 0) parent.toNetwork(context); - if (LOG.isDebugEnabled()) - LOG.debug( - "Registering {} USKChecker's for {} running={} polling={}", - attempts.length, - this, - runningAttempts.size(), - pollingAttempts.size()); - for (USKAttempt attempt : attempts) { - // Look up on each iteration since scheduling can cause new editions to be found sometimes. - long lastEd = uskManager.lookupLatestSlot(origUSK); + @Override + public void notifySendingToNetwork(ClientContext context) { + USKCallback[] toCheck; synchronized (USKFetcher.this) { - // Note: condition may require verification in broader contexts - if (keepLastData && lastRequestData == null && lastEd == origUSK.suggestedEdition) - lastEd--; // If we want the data, then get it for the known edition, so we always get the - // data, so USKInserter can compare it and return the old edition if it is - // identical. 
+ if (cancelled || completed) return; } - if (attempt == null) continue; - if (attempt.number > lastEd) attempt.schedule(context); - else { - synchronized (USKFetcher.this) { - runningAttempts.remove(attempt.number); - } + toCheck = subscriberRegistry.snapshotSubscribers(); + for (USKCallback cb : toCheck) { + if (cb instanceof USKProgressCallback callback) callback.onSendingToNetwork(context); } } - } - - /** Active store checker getter, or {@code null} when no store scan is running. */ - private USKStoreCheckerGetter runningStoreChecker = null; - - /** - * Bundles datastore sub-checkers used to query the local store for candidate editions. - * - *

This helper merges keys from multiple sources and forwards completion notifications back to - * the underlying sub-checkers. - */ - class USKStoreChecker { - - /** Sub-checkers contributing keys to query in the datastore. */ - final USKWatchingKeys.KeyList.StoreSubChecker[] checkers; - - /** - * Creates a store checker from a list of sub-checkers. - * - * @param c sub-checkers that contribute keys; must not be null - */ - public USKStoreChecker(List c) { - checkers = c.toArray(new USKWatchingKeys.KeyList.StoreSubChecker[0]); - } - - /** - * Creates a store checker from an array of sub-checkers. - * - * @param checkers2 sub-checker array to use directly; must not be null - */ - @SuppressWarnings("unused") - public USKStoreChecker(USKWatchingKeys.KeyList.StoreSubChecker[] checkers2) { - checkers = checkers2; - } - /** - * Returns the merged list of keys to check in the datastore. - * - * @return array of keys to check; may be empty - */ - public Key[] getKeys() { - if (checkers.length == 0) return new Key[0]; - if (checkers.length == 1) return checkers[0].keysToCheck; - return mergeKeysFromCheckers(); - } - - /** - * Merges keys from all sub-checkers into a de-duplicated array. - * - * @return merged array of keys to check in the datastore - */ - private Key[] mergeKeysFromCheckers() { - int x = 0; - for (USKWatchingKeys.KeyList.StoreSubChecker checker : checkers) { - x += checker.keysToCheck.length; - } - Key[] keys = new Key[x]; - int ptr = 0; - // Note: a more efficient merging algorithm could consider ranges. - HashSet check = new HashSet<>(); - for (USKWatchingKeys.KeyList.StoreSubChecker checker : checkers) { - for (Key k : checker.keysToCheck) { - if (!check.add(k)) continue; - keys[ptr++] = k; - } - } - if (keys.length != ptr) { - keys = Arrays.copyOf(keys, ptr); - } - return keys; - } - - /** Notifies all sub-checkers that their datastore checks have completed. */ - public void checked() { - for (USKWatchingKeys.KeyList.StoreSubChecker checker : checkers) { - checker.checked(); - } + @Override + public void processAttemptsAfterStoreCheck(USKAttempt[] attempts, ClientContext context) { + USKFetcher.this.attempts.processAttemptsAfterStoreCheck( + new USKAttemptManager.USKAttemptRegistrationParams( + context, completionCoordinator.hasLastRequestData(), origUSK.suggestedEdition), + attempts); } - } - - /** - * Starts or continues datastore checking for watched keys. - * - * @param ed latest known edition used to seed datastore checks - * @param context client context used to register the store checker; must not be null - * @return {@code true} when a store check is already running or was started; {@code false} when - * no store check is required - */ - @SuppressWarnings("BooleanMethodIsAlwaysInverted") - private boolean fillKeysWatching(long ed, ClientContext context) { - synchronized (this) { - // Do not run a new one until this one has finished. - // USKStoreCheckerGetter itself will automatically call back to fillKeysWatching so there is - // no - // chance of losing it. 
- if (runningStoreChecker != null) return true; - final USKStoreChecker checker = watchingKeys.getDatastoreChecker(ed); - if (checker == null) { - if (LOG.isDebugEnabled()) LOG.debug("No datastore checker"); - return false; - } - runningStoreChecker = new USKStoreCheckerGetter(this, parent, checker); - } - try { - context - .getSskFetchScheduler(realTimeFlag) - .register(null, new SendableGet[] {runningStoreChecker}, false, null, false); - } catch (Exception t) { - synchronized (this) { - runningStoreChecker = null; - } - LOG.error("Unable to start: {}", t, t); - try { - runningStoreChecker.unregister(context, progressPollPriority); - } catch (Exception _) { - // Ignore, hopefully it's already unregistered - } + @Override + public boolean shouldDeferUntilDBRs() { + return dbrHintFetches.hasOutstanding(); } - if (LOG.isDebugEnabled()) LOG.debug("Registered {} for {}", runningStoreChecker, this); - return true; - } - /** - * Completes registration after a datastore checker finishes its pre-registration phase. - * - *

The method unregisters the checker, marks it complete, then schedules any pending attempts - * based on the datastore results. When running in store-only mode, it may immediately conclude - * the round after DBR handling. - * - * @param storeChecker active store checker getter instance; must not be null - * @param checker datastore checker wrapper used to mark completion; must not be null - * @param context client context used for scheduling and callbacks; must not be null - * @param toNetwork whether the scheduler intended a network send for the checker - * @return {@code toNetwork} to preserve scheduler semantics; never sends network requests here - */ - @SuppressWarnings("java:S3516") - boolean preRegisterStoreChecker( - USKStoreCheckerGetter storeChecker, - USKStoreChecker checker, - ClientContext context, - boolean toNetwork) { - if (cancelled || completed) { - storeChecker.unregister(context, storeChecker.getPriorityClass()); - synchronized (this) { - runningStoreChecker = null; - } - if (LOG.isDebugEnabled()) - LOG.debug("StoreChecker preRegister aborted: fetcher cancelled/completed"); - return toNetwork; // cancel network send when scheduler planned to send - // value ignored by scheduler when toNetwork == false + @Override + public void setScheduleAfterDBRsDone(boolean value) { + USKFetcher.this.schedulingCoordinator.setScheduleAfterDBRsDone(value); } - storeChecker.unregister(context, storeChecker.getPriorityClass()); - - USKAttempt[] attempts; - synchronized (this) { - runningStoreChecker = null; - // Note: optionally start USKAttempts only when datastore check shows no progress. - attempts = attemptsToStart.toArray(new USKAttempt[0]); - attemptsToStart.clear(); - if (cancelled || completed) attempts = new USKAttempt[0]; + @Override + public boolean isCancelled() { + return USKFetcher.this.isCancelled(); } - checker.checked(); - - if (LOG.isDebugEnabled()) - LOG.debug( - "Checked datastore, finishing registration for {} checkers for {} for {}", - attempts.length, - this, - origUSK); - - if (attempts.length > 0) { - parent.toNetwork(context); - notifySendingToNetwork(context); + @Override + public FetchContext fetcherContext() { + return USKFetcher.this.ctx; } - processAttemptsAfterStoreCheck(attempts, context); - - long lastEd = uskManager.lookupLatestSlot(origUSK); - if (!fillKeysWatching(lastEd, context) && checkStoreOnly) { - if (LOG.isDebugEnabled()) LOG.debug("Just checking store, terminating {} ...", this); - if (shouldDeferUntilDBRs()) { - scheduleAfterDBRsDone = true; - } else { - finishSuccess(context); - } + @Override + public USKFetcher fetcher() { + return USKFetcher.this; } - - return toNetwork; // Store checker never sends network requests itself - // Value is ignored when toNetwork == false } /** - * Notifies progress callbacks that network sending is about to begin. + * Registers all staged attempts with their schedulers. 
* - * @param context client context forwarded to progress callbacks; must not be null + * @param context client context used to schedule attempts; must not be null */ - private void notifySendingToNetwork(ClientContext context) { - USKCallback[] toCheck; + private void registerAttempts(ClientContext context) { synchronized (this) { if (cancelled || completed) return; - toCheck = subscribers.toArray(new USKCallback[0]); - } - for (USKCallback cb : toCheck) { - if (cb instanceof USKProgressCallback callback) callback.onSendingToNetwork(context); } + attempts.registerAttempts( + new USKAttemptManager.USKAttemptRegistrationParams( + context, completionCoordinator.hasLastRequestData(), origUSK.suggestedEdition)); } - /** - * Processes attempts after the datastore check completes. - * - * @param attempts attempts to schedule or drop based on current known edition - * @param context client context used to schedule attempts; must not be null - */ - private void processAttemptsAfterStoreCheck(USKAttempt[] attempts, ClientContext context) { - for (USKAttempt attempt : attempts) { - long lastEd = uskManager.lookupLatestSlot(origUSK); - synchronized (this) { - // Note: condition may need verification. - if (keepLastData && lastRequestData == null && lastEd == origUSK.suggestedEdition) { - // If we want the data, then get it for the known edition, so we always get the data, so - // USKInserter can compare it and return the old edition if it is identical. - lastEd--; - } - } - if (attempt == null) continue; - if (attempt.number > lastEd) attempt.schedule(context); - else { - synchronized (this) { - runningAttempts.remove(attempt.number); - pollingAttempts.remove(attempt.number); - } - } - } - } - - /** - * Determines whether scheduling should wait for DBR hint fetches to finish. - * - * @return {@code true} when outstanding DBR hint fetches are still running - */ - private boolean shouldDeferUntilDBRs() { - return dbrHintFetches.hasOutstanding(); + @SuppressWarnings("BooleanMethodIsAlwaysInverted") + private boolean fillKeysWatching(long ed, ClientContext context) { + return storeChecks.fillKeysWatching(ed, context); } /** @@ -2128,9 +1151,10 @@ public KeyListener makeKeyListener(ClientContext context, boolean onStartup) { * *
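+ * As a worked example of the scale involved: the legacy watch list sized itself as {@code
+ * WATCH_KEYS} plus {@code WATCH_KEYS} per subscriber hint list, so with {@code WATCH_KEYS == 50}
+ * and two hint lists the estimate would be {@code 50 + 2 * 50 == 150} keys; the replacement watch
+ * set is assumed to follow a similar heuristic.
+ *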

The count reflects the internal watch list and is used by schedulers to estimate work * breadth. It does not necessarily equal the number of outstanding network requests and may - * include keys derived from subscriber hints that are not currently scheduled. + * include keys derived from subscriber hints that are not currently scheduled. The value is a + * snapshot that may change immediately after return as subscriptions evolve. * - * @return estimated count of watched keys + * @return current estimate of watched keys for scheduling heuristics and diagnostics */ @Override public synchronized long countKeys() { @@ -2155,7 +1179,7 @@ public short definitelyWantKey(Key key, byte[] saltedKey, ClientContext context) if (!origUSK.samePubKeyHash(k)) return -1; long lastSlot = uskManager.lookupLatestSlot(origUSK) + 1; synchronized (this) { - if (watchingKeys.match(k, lastSlot) != -1) return progressPollPriority; + if (watchingKeys.match(k, lastSlot) != -1) return subscriberRegistry.progressPriority(); } return -1; } @@ -2183,7 +1207,22 @@ public HasKeyListener getHasKeyListener() { */ @Override public short getPriorityClass() { - return progressPollPriority; + return subscriberRegistry.progressPriority(); + } + + @Override + public boolean isBackgroundPoll() { + return backgroundPoll; + } + + @Override + public short getProgressPollPriority() { + return getPriorityClass(); + } + + @Override + public short getNormalPollPriority() { + return subscriberRegistry.normalPriority(); } /** @@ -2217,19 +1256,10 @@ public SendableGet[] getRequestsForKey(Key key, byte[] saltedKey, ClientContext */ @Override public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock found, ClientContext context) { - if (!(found instanceof SSKBlock)) return false; long lastSlot = uskManager.lookupLatestSlot(origUSK) + 1; - long edition = watchingKeys.match((NodeSSK) key, lastSlot); - if (edition == -1) return false; - if (LOG.isDebugEnabled()) LOG.debug("Matched edition {} for {}", edition, origUSK); - - ClientSSKBlock data; - try { - data = watchingKeys.decode((SSKBlock) found, edition); - } catch (SSKVerifyException _) { - data = null; - } - onSuccess(null, edition, false, data, context); + USKKeyWatchSet.MatchedBlock matched = watchingKeys.matchBlock(key, found, lastSlot); + if (matched == null) return false; + onSuccess(null, matched.edition(), false, matched.block(), context); return true; } @@ -2237,7 +1267,8 @@ public boolean handleBlock(Key key, byte[] saltedKey, KeyBlock found, ClientCont * Reports whether this fetcher has no further work to perform. * *

This is used by scheduling infrastructure to decide whether the request should remain - * registered. It mirrors {@link #isCancelled()} semantics for this fetcher. + * registered. It mirrors {@link #isCancelled()} semantics for this fetcher and returns a snapshot + * of state that may change immediately after return. * * @return {@code true} when canceled or completed; otherwise {@code false} */ @@ -2301,7 +1332,7 @@ public byte[] getWantedKey() { * and is used for quick filtering. It matches only {@link NodeSSK} keys for the tracked USK. The * check is conservative and may return {@code false} for keys outside the current watch window. * - * @param key candidate key to evaluate; must not be null + * @param key candidate key to evaluate; must be non-null * @param saltedKey scheduler-provided salted key bytes; unused by this implementation * @return {@code true} when the key appears relevant; otherwise {@code false} */ @@ -2319,8 +1350,8 @@ public boolean probablyWantKey(Key key, byte[] saltedKey) { * Updates the cooldown parameters used by USK polling. * *

This targeted mechanism applies updated cooldown values to the active contexts and live - * polling attempts so they take effect without reconstructing requests. For broader - * configuration, see the tracker discussion linked below. + * polling attempts so they take effect without reconstructing requests. It updates both the + * normal and no-store contexts, then refreshes the live polling attempts to adopt the change. * *
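+ * Illustrative call (assuming {@code time} is the cooldown period in milliseconds, matching the
+ * cooldown setters used below, and {@code fetcher} is an existing instance):
+ *
+ * <pre>{@code
+ * fetcher.changeUSKPollParameters(30 * 60 * 1000L, 3); // example: 30-minute cooldown, 3 retries
+ * }</pre>
+ *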

See: https://bugs.freenetproject.org/view.php?id=4984 @@ -2334,802 +1365,7 @@ public void changeUSKPollParameters(long time, int tries) { this.ctxNoStore.setCooldownRetries(tries); this.ctx.setCooldownTime(time); this.ctxNoStore.setCooldownTime(time); - USKAttempt[] pollers; - synchronized (this) { - pollers = pollingAttempts.values().toArray(new USKAttempt[0]); - } - for (USKAttempt a : pollers) a.reloadPollParameters(); - } - - /** - * Tracks the list of editions that we want to fetch, from various sources - subscribers, origUSK, - * last known slot from USKManager, etc. - * - *

LOCKING: Take the lock on this class last and always pass in lookup values. Do not look up - * values in USKManager inside this class's lock. - * - * @author Matthew Toseland <toad@amphibian.dyndns.org> (0xE43DA450) - */ - private class USKWatchingKeys { - - // Common for whole USK - /** Public key hash for the USK namespace being tracked. */ - final byte[] pubKeyHash; - - /** Crypto algorithm identifier for derived SSKs. */ - final byte cryptoAlgorithm; - - // List of slots since the USKManager's current last known good edition. - /** Key list anchored at the last known good slot. */ - private final KeyList fromLastKnownSlot; - - /** Per-subscriber key lists keyed by hinted edition. */ - private final TreeMap fromSubscribers; - - /** Persistent hint editions that outlive transient subscribers. */ - private final TreeSet persistentHints = new TreeSet<>(); - - // Note: consider additional WeakReference instances: one for the origUSK and - // one per subscriber-provided edition. These should be cleared when the subscriber goes away - // or when superseded by the last known edition. - - /** - * Creates a watcher seeded from the provided USK and last known edition. - * - * @param origUSK base USK used to derive key material; must not be null - * @param lookedUp last known edition slot used to seed key lists - */ - public USKWatchingKeys(USK origUSK, long lookedUp) { - this.pubKeyHash = origUSK.getPubKeyHash(); - this.cryptoAlgorithm = origUSK.cryptoAlgorithm; - if (LOG.isDebugEnabled()) LOG.debug("Creating KeyList from last known good: {}", lookedUp); - fromLastKnownSlot = new KeyList(lookedUp); - fromSubscribers = new TreeMap<>(); - if (origUSK.suggestedEdition > lookedUp) - fromSubscribers.put(origUSK.suggestedEdition, new KeyList(origUSK.suggestedEdition)); - } - - /** Bundles lookup descriptors to fetch immediately and to poll in the background. */ - class ToFetch { - - /** - * Creates a fetch plan from the provided lookup lists. - * - * @param toFetch2 lookups to fetch immediately; must not be null - * @param toPoll2 lookups to poll without immediate fetch; must not be null - */ - public ToFetch(List toFetch2, List toPoll2) { - fetch = toFetch2.toArray(new Lookup[0]); - poll = toPoll2.toArray(new Lookup[0]); - } - - /** Lookups to fetch immediately. */ - public final Lookup[] fetch; - - /** Lookups to poll in background cycles. */ - public final Lookup[] poll; - } - - /** - * Get a bunch of editions to probe for. - * - * @param lookedUp The current best known slot, from USKManager. - * @param random The random number generator. - * @param alreadyRunning This will be modified: We will remove anything that should still be - * running from it. - * @param doRandom whether to include random probes in the returned plan - * @return Editions to fetch and editions to poll for. 
- */ - public synchronized ToFetch getEditionsToFetch( - long lookedUp, Random random, List alreadyRunning, boolean doRandom) { - - if (LOG.isDebugEnabled()) - LOG.debug( - "Get editions to fetch, latest slot is {} running is {}", lookedUp, alreadyRunning); - - List toFetch = new ArrayList<>(); - List toPoll = new ArrayList<>(); - - boolean probeFromLastKnownGood = - lookedUp > -1 || (backgroundPoll && !firstLoop) || fromSubscribers.isEmpty(); - - if (probeFromLastKnownGood) - fromLastKnownSlot.getNextEditions(toFetch, toPoll, lookedUp, alreadyRunning); - - collectFromSubscribers(lookedUp, toFetch, toPoll, alreadyRunning); - - if (doRandom) { - collectRandomEditions( - probeFromLastKnownGood, lookedUp, random, toFetch, toPoll, alreadyRunning); - } - - return new ToFetch(toFetch, toPoll); - } - - /** - * Collects editions contributed by subscribers into fetch and poll lists. - * - * @param lookedUp current best-known slot from the manager - * @param toFetch destination list for immediate fetches; entries are appended - * @param toPoll destination list for polling attempts; entries are appended - * @param alreadyRunning lookups already in flight; may be modified by this method - */ - private void collectFromSubscribers( - long lookedUp, List toFetch, List toPoll, List alreadyRunning) { - // If we have moved past the origUSK, then clear the KeyList for it. - for (Iterator> it = fromSubscribers.entrySet().iterator(); - it.hasNext(); ) { - Entry entry = it.next(); - long l = entry.getKey() - 1; - if (l <= lookedUp) { - it.remove(); - } - if (l == 0) { - // add check for edition 0: this happens if -1 is suggested. - // Needed because we cannot set -0 for exhaustive search (-0 == 0 in Java). - entry.getValue().getEditionIfNotAlreadyRunning(toFetch, alreadyRunning, l, false); - } - entry.getValue().getNextEditions(toFetch, toPoll, l - 1, alreadyRunning); - } - } - - /** - * Adds randomized edition probes to the fetch/poll lists. - * - * @param probeFromLastKnownGood whether to seed probes from the last known good slot - * @param lookedUp current best-known slot used to bias sampling - * @param random random source used to sample editions; must not be null - * @param toFetch destination list for immediate fetches; entries are appended - * @param toPoll destination list for polling attempts; entries are appended - * @param alreadyRunning lookups already in flight; may be modified by this method - */ - private void collectRandomEditions( - boolean probeFromLastKnownGood, - long lookedUp, - Random random, - List toFetch, - List toPoll, - List alreadyRunning) { - // Now getRandomEditions - int runningRandom = countRunningRandom(alreadyRunning, toFetch, toPoll); - - int allowedRandom = 1 + fromSubscribers.size(); - if (LOG.isDebugEnabled()) - LOG.debug( - "Running random requests: {} total allowed: {} looked up is {} for {}", - runningRandom, - allowedRandom, - lookedUp, - USKFetcher.this); - - allowedRandom -= runningRandom; - - if (allowedRandom > 0 && probeFromLastKnownGood) { - fromLastKnownSlot.getRandomEditions(toFetch, lookedUp, alreadyRunning, random, 1); - allowedRandom -= 1; - } - - for (Iterator it = fromSubscribers.values().iterator(); - allowedRandom >= 2 && it.hasNext(); ) { - KeyList k = it.next(); - k.getRandomEditions(toFetch, lookedUp, alreadyRunning, random, 1); - allowedRandom -= 1; - } - } - - /** - * Counts random probes that are already running but not in the current plan. 
- * - * @param alreadyRunning lookups already in flight - * @param toFetch lookups planned for immediate fetch - * @param toPoll lookups planned for polling - * @return number of random probes already running outside the current plan - */ - private static int countRunningRandom( - List alreadyRunning, List toFetch, List toPoll) { - int runningRandom = 0; - for (Lookup l : alreadyRunning) { - if (toFetch.contains(l) || toPoll.contains(l)) continue; - runningRandom++; - } - return runningRandom; - } - - /** - * Reconciles subscriber hints with current persisted and derived hints. - * - * @param hints latest subscriber hint values; must not be null - * @param lookedUp current best-known slot used to discard stale hints - */ - public synchronized void updateSubscriberHints(Long[] hints, long lookedUp) { - List surviving = collectSurvivingHints(hints, lookedUp); - mergePersistentHints(surviving, lookedUp); - ensureSuggestedEditionIncluded(surviving, lookedUp); - reconcileSubscribersWithSurviving(surviving); - } - - /** - * Filters subscriber hints to those that remain relevant beyond {@code lookedUp}. - * - * @param hints subscriber hint values to filter; must not be null - * @param lookedUp current best-known slot used as a cutoff - * @return list of surviving hints in ascending order - */ - private static List collectSurvivingHints(Long[] hints, long lookedUp) { - List surviving = new ArrayList<>(); - Arrays.sort(hints); - long prev = -1; - for (Long hint : hints) { - if (hint <= lookedUp) { - prev = hint; - } else if (hint != prev) { - surviving.add(hint); - prev = hint; - } - } - return surviving; - } - - /** - * Merges persistent hints into the surviving list while dropping stale entries. - * - * @param surviving list of surviving hints to update; must not be null - * @param lookedUp current best-known slot used to drop stale hints - */ - private void mergePersistentHints(List surviving, long lookedUp) { - for (Iterator i = persistentHints.iterator(); i.hasNext(); ) { - Long hint = i.next(); - if (hint <= lookedUp) { - i.remove(); - } - if (surviving.contains(hint)) continue; - surviving.add(hint); - } - } - - /** - * Ensures the USK's suggested edition is present when it is still ahead. - * - * @param surviving list of surviving hints to update; must not be null - * @param lookedUp current best-known slot used as a cutoff - */ - private void ensureSuggestedEditionIncluded(List surviving, long lookedUp) { - if (origUSK.suggestedEdition > lookedUp && !surviving.contains(origUSK.suggestedEdition)) - surviving.add(origUSK.suggestedEdition); - } - - /** - * Reconciles the subscriber map to match the surviving hints list. - * - * @param surviving list of surviving hint editions; must not be null - */ - private void reconcileSubscribersWithSurviving(List surviving) { - for (Iterator it = fromSubscribers.keySet().iterator(); it.hasNext(); ) { - Long l = it.next(); - if (surviving.contains(l)) continue; - it.remove(); - } - for (Long l : surviving) { - if (fromSubscribers.containsKey(l)) continue; - fromSubscribers.put(l, new KeyList(l)); - } - } - - /** - * Adds a persistent hint edition that is ahead of the current lookup. 
- * - * @param suggestedEdition edition number to add; must be greater than {@code lookedUp} - * @param lookedUp current best-known slot used to ignore stale hints - */ - public synchronized void addHintEdition(long suggestedEdition, long lookedUp) { - if (suggestedEdition <= lookedUp) return; - if (!persistentHints.add(suggestedEdition)) return; - if (fromSubscribers.containsKey(suggestedEdition)) return; - fromSubscribers.put(suggestedEdition, new KeyList(suggestedEdition)); - } - - /** - * Estimates the number of watched keys based on current subscriber state. - * - * @return estimated count of watched keys for scheduling decisions - */ - public synchronized long size() { - return WATCH_KEYS - + (long) fromSubscribers.size() * WATCH_KEYS; // Note: does not account for overlap - } - - /** - * A precomputed list of E(H(docname))'s for each slot we might match. This is from an edition - * number which might be out of date. - */ - class KeyList { - - /** The USK edition number of the first slot */ - long firstSlot; - - /** The precomputed E(H(docname)) for each such slot. */ - private WeakReference> cache; - - /** We have checked the datastore from this point. */ - private long checkedDatastoreFrom = -1; - - /** We have checked the datastore up to this point. */ - private long checkedDatastoreTo = -1; - - /** - * Creates a key list anchored at the provided slot. - * - * @param slot first slot to include in the cache - */ - public KeyList(long slot) { - if (LOG.isDebugEnabled()) - LOG.debug( - "Creating KeyList from {} on {} {}", - slot, - USKFetcher.this, - this, - new Exception("debug")); - firstSlot = slot; - RemoveRangeArrayList ehDocnames = new RemoveRangeArrayList<>(WATCH_KEYS); - cache = new WeakReference<>(ehDocnames); - generate(firstSlot, WATCH_KEYS, ehDocnames); - } - - /** - * Add the next set of editions to either {@code toFetch} or {@code toPoll}. If any of those - * editions are already running, remove them from {@code alreadyRunning}. - * - * @param toFetch destination list for editions that should be fetched immediately when not in - * background polling mode; entries are appended, not cleared - * @param toPoll destination list for editions that should be polled (no immediate fetch) when - * in background polling mode; entries are appended, not cleared - * @param lookedUp current best known slot (edition) used as a base for computing the next - * candidate editions; values below zero are treated as zero - * @param alreadyRunning list of lookups currently in progress; this method removes any - * edition that remains valid so it is not scheduled twice - */ - public synchronized void getNextEditions( - List toFetch, List toPoll, long lookedUp, List alreadyRunning) { - if (LOG.isDebugEnabled()) LOG.debug("Getting next editions from {}", lookedUp); - if (lookedUp < 0) lookedUp = 0; - for (int i = 1; i <= origMinFailures; i++) { - long ed = i + lookedUp; - if (backgroundPoll) { - getEditionIfNotAlreadyRunning(toPoll, alreadyRunning, ed, true); - } else { - getEditionIfNotAlreadyRunning(toFetch, alreadyRunning, ed, true); - } - } - } - - /** - * Adds an edition lookup if it is not already running. 
- * - * @param lookupList destination list for new lookups; entries are appended - * @param alreadyRunning list of lookups already in progress; this method removes matches - * @param ed edition number to add - * @param ignoreStore whether this lookup should bypass store checks - * @return whether the edition was added - */ - public boolean getEditionIfNotAlreadyRunning( - List lookupList, List alreadyRunning, long ed, boolean ignoreStore) { - Lookup l = new Lookup(); - l.val = ed; - if (lookupList.contains(l)) { - if (LOG.isTraceEnabled()) LOG.trace("Ignoring {}", l); - return false; - } - if (alreadyRunning.remove(l)) { - if (LOG.isTraceEnabled()) LOG.trace("Ignoring (2): {}", l); - return false; - } - ClientSSK key; - // Note: consider reusing ehDocnames where feasible - // The problem is we need a ClientSSK for the high level stuff. - key = origUSK.getSSK(ed); - l.key = key; - l.ignoreStore = ignoreStore; - if (lookupList.contains(l)) { - if (LOG.isTraceEnabled()) LOG.trace("Ignoring (3): {}", l); - return false; - } - return lookupList.add(l); - } - - /** - * Adds random edition probes to the provided list. - * - * @param toFetch destination list for random probes; entries are appended - * @param lookedUp current best-known slot used as a base - * @param alreadyRunning list of lookups already in progress; used for de-duplication - * @param random random source used for sampling; must not be null - * @param allowed maximum number of random editions to add - */ - public synchronized void getRandomEditions( - List toFetch, - long lookedUp, - List alreadyRunning, - Random random, - int allowed) { - // Then add a couple of random editions for catch-up. - long baseEdition = lookedUp + origMinFailures; - for (int i = 0; i < allowed; i++) { - while (true) { // Note: consider switching to limited for-loop to ensure there can be no - // infinite loop - long fetch = sampleGeometric(baseEdition, random); - if (tryAddRandomEdition(toFetch, lookedUp, alreadyRunning, fetch)) break; - } - } - } - - /** - * Samples a future edition using a geometric distribution. - * - * @param baseEdition base edition offset for sampling - * @param random random source used to sample; must not be null - * @return sampled edition number at or above {@code baseEdition} - */ - private static long sampleGeometric(long baseEdition, Random random) { - // Geometric distribution. - // 20% chance of mean 100, 80% chance of mean 10. Thanks evanbd. - while (true) { - int mean = random.nextInt(5) == 0 ? 100 : 10; - double u = uniform01FromLong(random); - long fetch = baseEdition + (long) Math.floor(Math.log(u) / Math.log(1.0 - 1.0 / mean)); - if (fetch >= baseEdition) return fetch; - } - } - - /** - * Creates a uniform random value in (0,1] using {@link Random#nextLong()}. - * - * @param random random source used for sampling; must not be null - * @return uniform value in the open interval (0,1] - */ - private static double uniform01FromLong(Random random) { - long bits = random.nextLong() & Long.MAX_VALUE; // 0 .. 2^63-1 - return (bits + 1.0) / (Long.MAX_VALUE + 1.0); - } - - /** - * Attempts to add a random edition if it is not already scheduled. 
- * - * @param toFetch destination list for random probes; entries are appended - * @param lookedUp current best-known slot used for range decisions - * @param alreadyRunning list of lookups already in progress; used for de-duplication - * @param fetch sampled edition to add - * @return {@code true} when the edition was added to the fetch list - */ - private boolean tryAddRandomEdition( - List toFetch, long lookedUp, List alreadyRunning, long fetch) { - if (LOG.isDebugEnabled()) - LOG.debug( - "Trying random future edition {} for {} current edition {}", - fetch, - origUSK, - lookedUp); - return getEditionIfNotAlreadyRunning( - toFetch, alreadyRunning, fetch, (fetch - lookedUp) < WATCH_KEYS); - } - - /** Represents a sub-range of datastore keys to check. */ - public class StoreSubChecker { - - /** Keys to check */ - final NodeSSK[] keysToCheck; - - /** The edition from which we will have checked after we have executed this. */ - private final long checkedFrom; - - /** The edition up to which we have checked after we have executed this. */ - private final long checkedTo; - - /** - * Creates a sub-checker for a contiguous range of editions. - * - * @param keysToCheck node keys to check; must not be null - * @param checkFrom starting edition of the range - * @param checkTo ending edition (exclusive) of the range - */ - private StoreSubChecker(NodeSSK[] keysToCheck, long checkFrom, long checkTo) { - this.keysToCheck = keysToCheck; - this.checkedFrom = checkFrom; - this.checkedTo = checkTo; - if (LOG.isDebugEnabled()) - LOG.debug( - "Checking datastore from {} to {} for {} on {}", - checkFrom, - checkTo, - USKFetcher.this, - this); - } - - /** The keys have been checked. */ - void checked() { - synchronized (KeyList.this) { - // Update the start bound only when the previous range does not already cover it. - if (!(checkedDatastoreTo >= checkedFrom && checkedDatastoreFrom <= checkedFrom)) { - checkedDatastoreFrom = checkedFrom; - } - checkedDatastoreTo = checkedTo; - if (LOG.isDebugEnabled()) - LOG.debug( - "Checked from {} to {} (now overall is {} to {}) for {} for {}", - checkedFrom, - checkedTo, - checkedDatastoreFrom, - checkedDatastoreTo, - USKFetcher.this, - origUSK); - } - } - } - - /** - * Builds a datastore checker for a window of slots starting at {@code lastSlot}. - * - *

The method reuses and extends the cached document-name hashes as needed and returns a - * sub-checker describing the keys to check in the datastore. - * - * @param lastSlot starting edition to check from - * @return a sub-checker describing keys to check, or {@code null} when no work is needed - */ - public synchronized StoreSubChecker checkStore(long lastSlot) { - if (LOG.isDebugEnabled()) - LOG.debug("check store from {} current first slot {}", lastSlot, firstSlot); - long checkFrom = lastSlot; - long checkTo = lastSlot + WATCH_KEYS; - if (checkedDatastoreTo >= checkFrom) { - checkFrom = checkedDatastoreTo; - } - if (checkFrom >= checkTo) return null; // Nothing to check. - // Update the cache. - RemoveRangeArrayList ehDocnames = updateCache(lastSlot); - // Now create NodeSSK[] from the part of the cache that - // ehDocnames[0] is firstSlot - // ehDocnames[checkFrom-firstSlot] is checkFrom - int offset = (int) (checkFrom - firstSlot); - NodeSSK[] keysToCheck = new NodeSSK[WATCH_KEYS - offset]; - for (int x = 0, i = offset; i < WATCH_KEYS; i++, x++) { - keysToCheck[x] = new NodeSSK(pubKeyHash, ehDocnames.get(i), cryptoAlgorithm); - } - return new StoreSubChecker(keysToCheck, checkFrom, checkTo); - } - - /** - * Updates the cached document-name hashes based on a new base edition. - * - * @param curBaseEdition base edition used to realign the cache - * @return updated cache containing hashes for the current window - */ - synchronized RemoveRangeArrayList updateCache(long curBaseEdition) { - if (LOG.isDebugEnabled()) - LOG.debug("update cache from {} current first slot {}", curBaseEdition, firstSlot); - RemoveRangeArrayList ehDocnames; - if (cache == null || (ehDocnames = cache.get()) == null) { - ehDocnames = new RemoveRangeArrayList<>(WATCH_KEYS); - cache = new WeakReference<>(ehDocnames); - firstSlot = curBaseEdition; - if (LOG.isDebugEnabled()) LOG.debug("Regenerating because lost cached keys"); - generate(firstSlot, WATCH_KEYS, ehDocnames); - return ehDocnames; - } - match(null, curBaseEdition, ehDocnames); - return ehDocnames; - } - - /** - * Updates the cache if needed and attempts to match the provided key. - * - * @param key key to match, or {@code null} to only update the cache - * @param curBaseEdition new base edition used to realign the cache - * @return edition number for the key, or {@code -1} when not matched - */ - public synchronized long match(NodeSSK key, long curBaseEdition) { - if (LOG.isDebugEnabled()) - LOG.debug("match from {} current first slot {}", curBaseEdition, firstSlot); - RemoveRangeArrayList ehDocnames; - if (cache == null || (ehDocnames = cache.get()) == null) { - ehDocnames = new RemoveRangeArrayList<>(WATCH_KEYS); - cache = new WeakReference<>(ehDocnames); - firstSlot = curBaseEdition; - generate(firstSlot, WATCH_KEYS, ehDocnames); - return key == null ? -1 : innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); - } - // Might as well check first. - long x = innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); - if (x != -1) return x; - return match(key, curBaseEdition, ehDocnames); - } - - /** - * Updates the cache for a new base edition and matches only the changed segments. 
- * - * @param key key to match; may be {@code null} to skip matching - * @param curBaseEdition edition to align the cache with - * @param ehDocnames cached document-name hashes to update - * @return edition number for the key, or {@code -1} when not matched - */ - private long match( - NodeSSK key, long curBaseEdition, RemoveRangeArrayList ehDocnames) { - if (LOG.isDebugEnabled()) - LOG.debug( - "Matching {} cur base edition {} first slot was {} for {} on {}", - key, - curBaseEdition, - firstSlot, - origUSK, - this); - if (firstSlot < curBaseEdition) { - return handleFirstSlotBehind(key, curBaseEdition, ehDocnames); - } else if (firstSlot > curBaseEdition) { - return handleFirstSlotAhead(key, ehDocnames, curBaseEdition); - } - return -1; - } - - /** - * Handles the case where {@code firstSlot} is behind the new base edition. - * - * @param key key to match; may be {@code null} to skip matching - * @param curBaseEdition new base edition - * @param ehDocnames cached document-name hashes to update - * @return edition number for the key, or {@code -1} when not matched - */ - private long handleFirstSlotBehind( - NodeSSK key, long curBaseEdition, RemoveRangeArrayList ehDocnames) { - if (firstSlot + ehDocnames.size() <= curBaseEdition) { - // No overlap. Clear it and start again. - ehDocnames.clear(); - firstSlot = curBaseEdition; - generate(curBaseEdition, WATCH_KEYS, ehDocnames); - return key == null ? -1 : innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); - } else { - // There is some overlap. Delete the first part of the array then add stuff at the end. - // ehDocnames[i] is slot firstSlot + i - // We want to get rid of anything before curBaseEdition - // So the first slot that is useful is the slot at i = curBaseEdition - firstSlot - // Which is the new [0], whose edition is curBaseEdition - ehDocnames.removeRange(0, (int) (curBaseEdition - firstSlot)); - int size = ehDocnames.size(); - firstSlot = curBaseEdition; - generate(curBaseEdition + size, WATCH_KEYS - size, ehDocnames); - return key == null ? -1 : innerMatch(key, ehDocnames, WATCH_KEYS - size, size, firstSlot); - } - } - - /** - * Handles the case where {@code firstSlot} is ahead of the new base edition. - * - * @param key key to match; may be {@code null} to skip matching - * @param ehDocnames cached document-name hashes to consult - * @param curBaseEdition new base edition that lags behind {@code firstSlot} - * @return edition number for the key, or {@code -1} when not matched - */ - private long handleFirstSlotAhead( - NodeSSK key, RemoveRangeArrayList ehDocnames, long curBaseEdition) { - // Normal due to race conditions. We don't always report the new edition to the USKManager - // immediately. - // So ignore it. - if (LOG.isTraceEnabled()) - LOG.trace("Ignoring regression in match() from {} to {}", curBaseEdition, firstSlot); - return key == null ? -1 : innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); - } - - /** - * Matches a key against a slice of the cached hash list. 
- * - * @param key key to match; must not be null - * @param ehDocnames cached document-name hashes to scan - * @param offset start offset within the cache - * @param size number of entries to scan - * @param firstSlot edition represented by cache index 0 - * @return matched edition number, or {@code -1} when not found - */ - private long innerMatch( - NodeSSK key, - RemoveRangeArrayList ehDocnames, - int offset, - int size, - long firstSlot) { - byte[] data = key.getKeyBytes(); - for (int i = offset; i < (offset + size); i++) { - if (Arrays.equals(data, ehDocnames.get(i))) { - if (LOG.isDebugEnabled()) LOG.debug("Found edition {} for {}", firstSlot + i, origUSK); - return firstSlot + i; - } - } - return -1; - } - - /** - * Appends a series of document-name hashes to the cache. - * - * @param baseEdition edition to start from - * @param keys number of keys to add - * @param ehDocnames cache to append to; must not be null - */ - private void generate(long baseEdition, int keys, RemoveRangeArrayList ehDocnames) { - if (LOG.isDebugEnabled()) LOG.debug("generate() from {} for {}", baseEdition, origUSK); - assert (baseEdition >= 0); - for (int i = 0; i < keys; i++) { - long ed = baseEdition + i; - ehDocnames.add(origUSK.getSSK(ed).ehDocname); - } - } - } - - /** - * Builds a datastore checker for the current watch lists. - * - * @param lastSlot last known good edition used to seed checks - * @return store checker to run, or {@code null} when no checks are required - */ - public synchronized USKStoreChecker getDatastoreChecker(long lastSlot) { - // Check WATCH_KEYS from last known good slot. - // Note: does not currently take origUSK or subscribers into account. - if (LOG.isDebugEnabled()) - LOG.debug( - "Getting datastore checker from {} for {} on {}", - lastSlot, - origUSK, - USKFetcher.this, - new Exception("debug")); - List checkers = new ArrayList<>(); - KeyList.StoreSubChecker c = fromLastKnownSlot.checkStore(lastSlot + 1); - if (c != null) checkers.add(c); - // If we have moved past the origUSK, then clear the KeyList for it. - for (Iterator> it = fromSubscribers.entrySet().iterator(); - it.hasNext(); ) { - Entry entry = it.next(); - long l = entry.getKey(); - if (l <= lastSlot) it.remove(); - c = entry.getValue().checkStore(l); - if (c != null) checkers.add(c); - } - if (!checkers.isEmpty()) return new USKStoreChecker(checkers); - else return null; - } - - /** - * Decodes a low-level {@link SSKBlock} into a client-level block for the given edition. - * - * @param block low-level block to decode; must not be null - * @param edition edition number that the block is expected to represent - * @return decoded client block for the edition - * @throws SSKVerifyException if the block does not match the expected docname hash - */ - public ClientSSKBlock decode(SSKBlock block, long edition) throws SSKVerifyException { - ClientSSK csk = origUSK.getSSK(edition); - if (!Arrays.equals(csk.ehDocname, block.getKey().getKeyBytes())) { - throw new SSKVerifyException("Docname hash mismatch for decoded block"); - } - return ClientSSKBlock.construct(block, csk); - } - - /** - * Attempts to match the provided node key against watched key lists. 
- * - * @param key node key to match; must not be null - * @param lastSlot last known good edition used to prune stale lists - * @return matched edition number, or {@code -1} when no match is found - */ - public synchronized long match(NodeSSK key, long lastSlot) { - if (LOG.isDebugEnabled()) - LOG.debug("Trying to match {} from slot {} for {}", key, lastSlot, origUSK); - long ret = fromLastKnownSlot.match(key, lastSlot); - if (ret != -1) return ret; - - for (Iterator> it = fromSubscribers.entrySet().iterator(); - it.hasNext(); ) { - Entry entry = it.next(); - long l = entry.getKey(); - if (l <= lastSlot) it.remove(); - ret = entry.getValue().match(key, l); - if (ret != -1) return ret; - } - return -1; - } + attempts.reloadPollParameters(); } /** @@ -3137,54 +1373,22 @@ public synchronized long match(NodeSSK key, long lastSlot) { * *
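+ * Illustrative call ({@code fetcher} is an existing instance; the edition value is an example):
+ *
+ * <pre>{@code
+ * fetcher.addHintEdition(1234L); // has no effect unless 1234 is ahead of the last looked-up slot
+ * }</pre>
+ *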

Hints greater than the current last-known slot are remembered and may expand the search * window. Duplicate or stale hints are ignored. This method does not trigger immediate network - * activity; it only updates the internal watch list used for subsequent scheduling rounds. + * activity; it only updates the internal watch list used for later scheduling rounds. * - * @param suggestedEdition the edition number to add as a hint; must be greater than the last + * @param suggestedEdition edition number to add as a hint; must be greater than the last * looked-up slot to have any effect */ public void addHintEdition(long suggestedEdition) { watchingKeys.addHintEdition(suggestedEdition, uskManager.lookupLatestSlot(origUSK)); } - /** Describes a specific edition lookup and its derived key. */ - private class Lookup { - /** Edition value represented by this lookup. */ - long val; - - /** Client SSK key derived for the edition. */ - ClientSSK key; - - /** Whether this lookup should bypass store checks. */ - boolean ignoreStore; - - /** Creates an empty lookup descriptor. */ - Lookup() {} - - @Override - public boolean equals(Object o) { - if (o instanceof Lookup lookup) { - return lookup.val == val; - } else return false; - } - - @Override - public int hashCode() { - return Long.hashCode(val); - } - - @Override - public String toString() { - return origUSK + ":" + val; - } - } - /** * Resumes the request after a restart. * *

USKFetcher does not persist across restarts; callers should recreate it via the manager - * instead of resuming. + * instead of resuming. The method exists to satisfy interface requirements and always throws. * - * @param context client context that would be used for resuming; must not be null + * @param context client context that would be used for resuming; must be non-null * @throws UnsupportedOperationException always, because this fetcher is not persistent */ @Override @@ -3195,9 +1399,10 @@ public void onResume(ClientContext context) { /** * Notifies the fetcher that the node is shutting down. * - *

USKFetcher does not persist state, so shutdown handling is not supported. + *

USKFetcher does not persist state, so shutdown handling is not supported. The method exists + * to satisfy interface requirements and always throws. * - * @param context client context associated with shutdown; must not be null + * @param context client context associated with shutdown; must be non-null * @throws UnsupportedOperationException always, because this fetcher is not persistent */ @Override diff --git a/src/main/java/network/crypta/client/async/USKKeyWatchSet.java b/src/main/java/network/crypta/client/async/USKKeyWatchSet.java new file mode 100644 index 0000000000..934f786f6c --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKKeyWatchSet.java @@ -0,0 +1,1137 @@ +package network.crypta.client.async; + +import java.lang.ref.WeakReference; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.Random; +import java.util.TreeMap; +import java.util.TreeSet; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.SSKBlock; +import network.crypta.keys.SSKVerifyException; +import network.crypta.keys.USK; +import network.crypta.support.RemoveRangeArrayList; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Tracks edition windows and lookup plans for a single {@link USK} namespace. + * + *

This watch set aggregates the last known good edition, per-subscriber hints, and persistent + * hints to decide which editions should be fetched immediately and which can be polled in the + * background. It maintains short caches of derived document-name hashes so matches against inbound + * keys and datastore blocks can be resolved without recomputing hashes on each request. State + * evolves as callers report new hints and as successful lookups advance the baseline slot. + * + *

All mutable state is guarded by this instance's lock. Callers must acquire the lock on this + * object last and pass in any looked-up values; do not perform external lookups while holding this + * lock. + *

    + *
  • Compute fetch and poll plans for upcoming editions. + *
  • Maintain per-subscriber and persistent hint tracking. + *
  • Match keys or blocks to editions using cached hashes. + *
+ */ +final class USKKeyWatchSet { + /** Default number of edition slots probed per lookup window. */ + static final int WATCH_KEYS = 50; + + /** Logger for watch-set diagnostics and trace output. */ + private static final Logger LOG = LoggerFactory.getLogger(USKKeyWatchSet.class); + + /** USK, whose editions are being monitored and expanded into SSK lookups. */ + private final USK origUSK; + + /** Minimum number of failed edition probes to schedule beyond {@code lookedUp}. */ + private final int origMinFailures; + + /** Whether new lookups should be scheduled as background polls instead of immediate fetches. */ + private final boolean backgroundPoll; + + // Common for the whole USK + /** Public key hash for the USK namespace being tracked. */ + private final byte[] pubKeyHash; + + /** Crypto algorithm identifier for derived SSKs. */ + private final byte cryptoAlgorithm; + + // List of slots since the USKManager's current last known good edition. + /** Key list anchored at the last known good slot. */ + private final KeyList fromLastKnownSlot; + + /** Per-subscriber key lists keyed by the hinted edition. */ + private final TreeMap fromSubscribers; + + /** Persistent hint editions that outlive transient subscribers. */ + private final TreeSet persistentHints = new TreeSet<>(); + + /** + * Creates a watch set seeded from the current manager slot and USK hints. + * + *

The constructor initializes the shared hash cache for the last known good edition and + * records the configuration used to plan future lookups. If the USK already suggests an edition + * ahead of {@code lookedUp}, a subscriber list is seeded so that edition is fetched even before + * explicit hint updates arrive. + * + * @param origUSK base USK, whose editions and keys will be tracked, must not be null + * @param lookedUp current best-known slot from the manager; {@code -1} means unknown + * @param origMinFailures minimum number of failed edition probes to schedule past {@code + * lookedUp} + * @param backgroundPoll whether newly scheduled lookups should be polled rather than fetched + */ + USKKeyWatchSet(USK origUSK, long lookedUp, int origMinFailures, boolean backgroundPoll) { + this.origUSK = origUSK; + this.origMinFailures = origMinFailures; + this.backgroundPoll = backgroundPoll; + this.pubKeyHash = origUSK.getPubKeyHash(); + this.cryptoAlgorithm = origUSK.cryptoAlgorithm; + if (LOG.isDebugEnabled()) LOG.debug("Creating KeyList from last known good: {}", lookedUp); + fromLastKnownSlot = new KeyList(lookedUp); + fromSubscribers = new TreeMap<>(); + if (origUSK.suggestedEdition > lookedUp) + fromSubscribers.put(origUSK.suggestedEdition, new KeyList(origUSK.suggestedEdition)); + } + + /** + * Bundles lookup descriptors to fetch immediately and to poll in the background. + * + *

The two arrays represent a single planning cycle produced by {@link #getEditionsToFetch}. + * Callers typically enqueue the {@link #fetch} entries for immediate network fetches and schedule + * {@link #poll} entries for lower-priority background polling. The arrays are fixed-size copies + * of the lists provided to the constructor. + */ + static class ToFetch { + + /** + * Creates a fetch plan from the provided lookup lists. + *

The constructor copies the list contents into fixed arrays. The original lists are not + * retained, so callers may continue to mutate them after construction without affecting the + * stored plan. The ordering of entries is preserved from the input lists. + * + * @param toFetch2 lookups to fetch immediately; non-null, in planned execution order + * @param toPoll2 lookups to poll without immediate fetch; non-null, in planned order + */ + public ToFetch(List toFetch2, List toPoll2) { + fetch = toFetch2.toArray(new Lookup[0]); + poll = toPoll2.toArray(new Lookup[0]); + } + + /** + * Lookups to fetch immediately. + * + *

This array represents higher-priority fetches that should be started right away. Entries + * are unique for a given planning cycle and already filtered against the running set. The array + * is owned by this instance and should be treated as read-only by callers. + */ + public final Lookup[] fetch; + + /** + * Lookups to poll in background cycles. + * + *

This array represents lower-priority probes suitable for periodic polling. Entries are + * stable for the planning cycle and already deduplicated against active lookups. The array is + * owned by this instance and should be treated as read-only by callers. + */ + public final Lookup[] poll; + } + + /** + * Builds a plan of editions to fetch immediately and to poll in the background. + * + *
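+ * <p>For illustration, a caller-side sketch of consuming a plan (the {@code active} list is a
+ * hypothetical snapshot of in-flight lookups; the other identifiers appear elsewhere in this
+ * diff):
+ * <pre>{@code
+ * List<Lookup> active = new ArrayList<>(); // hypothetical: lookups currently in flight
+ * ToFetch plan = watchingKeys.getEditionsToFetch(
+ *     uskManager.lookupLatestSlot(origUSK), context.random, active, true, false);
+ * for (Lookup l : plan.fetch) {
+ *   // schedule an immediate attempt for l
+ * }
+ * for (Lookup l : plan.poll) {
+ *   // schedule a background polling attempt for l
+ * }
+ * }</pre>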

The plan is derived from the last known good slot, active subscriber hints, and optional + * random sampling. The method removes lookups that are already running from the supplied list, so + * callers can reuse that list as a deduplication set. When background polling is enabled, the + * method prefers polling new editions rather than immediate fetches. The returned plan is a + * snapshot; later updates to hints do not retroactively change it. + * + * @param lookedUp current best-known slot from the manager; {@code -1} when unknown + * @param random random source used for optional sampling; must not be null when {@code doRandom} + * @param alreadyRunning lookups already in flight; entries that remain valid are removed in-place + * @param doRandom whether to include randomized probes beyond deterministic windows + * @param isFirstLoop whether this is the first polling loop of a watch cycle + * @return plan containing lookups to fetch immediately and to poll later + */ + public synchronized ToFetch getEditionsToFetch( + long lookedUp, + Random random, + List alreadyRunning, + boolean doRandom, + boolean isFirstLoop) { + + if (LOG.isDebugEnabled()) + LOG.debug("Get editions to fetch, latest slot is {} running is {}", lookedUp, alreadyRunning); + + List toFetch = new ArrayList<>(); + List toPoll = new ArrayList<>(); + + boolean probeFromLastKnownGood = + lookedUp > -1 || (backgroundPoll && !isFirstLoop) || fromSubscribers.isEmpty(); + + if (probeFromLastKnownGood) + fromLastKnownSlot.getNextEditions(toFetch, toPoll, lookedUp, alreadyRunning); + + collectFromSubscribers(lookedUp, toFetch, toPoll, alreadyRunning); + + if (doRandom) { + collectRandomEditions( + probeFromLastKnownGood, lookedUp, random, toFetch, toPoll, alreadyRunning); + } + + return new ToFetch(toFetch, toPoll); + } + + /** + * Collects editions contributed by subscribers into fetch and poll lists. + * + * @param lookedUp current best-known slot from the manager + * @param toFetch destination list for immediate fetches; entries are appended + * @param toPoll destination list for polling attempts; entries are appended + * @param alreadyRunning lookups already in flight; may be modified by this method + */ + private void collectFromSubscribers( + long lookedUp, List toFetch, List toPoll, List alreadyRunning) { + // If we have moved past the origUSK, then clear the KeyList for it. + for (Iterator> it = fromSubscribers.entrySet().iterator(); + it.hasNext(); ) { + Entry entry = it.next(); + long l = entry.getKey() - 1; + if (l <= lookedUp) { + it.remove(); + } + if (l == 0) { + // add a check for edition 0: this happens if -1 is suggested. + // Needed because we cannot set -0 for exhaustive search (-0 == 0 in Java). + entry.getValue().getEditionIfNotAlreadyRunning(toFetch, alreadyRunning, l, false); + } + entry.getValue().getNextEditions(toFetch, toPoll, l - 1, alreadyRunning); + } + } + + /** + * Adds randomized edition probes to the fetch/poll lists. 
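+ * <p>As a worked example of the quota computed below: with two subscriber hint lists and one
+ * random probe already running outside the current plan, the budget is 1 + 2 - 1 = 2 additional
+ * random editions for this cycle.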
+ * + * @param probeFromLastKnownGood whether to seed probe from the last known good slot + * @param lookedUp the current best-known slot used to bias sampling + * @param random random source used to sample editions; must not be null + * @param toFetch destination list for immediate fetches; entries are appended + * @param toPoll destination list for polling attempts; entries are appended + * @param alreadyRunning lookups already in flight; may be modified by this method + */ + private void collectRandomEditions( + boolean probeFromLastKnownGood, + long lookedUp, + Random random, + List toFetch, + List toPoll, + List alreadyRunning) { + // Now getRandomEditions + int runningRandom = countRunningRandom(alreadyRunning, toFetch, toPoll); + + int allowedRandom = 1 + fromSubscribers.size(); + if (LOG.isDebugEnabled()) + LOG.debug( + "Running random requests: {} total allowed: {} looked up is {} for {}", + runningRandom, + allowedRandom, + lookedUp, + origUSK); + + allowedRandom -= runningRandom; + + if (allowedRandom > 0 && probeFromLastKnownGood) { + fromLastKnownSlot.getRandomEditions(toFetch, lookedUp, alreadyRunning, random, 1); + allowedRandom -= 1; + } + + for (Iterator it = fromSubscribers.values().iterator(); + allowedRandom >= 2 && it.hasNext(); ) { + KeyList k = it.next(); + k.getRandomEditions(toFetch, lookedUp, alreadyRunning, random, 1); + allowedRandom -= 1; + } + } + + /** + * Counts random probes that are already running but not in the current plan. + * + * @param alreadyRunning lookups already in flight + * @param toFetch lookups planned for immediate fetch + * @param toPoll lookups planned for polling + * @return number of random probes already running outside the current plan + */ + private static int countRunningRandom( + List alreadyRunning, List toFetch, List toPoll) { + int runningRandom = 0; + for (Lookup l : alreadyRunning) { + if (toFetch.contains(l) || toPoll.contains(l)) continue; + runningRandom++; + } + return runningRandom; + } + + /** + * Reconciles subscriber hints with persisted and derived hints for this watch set. + * + *

The supplied hint array is sorted and deduplicated, then merged with persistent hints and + * the USK's suggested edition when it is still ahead of {@code lookedUp}. Any hints at or below + * the current slot are discarded. The subscriber map is then updated to reflect the surviving + * hints, creating or removing {@link KeyList} instances as needed. + * + * @param hints latest subscriber hint values; non-null, may contain duplicates + * @param lookedUp current best-known slot used to discard stale hints and prune lists + */ + public synchronized void updateSubscriberHints(Long[] hints, long lookedUp) { + List surviving = collectSurvivingHints(hints, lookedUp); + mergePersistentHints(surviving, lookedUp); + ensureSuggestedEditionIncluded(surviving, lookedUp); + reconcileSubscribersWithSurviving(surviving); + } + + /** + * Filters subscriber hints to those that remain relevant beyond {@code lookedUp}. + * + * @param hints subscriber hint values to filter; must not be null + * @param lookedUp current best-known slot used as a cutoff + * @return list of surviving hints in ascending order + */ + private static List collectSurvivingHints(Long[] hints, long lookedUp) { + List surviving = new ArrayList<>(); + Arrays.sort(hints); + long prev = -1; + for (Long hint : hints) { + if (hint <= lookedUp) { + prev = hint; + } else if (hint != prev) { + surviving.add(hint); + prev = hint; + } + } + return surviving; + } + + /** + * Merges persistent hints into the surviving list while dropping stale entries. + * + * @param surviving list of surviving hints to update; must not be null + * @param lookedUp current best-known slot used to drop stale hints + */ + private void mergePersistentHints(List surviving, long lookedUp) { + for (Iterator i = persistentHints.iterator(); i.hasNext(); ) { + Long hint = i.next(); + if (hint <= lookedUp) { + i.remove(); + } + if (surviving.contains(hint)) continue; + surviving.add(hint); + } + } + + /** + * Ensures the USK's suggested edition is present when it is still ahead. + * + * @param surviving list of surviving hints to update; must not be null + * @param lookedUp current best-known slot used as a cutoff + */ + private void ensureSuggestedEditionIncluded(List surviving, long lookedUp) { + if (origUSK.suggestedEdition > lookedUp && !surviving.contains(origUSK.suggestedEdition)) + surviving.add(origUSK.suggestedEdition); + } + + /** + * Reconciles the subscriber map to match the surviving hints list. + * + * @param surviving list of surviving hint editions; must not be null + */ + private void reconcileSubscribersWithSurviving(List surviving) { + for (Iterator it = fromSubscribers.keySet().iterator(); it.hasNext(); ) { + Long l = it.next(); + if (surviving.contains(l)) continue; + it.remove(); + } + for (Long l : surviving) { + if (fromSubscribers.containsKey(l)) continue; + fromSubscribers.put(l, new KeyList(l)); + } + } + + /** + * Adds a persistent hint edition that is ahead of the current lookup. + * + *
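+ * <p>A minimal usage sketch, mirroring the fetcher-side call shown earlier in this diff:
+ * <pre>{@code
+ * watchingKeys.addHintEdition(suggestedEdition, uskManager.lookupLatestSlot(origUSK));
+ * }</pre>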

The hint is stored in the persistent set so it survives transient subscribers. If the hint + * is new and still ahead of {@code lookedUp}, a {@link KeyList} is created to schedule fetches + * for that edition. Hints at or behind the current slot are ignored. + * + * @param suggestedEdition edition number to add; must be greater than {@code lookedUp} + * @param lookedUp the current best-known slot used to ignore stale hints + */ + public synchronized void addHintEdition(long suggestedEdition, long lookedUp) { + if (suggestedEdition <= lookedUp) return; + if (!persistentHints.add(suggestedEdition)) return; + if (fromSubscribers.containsKey(suggestedEdition)) return; + fromSubscribers.put(suggestedEdition, new KeyList(suggestedEdition)); + } + + /** + * Estimates the number of watched keys based on the current subscriber state. + * + *

The returned value multiplies the configured watch window by the number of active subscriber + * lists, plus the base watch list. The estimate does not account for overlapping editions across + * lists, so callers should treat it as an upper bound for scheduling heuristics. + * + * @return estimated count of watched keys for scheduling and load decisions + */ + public synchronized long size() { + return WATCH_KEYS + (long) fromSubscribers.size() * WATCH_KEYS; // Note: does not account for + // overlap + } + + /** + * Builds datastore sub-checkers for the current watch lists. + * + *
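+ * <p>Illustrative consumption sketch (the datastore lookup itself happens outside this class and
+ * is only hinted at in the comment):
+ * <pre>{@code
+ * List<KeyList.StoreSubChecker> checkers = watchingKeys.getDatastoreCheckers(lastSlot);
+ * if (checkers != null) {
+ *   for (KeyList.StoreSubChecker c : checkers) {
+ *     // look up each NodeSSK in c.keysToCheck against the local datastore ...
+ *     c.checked(); // then record that this window has been covered
+ *   }
+ * }
+ * }</pre>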

The method creates sub-checkers that cover a window of {@link #WATCH_KEYS} editions for the + * last known good slot and any subscriber-provided hints. Each sub-checker encapsulates the set + * of {@link NodeSSK} keys that should be checked in the datastore. When no checks are required, + * the method returns {@code null} to avoid unnecessary work. + * + * @param lastSlot the last known good edition used to seed checks and prune stale lists + * @return datastore sub-checkers to run, or {@code null} when no checks are required + */ + public synchronized List getDatastoreCheckers(long lastSlot) { + // Check WATCH_KEYS from last known good slot. + // Note: does not currently take origUSK or subscribers into account. + if (LOG.isDebugEnabled()) + LOG.debug("Getting datastore checker from {} for {}", lastSlot, origUSK); + List checkers = new ArrayList<>(); + KeyList.StoreSubChecker c = fromLastKnownSlot.checkStore(lastSlot + 1); + if (c != null) checkers.add(c); + // If we have moved past the origUSK, then clear the KeyList for it. + for (Iterator> it = fromSubscribers.entrySet().iterator(); + it.hasNext(); ) { + Entry entry = it.next(); + long l = entry.getKey(); + if (l <= lastSlot) it.remove(); + c = entry.getValue().checkStore(l); + if (c != null) checkers.add(c); + } + return checkers.isEmpty() ? null : checkers; + } + + /** + * Decodes a low-level {@link SSKBlock} into a client-level block for the given edition. + * + *

The method derives the expected {@link ClientSSK} from the USK and verifies that the + * document-name hash in the block matches the derived value. On success, the block is wrapped in + * a {@link ClientSSKBlock} for higher-level consumers. Verification is strict and will throw when + * the block does not correspond to the expected edition. + * + * @param block low-level block to decode; must not be null and must be an SSK block + * @param edition edition number that the block is expected to represent + * @return decoded client block for the edition, ready for higher-level processing + * @throws SSKVerifyException if the block does not match the expected document-name hash + */ + public ClientSSKBlock decode(SSKBlock block, long edition) throws SSKVerifyException { + ClientSSK csk = origUSK.getSSK(edition); + if (!Arrays.equals(csk.ehDocname, block.getKey().getKeyBytes())) { + throw new SSKVerifyException("Docname hash mismatch for decoded block"); + } + return ClientSSKBlock.construct(block, csk); + } + + /** + * Attempts to match the provided node key against watched key lists. + * + *

The method checks the base watch list anchored at the last known good slot and then scans + * any subscriber-provided lists. Subscriber lists whose edition anchors are at or behind {@code + * lastSlot} are discarded as stale. Matching is performed against cached document-name hashes and + * returns the edition number when the key corresponds to a watched slot. + * + * @param key node key to match; must not be null and must belong to the same USK + * @param lastSlot the last known good edition used to prune stale lists and bound matching + * @return matched edition number, or {@code -1} when no match is found + */ + public synchronized long match(NodeSSK key, long lastSlot) { + if (LOG.isDebugEnabled()) + LOG.debug("Trying to match {} from slot {} for {}", key, lastSlot, origUSK); + long ret = fromLastKnownSlot.match(key, lastSlot); + if (ret != -1) return ret; + + for (Iterator> it = fromSubscribers.entrySet().iterator(); + it.hasNext(); ) { + Entry entry = it.next(); + long l = entry.getKey(); + if (l <= lastSlot) it.remove(); + ret = entry.getValue().match(key, l); + if (ret != -1) return ret; + } + return -1; + } + + /** + * Reports whether a key is definitely wanted by this watch set. + * + *

The check is strict: the key must be a {@link NodeSSK} that shares the USK public key hash + * and must match one of the currently watched editions. When a match is found, the supplied + * {@code progressPriority} is returned so callers can preserve their scheduling class. + * + * @param key candidate key to evaluate; must not be null and must be a {@link NodeSSK} + * @param lastSlot the last known good edition used to bound the match + * @param progressPriority priority class to return on match + * @return priority class when wanted, or {@code -1} when not wanted + */ + public short definitelyWantKey(Key key, long lastSlot, short progressPriority) { + if (!(key instanceof NodeSSK k)) return -1; + if (!origUSK.samePubKeyHash(k)) return -1; + synchronized (this) { + if (match(k, lastSlot) != -1) return progressPriority; + } + return -1; + } + + /** + * Reports whether a key is probably wanted by this watch set. + * + *

This check is a softer version of {@link #definitelyWantKey(Key, long, short)} and returns + * only a boolean. The key must be a {@link NodeSSK} for the same USK and must match a watched + * edition. The result reflects the current watch lists and may change as hints are updated. + * + * @param key candidate key to evaluate; must not be null and must be a {@link NodeSSK} + * @param lastSlot the last known good edition used to bound the match + * @return {@code true} if the key appears relevant, {@code false} otherwise + */ + @SuppressWarnings("unused") + public boolean probablyWantKey(Key key, long lastSlot) { + if (!(key instanceof NodeSSK k)) return false; + if (!origUSK.samePubKeyHash(k)) return false; + synchronized (this) { + return match(k, lastSlot) != -1; + } + } + + /** + * Attempts to match and decode a found block against the watch lists. + * + *
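+ * <p>Sketch of how a caller can interpret the result ({@code handleDecodedEdition} is a
+ * hypothetical handler):
+ * <pre>{@code
+ * MatchedBlock mb = watchingKeys.matchBlock(key, found, lastSlot);
+ * if (mb == null) {
+ *   // not one of the watched editions
+ * } else if (mb.block() != null) {
+ *   handleDecodedEdition(mb.edition(), mb.block());
+ * } else {
+ *   // matched a watched edition, but the block failed SSK verification
+ * }
+ * }</pre>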

The method first verifies that the incoming key and block are of the SSK type, then attempts + * to match the key against the watched editions. If a match is found, the block is decoded and + * verified against the expected document-name hash for that edition. Verification failures return + * a {@link MatchedBlock} with a {@code null} payload to indicate that the key matched a watched edition but the block failed to decode. + * + * @param key key associated with the found block; must be a {@link NodeSSK} + * @param found block returned from the datastore; must be an {@link SSKBlock} + * @param lastSlot the last known good edition used to bound the match + * @return a matched block result, or {@code null} when no match was found + */ + public MatchedBlock matchBlock(Key key, KeyBlock found, long lastSlot) { + if (!(found instanceof SSKBlock sskBlock)) return null; + if (!(key instanceof NodeSSK)) return null; + long edition; + synchronized (this) { + edition = match((NodeSSK) key, lastSlot); + } + if (edition == -1) return null; + if (LOG.isDebugEnabled()) LOG.debug("Matched edition {} for {}", edition, origUSK); + + ClientSSKBlock data; + try { + data = decode(sskBlock, edition); + } catch (SSKVerifyException _) { + data = null; + } + return new MatchedBlock(edition, data); + } + + /** + * Describes a matched block and its resolved edition number. + * + * @param edition resolved edition value that matched the watch list + * @param block decoded client block, or {@code null} when verification failed + */ + record MatchedBlock(long edition, ClientSSKBlock block) {} + + /** + * Caches derived document-name hashes for a sliding window of editions. + *

Each {@code KeyList} is anchored at a specific base edition and maintains a fixed-size + * window of {@link #WATCH_KEYS} hashes derived from the owning USK. The cache is stored in a weak + * reference, so it can be reclaimed when memory is tight, with regeneration on demand. The list + * is used to match incoming {@link NodeSSK} keys or to build datastore checkers without + * recomputing hashes for every request. + */ + class KeyList { + + /** + * USK edition number represented by cache index 0. + * + *

This value advances as the cache is realigned to newer base editions. It is always greater + * than or equal to zero and acts as the base offset for indexing into {@link #cache}. + */ + long firstSlot; + + /** + * Weakly referenced cache of document-name hashes for each watched slot. + * + *

The list contains {@code WATCH_KEYS} entries whenever populated. It can be cleared by the + * garbage collector, in which case it is regenerated on the next access. + */ + private WeakReference> cache; + + /** + * The lowest edition for which datastore checks have been confirmed. + * + *

Initialized to {@code -1} to represent "unchecked". Updated as sub-checkers report + * completion in {@link StoreSubChecker#checked()}. + */ + private long checkedDatastoreFrom = -1; + + /** + * The highest edition (exclusive) for which datastore checks have been confirmed. + * + *

Initialized to {@code -1} to represent "unchecked". Updated as sub-checkers report + * completion in {@link StoreSubChecker#checked()}. + */ + private long checkedDatastoreTo = -1; + + /** + * Creates a key list anchored at the provided slot. + * + *

The cache window is initialized immediately with {@link #WATCH_KEYS} hashes derived from + * the USK. The window can later be realigned as newer base editions are reported, preserving + * any overlapping entries when possible. + * + * @param slot the first slot to include in the cache; must be zero or higher + */ + public KeyList(long slot) { + if (LOG.isDebugEnabled()) + LOG.debug("Creating KeyList from {} on {} {}", slot, origUSK, this, new Exception("debug")); + firstSlot = slot; + RemoveRangeArrayList ehDocnames = new RemoveRangeArrayList<>(WATCH_KEYS); + cache = new WeakReference<>(ehDocnames); + generate(firstSlot, WATCH_KEYS, ehDocnames); + } + + /** + * Adds the next set of editions to either {@code toFetch} or {@code toPoll}. + * + *

The method advances forward from {@code lookedUp}, scheduling up to {@code + * origMinFailures} editions. Already-running lookups are removed from {@code alreadyRunning} to + * avoid duplicate scheduling. When background polling is enabled, the editions are appended to + * the poll list instead of the immediate fetch list. + * + * @param toFetch destination list for editions that should be fetched immediately when not in + * background polling mode; entries are appended, not cleared + * @param toPoll destination list for editions that should be polled (no immediate fetch) when + * in background polling mode; entries are appended, not cleared + * @param lookedUp current best known slot (edition) used as a base for computing the next + * candidate editions; values below zero are treated as zero + * @param alreadyRunning list of lookups currently in progress; this method removes any edition + * that remains valid so it is not scheduled twice + */ + public synchronized void getNextEditions( + List toFetch, List toPoll, long lookedUp, List alreadyRunning) { + if (LOG.isDebugEnabled()) LOG.debug("Getting next editions from {}", lookedUp); + if (lookedUp < 0) lookedUp = 0; + for (int i = 1; i <= origMinFailures; i++) { + long ed = i + lookedUp; + if (backgroundPoll) { + getEditionIfNotAlreadyRunning(toPoll, alreadyRunning, ed, true); + } else { + getEditionIfNotAlreadyRunning(toFetch, alreadyRunning, ed, true); + } + } + } + + /** + * Adds an edition lookup if it is not already running. + * + *

The lookup is deduplicated against both the target list and the already-running list. If a + * matching lookup is found in {@code alreadyRunning}, it is removed and no new entry is added. + * The resulting {@link Lookup} contains the derived {@link ClientSSK} key for the edition. + * + * @param lookupList destination list for new lookups; entries are appended in order + * @param alreadyRunning list of lookups already in progress; this method removes matches + * @param ed edition number to add as a lookup candidate + * @param ignoreStore whether this lookup should bypass store checks + * @return {@code true} when the edition was added, {@code false} when deduplicated + */ + public boolean getEditionIfNotAlreadyRunning( + List lookupList, List alreadyRunning, long ed, boolean ignoreStore) { + Lookup l = new Lookup(); + l.val = ed; + l.label = origUSK.toString(); + if (lookupList.contains(l)) { + if (LOG.isTraceEnabled()) LOG.trace("Ignoring {}", l); + return false; + } + if (alreadyRunning.remove(l)) { + if (LOG.isTraceEnabled()) LOG.trace("Ignoring (2): {}", l); + return false; + } + ClientSSK key; + // Note: consider reusing ehDocnames where possible + // The problem is we need a ClientSSK for the high level stuff. + key = origUSK.getSSK(ed); + l.key = key; + l.ignoreStore = ignoreStore; + if (lookupList.contains(l)) { + if (LOG.isTraceEnabled()) LOG.trace("Ignoring (3): {}", l); + return false; + } + return lookupList.add(l); + } + + /** + * Adds random edition probes to the provided list. + * + *

The method samples future editions using {@link #sampleGeometric(long, Random)} and adds + * them to {@code toFetch} until {@code allowed} entries are accepted. Each sampled edition is + * deduplicated against the running set. The random probes help catch up to fast-moving editions + * without needing to scan every intermediate slot. + * + * @param toFetch destination list for random probes; entries are appended + * @param lookedUp current best-known slot used as a base for sampling + * @param alreadyRunning list of lookups already in progress; used for deduplication + * @param random random source used for sampling; must not be null + * @param allowed maximum number of random editions to add + */ + public synchronized void getRandomEditions( + List toFetch, + long lookedUp, + List alreadyRunning, + Random random, + int allowed) { + // Then add a couple of random editions for catch-up. + long baseEdition = lookedUp + origMinFailures; + for (int i = 0; i < allowed; i++) { + while (true) { // Note: consider switching to limited for-loop to ensure there can be no + // infinite loop + long fetch = sampleGeometric(baseEdition, random); + if (tryAddRandomEdition(toFetch, lookedUp, alreadyRunning, fetch)) break; + } + } + } + + /** + * Samples a future edition using a geometric distribution. + * + *
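+ * <p>Worked example of the formula used below: with mean 10 and a uniform draw of u = 0.5,
+ * floor(log(0.5) / log(1 - 1/10)) = floor(6.58) = 6, so the sampled edition is
+ * {@code baseEdition + 6}; with mean 100 the same draw yields {@code baseEdition + 68}.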

The sampling uses a mix of means to bias toward nearer editions while still allowing + * larger jumps. The returned edition is always greater than or equal to {@code baseEdition}. + * + * @param baseEdition base edition offset for sampling; must be zero or higher + * @param random random source used to sample; must not be null + * @return sampled edition number at or above {@code baseEdition} + */ + private static long sampleGeometric(long baseEdition, Random random) { + // Geometric distribution. + // 20% chance of mean 100, 80% chance of mean 10. Thanks evanbd. + while (true) { + int mean = random.nextInt(5) == 0 ? 100 : 10; + double u = uniform01FromLong(random); + long fetch = baseEdition + (long) Math.floor(Math.log(u) / Math.log(1.0 - 1.0 / mean)); + if (fetch >= baseEdition) return fetch; + } + } + + /** + * Creates a uniform random value in (0,1] using {@link Random#nextLong()}. + * + *
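+ * <p>For example, a masked value of 0 maps to 1 / 2^63 (about 1.1e-19, never exactly zero), and
+ * a masked value of {@link Long#MAX_VALUE} maps to exactly 1.0.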

The helper converts the positive {@code long} range into a floating-point value in the + * open interval (0,1]. It never returns zero, which avoids taking {@code log(0)} when sampling. + * + * @param random random source used for sampling; must not be null + * @return uniform value in the open interval (0,1] + */ + private static double uniform01FromLong(Random random) { + long bits = random.nextLong() & Long.MAX_VALUE; // 0 .. 2^63-1 + return (bits + 1.0) / (Long.MAX_VALUE + 1.0); + } + + /** + * Attempts to add a random edition if it is not already scheduled. + * + *

The lookup is deduplicated against the running set and uses the {@code ignoreStore} flag + * when the sampled edition is close enough to {@code lookedUp}. The method logs diagnostic + * information when debug logging is enabled. + * + * @param toFetch destination list for random probes; entries are appended + * @param lookedUp current best-known slot used for range decisions + * @param alreadyRunning list of lookups already in progress; used for deduplication + * @param fetch sampled edition to add + * @return {@code true} when the edition was added to the fetch list + */ + private boolean tryAddRandomEdition( + List toFetch, long lookedUp, List alreadyRunning, long fetch) { + if (LOG.isDebugEnabled()) + LOG.debug( + "Trying random future edition {} for {} current edition {}", fetch, origUSK, lookedUp); + return getEditionIfNotAlreadyRunning( + toFetch, alreadyRunning, fetch, (fetch - lookedUp) < WATCH_KEYS); + } + + /** + * Represents a sub-range of datastore keys to check. + * + *

The sub-checker encapsulates a contiguous range of editions and the corresponding {@link + * NodeSSK} keys. Once the caller verifies those keys against the datastore, it should invoke + * {@link #checked()} to update the parent {@link KeyList} state. + */ + public class StoreSubChecker { + + /** + * Keys to check in the datastore for this range. + * + *

The array is ordered by increasing edition and is owned by the sub-checker. + */ + final NodeSSK[] keysToCheck; + + /** + * The edition from which the datastore will be checked after execution. + * + *

This value is inclusive and marks the start of the checked range. + */ + private final long checkedFrom; + + /** + * The edition up to which the datastore will be checked after execution. + * + *

This value is exclusive and marks the end of the checked range. + */ + private final long checkedTo; + + /** + * Creates a sub-checker for a contiguous range of editions. + * + *

The caller is responsible for running datastore checks for each key in {@code + * keysToCheck} and then calling {@link #checked()} to advance the cached datastore bounds. + * + * @param keysToCheck node keys to check; must not be null and in ascending edition order + * @param checkFrom starting edition of the range, inclusive + * @param checkTo ending edition of the range, exclusive + */ + private StoreSubChecker(NodeSSK[] keysToCheck, long checkFrom, long checkTo) { + this.keysToCheck = keysToCheck; + this.checkedFrom = checkFrom; + this.checkedTo = checkTo; + if (LOG.isDebugEnabled()) + LOG.debug( + "Checking datastore from {} to {} for {} on {}", checkFrom, checkTo, origUSK, this); + } + + /** + * Marks this checker as completed and updates datastore bounds. + * + *

The method updates the parent {@link KeyList} with the completed range. It keeps the + * existing lower bound if it already covers {@code checkedFrom}, but always advances the + * upper bound to {@code checkedTo}. Callers should invoke this once per sub-checker after all + * keys have been verified. + */ + void checked() { + synchronized (KeyList.this) { + // Update the start bound only when the previous range does not already cover it. + if (!(checkedDatastoreTo >= checkedFrom && checkedDatastoreFrom <= checkedFrom)) { + checkedDatastoreFrom = checkedFrom; + } + checkedDatastoreTo = checkedTo; + if (LOG.isDebugEnabled()) + LOG.debug( + "Checked from {} to {} (now overall is {} to {}) for {}", + checkedFrom, + checkedTo, + checkedDatastoreFrom, + checkedDatastoreTo, + origUSK); + } + } + } + + /** + * Builds a datastore checker for a window of slots starting at {@code lastSlot}. + * + *
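+ * <p>Worked example, assuming the cache realigns to {@code lastSlot}: with {@code lastSlot} =
+ * 100, {@code WATCH_KEYS} = 50 and a previous check that already covered editions up to 120, the
+ * range becomes 120 (inclusive) to 150 (exclusive), so 30 keys are returned for editions
+ * 120..149.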

The checker describes a contiguous range of editions beginning at {@code lastSlot} and + * spanning up to {@link #WATCH_KEYS} entries. The method reuses cached hashes whenever possible + * and skips work already covered by prior datastore checks. When no new range remains, the + * method returns {@code null}. + * + * @param lastSlot starting edition to check from; values below zero are treated as zero + * @return a sub-checker describing keys to check, or {@code null} when no work is needed + */ + public synchronized StoreSubChecker checkStore(long lastSlot) { + if (LOG.isDebugEnabled()) + LOG.debug("check store from {} current first slot {}", lastSlot, firstSlot); + long checkFrom = lastSlot; + long checkTo = lastSlot + WATCH_KEYS; + if (checkedDatastoreTo >= checkFrom) { + checkFrom = checkedDatastoreTo; + } + if (checkFrom >= checkTo) return null; // Nothing to check. + // Update the cache. + RemoveRangeArrayList ehDocnames = updateCache(lastSlot); + // Now create NodeSSK[] from the part of the cache that + // ehDocnames[0] is firstSlot + // ehDocnames[checkFrom-firstSlot] is checkFrom + int offset = (int) (checkFrom - firstSlot); + NodeSSK[] keysToCheck = new NodeSSK[WATCH_KEYS - offset]; + for (int x = 0, i = offset; i < WATCH_KEYS; i++, x++) { + keysToCheck[x] = new NodeSSK(pubKeyHash, ehDocnames.get(i), cryptoAlgorithm); + } + return new StoreSubChecker(keysToCheck, checkFrom, checkTo); + } + + /** + * Updates the cached document-name hashes based on a new base edition. + * + *

The cache is regenerated if it has been reclaimed by the garbage collector. Otherwise, the + * existing list is realigned to {@code curBaseEdition} by trimming or extending entries as + * needed. The returned cache is always populated with {@link #WATCH_KEYS} entries. + * + * @param curBaseEdition base edition used to realign the cache + * @return updated cache containing hashes for the current window + */ + synchronized RemoveRangeArrayList updateCache(long curBaseEdition) { + if (LOG.isDebugEnabled()) + LOG.debug("update cache from {} current first slot {}", curBaseEdition, firstSlot); + RemoveRangeArrayList ehDocnames; + if (cache == null || (ehDocnames = cache.get()) == null) { + ehDocnames = new RemoveRangeArrayList<>(WATCH_KEYS); + cache = new WeakReference<>(ehDocnames); + firstSlot = curBaseEdition; + if (LOG.isDebugEnabled()) LOG.debug("Regenerating because lost cached keys"); + generate(firstSlot, WATCH_KEYS, ehDocnames); + return ehDocnames; + } + match(null, curBaseEdition, ehDocnames); + return ehDocnames; + } + + /** + * Updates the cache if needed and attempts to match the provided key. + * + *

If the cache is missing, it is regenerated for {@code curBaseEdition}. Otherwise, the + * method checks the current cache first and only performs a realignment when needed. A {@code + * null} key skips matching and simply ensures the cache is aligned. + * + * @param key key to match, or {@code null} to only update the cache + * @param curBaseEdition new base edition used to realign the cache + * @return edition number for the key, or {@code -1} when not matched + */ + public synchronized long match(NodeSSK key, long curBaseEdition) { + if (LOG.isDebugEnabled()) + LOG.debug("match from {} current first slot {}", curBaseEdition, firstSlot); + RemoveRangeArrayList ehDocnames; + if (cache == null || (ehDocnames = cache.get()) == null) { + ehDocnames = new RemoveRangeArrayList<>(WATCH_KEYS); + cache = new WeakReference<>(ehDocnames); + firstSlot = curBaseEdition; + generate(firstSlot, WATCH_KEYS, ehDocnames); + return key == null ? -1 : innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); + } + // Might as well check first. + long x = innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); + if (x != -1) return x; + return match(key, curBaseEdition, ehDocnames); + } + + /** + * Updates the cache for a new base edition and matches only the changed segments. + * + *

This helper avoids rechecking the entire cache by updating only the sections that changed + * due to the base edition moving forward or backward. When the base edition regresses, the + * cache is left intact and matching uses the existing window. + * + * @param key key to match; may be {@code null} to skip matching + * @param curBaseEdition edition to align the cache with + * @param ehDocnames cached document-name hashes to update + * @return edition number for the key, or {@code -1} when not matched + */ + private long match(NodeSSK key, long curBaseEdition, RemoveRangeArrayList ehDocnames) { + if (LOG.isDebugEnabled()) + LOG.debug( + "Matching {} cur base edition {} first slot was {} for {} on {}", + key, + curBaseEdition, + firstSlot, + origUSK, + this); + if (firstSlot < curBaseEdition) { + return handleFirstSlotBehind(key, curBaseEdition, ehDocnames); + } else if (firstSlot > curBaseEdition) { + return handleFirstSlotAhead(key, ehDocnames, curBaseEdition); + } + return -1; + } + + /** + * Handles the case where {@code firstSlot} is behind the new base edition. + * + *
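+ * <p>Overlap example: with {@code firstSlot} = 10, a full window of 50 cached hashes (editions
+ * 10..59) and a new base edition of 30, the first 20 entries are dropped, {@code firstSlot}
+ * becomes 30, and 20 fresh hashes are generated for editions 60..79, so the window again covers
+ * editions 30..79.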

If the new base edition is beyond the cached window, the cache is rebuilt from scratch. If + * there is overlap, the cache is trimmed at the front and extended at the end. Matching is + * limited to the updated window when a key is provided. + * + * @param key key to match; may be {@code null} to skip matching + * @param curBaseEdition new base edition + * @param ehDocnames cached document-name hashes to update + * @return edition number for the key, or {@code -1} when not matched + */ + private long handleFirstSlotBehind( + NodeSSK key, long curBaseEdition, RemoveRangeArrayList ehDocnames) { + if (firstSlot + ehDocnames.size() <= curBaseEdition) { + // No overlap. Clear it and start again. + ehDocnames.clear(); + firstSlot = curBaseEdition; + generate(curBaseEdition, WATCH_KEYS, ehDocnames); + return key == null ? -1 : innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); + } else { + // There is some overlap. Delete the first part of the array, then add stuff at the end. + // ehDocnames[i] is slot firstSlot + i + // We want to get rid of anything before curBaseEdition + // So the first slot that is useful is the slot at i = curBaseEdition - firstSlot + // Which is the new [0], whose edition is curBaseEdition + ehDocnames.removeRange(0, (int) (curBaseEdition - firstSlot)); + int size = ehDocnames.size(); + firstSlot = curBaseEdition; + generate(curBaseEdition + size, WATCH_KEYS - size, ehDocnames); + return key == null ? -1 : innerMatch(key, ehDocnames, WATCH_KEYS - size, size, firstSlot); + } + } + + /** + * Handles the case where {@code firstSlot} is ahead of the new base edition. + * + *

The method treats the regression as a transient condition and continues to use the current + * cache window. Matching is therefore performed against the existing cache rather than + * rebuilding it for the older base edition. + * + * @param key key to match; may be {@code null} to skip matching + * @param ehDocnames cached document-name hashes to consult + * @param curBaseEdition new base edition that lags behind {@code firstSlot} + * @return edition number for the key, or {@code -1} when not matched + */ + private long handleFirstSlotAhead( + NodeSSK key, RemoveRangeArrayList ehDocnames, long curBaseEdition) { + // Normal due to race conditions. We don't always report the new edition to the USKManager + // immediately. + // So ignore it. + if (LOG.isTraceEnabled()) + LOG.trace("Ignoring regression in match() from {} to {}", curBaseEdition, firstSlot); + return key == null ? -1 : innerMatch(key, ehDocnames, 0, ehDocnames.size(), firstSlot); + } + + /** + * Matches a key against a slice of the cached hash list. + * + *

The method compares the key's bytes against the cached hash window between {@code offset} + * and {@code offset + size}. It returns the edition number derived from {@code firstSlot} when + * a match is found. The scan is linear over the specified slice. + * + * @param key key to match; must not be null + * @param ehDocnames cached document-name hashes to scan + * @param offset start offset within the cache + * @param size number of entries to scan + * @param firstSlot edition represented by cache index 0 + * @return matched edition number, or {@code -1} when not found + */ + private long innerMatch( + NodeSSK key, + RemoveRangeArrayList ehDocnames, + int offset, + int size, + long firstSlot) { + byte[] data = key.getKeyBytes(); + for (int i = offset; i < (offset + size); i++) { + if (Arrays.equals(data, ehDocnames.get(i))) { + if (LOG.isDebugEnabled()) LOG.debug("Found edition {} for {}", firstSlot + i, origUSK); + return firstSlot + i; + } + } + return -1; + } + + /** + * Appends a series of document-name hashes to the cache. + * + *

The method derives {@link ClientSSK} instances for each edition starting at {@code + * baseEdition} and appends their document-name hashes to {@code ehDocnames}. The caller is + * responsible for ensuring the cache size does not exceed {@link #WATCH_KEYS}. + * + * @param baseEdition edition to start from + * @param keys number of keys to add + * @param ehDocnames cache to append to; must not be null + */ + private void generate(long baseEdition, int keys, RemoveRangeArrayList ehDocnames) { + if (LOG.isDebugEnabled()) LOG.debug("generate() from {} for {}", baseEdition, origUSK); + assert (baseEdition >= 0); + for (int i = 0; i < keys; i++) { + long ed = baseEdition + i; + ehDocnames.add(origUSK.getSSK(ed).ehDocname); + } + } + } + + /** + * Describes a specific edition lookup and its derived key. + * + *

Lookup instances are value-like and are considered equal based on their edition value. + * Callers populate {@link #key} and {@link #ignoreStore} when scheduling network fetches or + * datastore checks. The {@link #label} is used for log output only and may be null. + */ + static class Lookup { + /** + * Edition value represented by this lookup. + * + *

Equality and hashing are based solely on this value. + */ + long val; + + /** + * Client SSK key derived for the edition. + * + *

Set when the lookup is scheduled so callers can initiate fetches without recomputing. + */ + ClientSSK key; + + /** + * Whether this lookup should bypass store checks. + * + *

When {@code true}, the lookup is intended for direct fetch without checking the datastore. + */ + boolean ignoreStore; + + /** + * Descriptive label for logging, usually the owning USK. + * + *

This field is optional and may be {@code null}. + */ + String label; + + /** + * Creates an empty lookup descriptor. + * + *

Fields are populated by the scheduling methods that construct lookups. + */ + Lookup() {} + + @Override + public boolean equals(Object o) { + if (o instanceof Lookup lookup) { + return lookup.val == val; + } else return false; + } + + @Override + public int hashCode() { + return Long.hashCode(val); + } + + @Override + public String toString() { + return (label == null ? "?" : label) + ":" + val; + } + } +} diff --git a/src/main/java/network/crypta/client/async/USKPollingRound.java b/src/main/java/network/crypta/client/async/USKPollingRound.java new file mode 100644 index 0000000000..761e32c6fb --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKPollingRound.java @@ -0,0 +1,277 @@ +package network.crypta.client.async; + +import network.crypta.keys.USK; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Encapsulates completion checks and background rescheduling for a single USK polling round. + * + *

This helper owns the lightweight state that bridges store checks, polling attempts, and + * subscriber notifications while a {@link USKFetcher} progresses through one background polling + * cycle. It evaluates whether all required checks have finished, ensures that attempts have cooled + * down at least once, and emits progress callbacks when a round can be treated as finished for now. + * It also tracks the current backoff interval and decides when to reset the backoff based on + * observed progress in the manager. + * + *

The instance is mutable and not internally synchronized; callers are expected to invoke its + * methods from a scheduling thread or otherwise serialize access. Each instance is scoped to a + * single fetcher and USK, and it is typically reused across multiple scheduling ticks until the + * polling cycle completes. + * + *

    + *
  • Checks whether datastore scans, random probes, and DBR hints have settled. + *
  • Notifies {@link USKProgressCallback} subscribers when a round becomes idle. + *
  • Computes exponential backoff delays with capped sleep times. + *
+ */ +final class USKPollingRound { + /** Logger for debugging and lifecycle diagnostics. */ + private static final Logger LOG = LoggerFactory.getLogger(USKPollingRound.class); + + /** Coordinates and tracks in-flight polling attempts. */ + private final USKAttemptManager attempts; + + /** Runs datastore check cycles before scheduling attempts. */ + private final USKStoreCheckCoordinator storeChecks; + + /** Tracks date-based hint fetches that gate polling completion. */ + private final USKDateHintFetches dbrHintFetches; + + /** Provides a stable snapshot of subscribed callbacks. */ + private final USKSubscriberRegistry subscribers; + + /** Manager used to query the latest known slots. */ + private final USKManager uskManager; + + /** Base USK that is being polled by this round. */ + private final USK origUSK; + + /** Indicates whether scheduling is biased for real-time activity. */ + private final boolean realTimeFlag; + + /** Baseline sleep duration restored when progress is detected, in milliseconds. */ + private final long origSleepTime; + + /** Maximum sleep duration allowed during backoff, in milliseconds. */ + private final long maxSleepTime; + + /** Current sleep duration used for the next backoff interval, in milliseconds. */ + private long sleepTime; + + /** Tracks whether the round has completed its initial loop. */ + private boolean firstLoop; + + /** + * Creates a polling round helper for a single fetcher cycle. + * + *

The helper keeps references to stable collaborators from {@code context} and stores the + * current backoff window and loop state. The initial sleep time is used for the first delay and + * is later doubled (with a cap) until progress is observed. The baseline and maximum sleep times + * are retained so that backoff resets can restore the original interval without consulting the + * caller again. + * + * @param context shared collaborators used to resolve attempts, store checks, and subscribers; + * must be non-null and scoped to a single fetcher + * @param sleepTime initial backoff delay in milliseconds for the first rescheduling attempt + * @param firstLoop whether the round should treat the next scheduling step as the initial loop + * @param origSleepTime baseline delay in milliseconds to restore when progress is observed + * @param maxSleepTime upper bound in milliseconds for exponential backoff delays + */ + USKPollingRound( + USKPollingRoundContext context, + long sleepTime, + boolean firstLoop, + long origSleepTime, + long maxSleepTime) { + this.attempts = context.attempts(); + this.storeChecks = context.storeChecks(); + this.dbrHintFetches = context.dbrHintFetches(); + this.subscribers = context.subscribers(); + this.uskManager = context.uskManager(); + this.origUSK = context.origUSK(); + this.realTimeFlag = context.realTimeFlag(); + this.sleepTime = sleepTime; + this.firstLoop = firstLoop; + this.origSleepTime = origSleepTime; + this.maxSleepTime = maxSleepTime; + } + + /** + * Outcome of resolving polling attempts for a round. + * + *

The {@link #ready} flag indicates whether all prerequisite checks have finished, and the + * {@link #attempts} array provides a snapshot of polling attempts relevant to the completion + * decision. The snapshot may be empty when the round is not ready to complete. + */ + static final class PollingResolution { + /** True when the round is eligible for completion checks. */ + final boolean ready; + + /** Snapshot of polling attempts considered for completion. */ + final USKAttempt[] attempts; + + /** + * Creates a resolution snapshot for the current round. + * + * @param ready whether the round is ready for completion evaluation + * @param attempts snapshot of polling attempts; may be empty but never null + */ + PollingResolution(boolean ready, USKAttempt[] attempts) { + this.ready = ready; + this.attempts = attempts; + } + } + + /** + * Determines whether all prerequisite checks are complete and snapshots polling attempts. + * + *
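+ * <p>The result is consumed roughly as in {@link #checkFinishedForNow}:
+ * <pre>{@code
+ * PollingResolution res = resolvePollingAttemptsIfAllChecksDone(cancelled, completed);
+ * if (res.ready) {
+ *   // inspect res.attempts, e.g. require everInCooldown() on each before finishing the round
+ * }
+ * }</pre>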

The method checks for active datastore scans, running random probes, missing polling + * attempts, and outstanding DBR hint fetches. If any prerequisite is still in flight, it returns + * a non-ready resolution with an empty attempt list. When all checks are complete, it returns a + * ready resolution with a snapshot of current polling attempts for further evaluation. + * + * @param cancelled whether the owning fetcher has been canceled and should stop checking + * @param completed whether the owning fetcher has already completed and should not re-evaluate + * @return a resolution indicating readiness and a snapshot of polling attempts for the round + */ + PollingResolution resolvePollingAttemptsIfAllChecksDone(boolean cancelled, boolean completed) { + if (cancelled || completed) return new PollingResolution(false, new USKAttempt[0]); + if (storeChecks.isStoreCheckRunning()) { + if (LOG.isDebugEnabled()) + LOG.debug("Not finished because still running store checker on {}", this); + return new PollingResolution(false, new USKAttempt[0]); + } + if (attempts.hasRunningAttempts()) { + if (LOG.isDebugEnabled()) + LOG.debug("Not finished because running attempts (random probes) on {}", this); + return new PollingResolution(false, new USKAttempt[0]); + } + if (attempts.hasNoPollingAttempts()) { + if (LOG.isDebugEnabled()) + LOG.debug("Not finished because no polling attempts (not started???) on {}", this); + return new PollingResolution(false, new USKAttempt[0]); + } + if (dbrHintFetches.hasOutstanding()) { + if (LOG.isDebugEnabled()) + LOG.debug("Not finished because still waiting for DBR attempts on {}", this); + return new PollingResolution(false, new USKAttempt[0]); + } + return new PollingResolution(true, attempts.snapshotPollingAttempts()); + } + + /** + * Evaluates whether the current round is finished for now and notifies callbacks if appropriate. + * + *

This method first resolves whether prerequisite checks have completed, then confirms that + * every polling attempt has entered a cooldown at least once. If any attempt has not cooled down, + * the round remains active and no callbacks are fired. When all attempts have cooled down, it + * delegates to {@link #notifyFinishedForNow(ClientContext, boolean, boolean)} to inform progress + * subscribers. + * + * @param context client context used for callback notifications; must be non-null + * @param cancelled whether the owning fetcher has been canceled and should halt notifications + * @param completed whether the owning fetcher has already completed and should not notify + */ + void checkFinishedForNow(ClientContext context, boolean cancelled, boolean completed) { + PollingResolution res = resolvePollingAttemptsIfAllChecksDone(cancelled, completed); + if (!res.ready) return; + for (USKAttempt a : res.attempts) { + if (!a.everInCooldown()) { + if (LOG.isDebugEnabled()) + LOG.debug( + "Not finished because polling attempt {} never entered cooldown on {}", a, this); + return; + } + } + notifyFinishedForNow(context, cancelled, completed); + } + + /** + * Notifies progress callbacks that the round is finished for now. + * + *

The notification is skipped when the fetcher has been canceled or completed. When invoked, + * the method snapshots subscribers and calls {@link USKProgressCallback#onRoundFinished} for each + * eligible callback, allowing clients to observe that a steady-state polling cycle has settled. + * + * @param context client context forwarded to callbacks; must be non-null for valid notifications + * @param cancelled whether the owning fetcher has been canceled and should suppress callbacks + * @param completed whether the owning fetcher has completed and should suppress callbacks + */ + void notifyFinishedForNow(ClientContext context, boolean cancelled, boolean completed) { + if (LOG.isDebugEnabled()) + LOG.debug( + "Notifying finished for now on {} for {}{}", + this, + origUSK, + this.realTimeFlag ? " (realtime)" : " (bulk)"); + if (cancelled || completed) return; + USKCallback[] toCheck = subscribers.snapshotSubscribers(); + for (USKCallback cb : toCheck) { + if (cb instanceof USKProgressCallback callback) callback.onRoundFinished(context); + } + } + + /** + * Computes the next backoff delay for background polling and updates internal state. + * + *
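+ * <p>Illustrative numbers only (the actual intervals are supplied by the owning fetcher): with a
+ * starting and baseline sleep time of 60 s and a cap of 480 s, successive idle calls draw a
+ * random delay below 120 s, 240 s, 480 s, 480 s, ...; once the latest slot has advanced past the
+ * value captured at scheduling time (and past the suggested edition), the delay drops to zero
+ * and the interval resets to the 60 s baseline.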

The sleep interval is doubled on each call until it reaches {@link #maxSleepTime}. If the + * manager reports that progress has been made since the round was scheduled, the sleep interval + * is reset to {@link #origSleepTime}, {@link #firstLoop} is cleared, and the delay is set to zero + * so the next cycle runs immediately. The returned value is the delay in milliseconds to pass to + * the scheduler. + * + * @param context client context used for randomness when choosing the next delay + * @param valueAtSchedule latest slot value captured when the round was scheduled + * @return delay in milliseconds until the next polling cycle should be scheduled + */ + long rescheduleBackgroundPoll(ClientContext context, long valueAtSchedule) { + long valAtEnd = uskManager.lookupLatestSlot(origUSK); + long end; + long now = System.currentTimeMillis(); + long newSleepTime = sleepTime * 2; + if (newSleepTime > maxSleepTime) newSleepTime = maxSleepTime; + sleepTime = newSleepTime; + end = now + context.random.nextInt((int) sleepTime); + + if (valAtEnd > valueAtSchedule && valAtEnd > origUSK.suggestedEdition) { + sleepTime = origSleepTime; + firstLoop = false; + end = now; + if (LOG.isDebugEnabled()) + LOG.debug("We have advanced: at start, {} at end, {}", valueAtSchedule, valAtEnd); + } + if (LOG.isDebugEnabled()) + LOG.debug("Sleep time is {} this sleep is {} for {}", sleepTime, end - now, this); + return end - now; + } + + /** + * Returns the current backoff sleep interval. + * + * @return sleep duration in milliseconds for the next scheduling decision + */ + @SuppressWarnings("unused") + long sleepTime() { + return sleepTime; + } + + /** + * Indicates whether the round is still in its initial loop. + * + * @return {@code true} when the round has not yet exited the first loop + */ + boolean firstLoop() { + return firstLoop; + } + + /** + * Updates whether the polling round should treat the next cycle as the first loop. + * + * @param value {@code true} to mark the round as being in its first loop, otherwise {@code false} + */ + @SuppressWarnings({"unused", "SameParameterValue"}) + void setFirstLoop(boolean value) { + firstLoop = value; + } +} diff --git a/src/main/java/network/crypta/client/async/USKPollingRoundContext.java b/src/main/java/network/crypta/client/async/USKPollingRoundContext.java new file mode 100644 index 0000000000..97d0f351bf --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKPollingRoundContext.java @@ -0,0 +1,26 @@ +package network.crypta.client.async; + +import network.crypta.keys.USK; + +/** + * Shared dependencies for configuring a {@link USKPollingRound}. + * + *

This bundles the stable collaborators used during polling rounds so they can be reused when + * scheduling background polling. + * + * @param attempts polling attempt manager used to track active attempts + * @param storeChecks coordinator for datastore checks + * @param dbrHintFetches date-hint fetch coordinator + * @param subscribers registry for USK callbacks + * @param uskManager USK manager used to look up latest slots + * @param origUSK base USK that is being polled + * @param realTimeFlag whether polling is scheduled with real-time bias + */ +record USKPollingRoundContext( + USKAttemptManager attempts, + USKStoreCheckCoordinator storeChecks, + USKDateHintFetches dbrHintFetches, + USKSubscriberRegistry subscribers, + USKManager uskManager, + USK origUSK, + boolean realTimeFlag) {} diff --git a/src/main/java/network/crypta/client/async/USKPriorityPolicy.java b/src/main/java/network/crypta/client/async/USKPriorityPolicy.java new file mode 100644 index 0000000000..42a43935e6 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKPriorityPolicy.java @@ -0,0 +1,212 @@ +package network.crypta.client.async; + +import network.crypta.node.RequestStarter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Computes effective polling priority classes for USK fetchers. + * + *

This policy aggregates priority hints from subscriber callbacks and fetcher-level callbacks to + * determine the priority classes used by {@link USKAttemptManager} when scheduling background + * polls. Callers typically invoke {@link #updatePriorities(USKCallback[], USKFetcherCallback[], + * String)} whenever callback sets change so that the current polling priorities reflect the most + * urgent subscriber. The policy maintains the derived priorities as mutable state and exposes them + * through lightweight accessors. + * + *

The policy favors the minimum (highest urgency) priority class among all callbacks. When no + * callbacks are present, it resets to default normal and progress priorities. Instances are not + * thread-safe; callers should synchronize externally or confine usage to a single scheduling + * thread. The logic is intentionally conservative to avoid oscillation and uses the existing + * scheduler constants without performing any blocking work. + * + *

+ * <ul>
+ *   <li>Tracks current normal and progress polling priority classes.
+ *   <li>Resets priorities to defaults when no callbacks are registered.
+ *   <li>Triggers poll parameter reloads after any effective change.
+ * </ul>
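+ *
+ * <p>Conceptually the update is a minimum over every callback's polling priorities (a sketch;
+ * lower numeric values denote more urgent priority classes):
+ *
+ * <pre>{@code
+ * short normal = RequestStarter.PAUSED_PRIORITY_CLASS;
+ * short progress = RequestStarter.PAUSED_PRIORITY_CLASS;
+ * for (USKCallback cb : callbacks) {
+ *     normal = (short) Math.min(normal, cb.getPollingPriorityNormal());
+ *     progress = (short) Math.min(progress, cb.getPollingPriorityProgress());
+ * }
+ * }</pre>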
+ */ +final class USKPriorityPolicy { + /** Logger for priority updates and trace diagnostics. */ + private static final Logger LOG = LoggerFactory.getLogger(USKPriorityPolicy.class); + + /** Default polling priority for normal background checks. */ + private static final short DEFAULT_NORMAL_POLL_PRIORITY = RequestStarter.PREFETCH_PRIORITY_CLASS; + + /** Default polling priority for progress-oriented checks. */ + private static final short DEFAULT_PROGRESS_POLL_PRIORITY = RequestStarter.UPDATE_PRIORITY_CLASS; + + /** Current polling priority for normal background checks. */ + private short normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY; + + /** Current polling priority for progress-oriented checks. */ + private short progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY; + + /** Attempt manager that consumes polling priorities. */ + private final USKAttemptManager attempts; + + /** + * Creates a priority policy bound to a specific attempt manager. + * + *

The manager reference is used to reload polling parameters whenever derived priorities + * change. The policy does not take ownership of the manager and assumes its lifecycle matches + * that of the owning fetcher. + * + * @param attempts attempt manager that should be updated after priority changes; must be non-null + */ + USKPriorityPolicy(USKAttemptManager attempts) { + this.attempts = attempts; + } + + /** + * Returns the current normal polling priority class. + * + *

The value reflects the minimum priority requested by all callbacks or the default priority + * when no callbacks are present. + * + * @return priority class used for steady-state background polling + */ + short normalPriority() { + return normalPollPriority; + } + + /** + * Returns the current progress polling priority class. + * + *

The value reflects the minimum progress priority requested by callbacks, which can be more + * urgent than the normal priority when fast progress is desired. + * + * @return priority class used when progress-oriented polling is needed + */ + short progressPriority() { + return progressPollPriority; + } + + /** + * Recomputes polling priorities based on the active callback sets. + * + *
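+ * <p>A typical refresh from the owning fetcher looks like this (a sketch; {@code fetcherName} is
+ * only a logging label):
+ *
+ * <pre>{@code
+ * policy.updatePriorities(subscriberCallbacks, fetcherCallbacks, fetcherName);
+ * short normal = policy.normalPriority();
+ * short progress = policy.progressPriority();
+ * }</pre>
+ *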

The method aggregates the minimum normal and progress priorities across subscriber and + * fetcher callbacks. If no callbacks are present, it falls back to default priorities. After + * updating the derived priorities, it triggers a reload of poll parameters so that ongoing + * attempts adopt the new scheduling classes. The method is deterministic and idempotent for the + * same input arrays. + * + * @param subscribers subscriber callbacks providing polling priority preferences; must not be + * null but may be empty + * @param fetcherCallbacks fetcher callbacks providing polling priority preferences; must not be + * null but may be empty + * @param fetcherName human-readable identifier used only for debug logging + */ + void updatePriorities( + USKCallback[] subscribers, USKFetcherCallback[] fetcherCallbacks, String fetcherName) { + Prio prio = initialPrio(); + if (noCallbacks(subscribers, fetcherCallbacks)) { + setDefaultPriorities(fetcherName); + return; + } + + accumulatePriorities(subscribers, prio); + accumulatePriorities(fetcherCallbacks, prio); + + if (LOG.isDebugEnabled()) + LOG.debug( + "Updating priorities: normal={} progress={} for {}", + prio.normal, + prio.progress, + fetcherName); + normalPollPriority = prio.normal; + progressPollPriority = prio.progress; + attempts.reloadPollParameters(); + } + + /** + * Resets polling priorities to the default values and reloads poll parameters. + * + *

This is used when no callbacks provide priority hints. It restores normal and progress + * priorities to their configured defaults and then refreshes the attempt manager's scheduling + * parameters. + * + * @param fetcherName human-readable identifier used only for debug logging + */ + private void setDefaultPriorities(String fetcherName) { + normalPollPriority = DEFAULT_NORMAL_POLL_PRIORITY; + progressPollPriority = DEFAULT_PROGRESS_POLL_PRIORITY; + if (LOG.isDebugEnabled()) + LOG.debug( + "Updating priorities: normal = {} progress = {} for {}", + normalPollPriority, + progressPollPriority, + fetcherName); + attempts.reloadPollParameters(); + } + + /** Mutable container for derived polling priorities. */ + private static final class Prio { + /** Normal polling priority class. */ + short normal; + + /** Progress polling priority class. */ + short progress; + + /** Creates a priority container with unset values. */ + Prio() {} + } + + /** + * Creates a priority container initialized to the paused priority class. + * + * @return a new priority container with paused defaults + */ + private static Prio initialPrio() { + Prio p = new Prio(); + p.normal = RequestStarter.PAUSED_PRIORITY_CLASS; + p.progress = RequestStarter.PAUSED_PRIORITY_CLASS; + return p; + } + + /** + * Checks whether there are no callbacks influencing priority selection. + * + * @param localCallbacks subscriber callbacks to test + * @param fetcherCallbacks fetcher-level callbacks to test + * @return {@code true} when both callback arrays are empty + */ + private static boolean noCallbacks( + USKCallback[] localCallbacks, USKFetcherCallback[] fetcherCallbacks) { + return localCallbacks.length == 0 && fetcherCallbacks.length == 0; + } + + /** + * Accumulates priority preferences from subscriber callbacks. + * + * @param cbs callbacks providing priority hints; must not be null + * @param prio mutable container to update with minimum priorities + */ + private static void accumulatePriorities(USKCallback[] cbs, Prio prio) { + for (USKCallback cb : cbs) { + short n = cb.getPollingPriorityNormal(); + if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n); + if (n < prio.normal) prio.normal = n; + if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n); + short p = cb.getPollingPriorityProgress(); + if (p < prio.progress) prio.progress = p; + } + } + + /** + * Accumulates priority preferences from fetcher-level callbacks. + * + * @param cbs callbacks providing priority hints; must not be null + * @param prio mutable container to update with minimum priorities + */ + private static void accumulatePriorities(USKFetcherCallback[] cbs, Prio prio) { + for (USKFetcherCallback cb : cbs) { + short n = cb.getPollingPriorityNormal(); + if (LOG.isTraceEnabled()) LOG.trace("Normal priority for {} : {}", cb, n); + if (n < prio.normal) prio.normal = n; + if (LOG.isTraceEnabled()) LOG.trace("Progress priority for {} : {}", cb, n); + short p = cb.getPollingPriorityProgress(); + if (p < prio.progress) prio.progress = p; + } + } +} diff --git a/src/main/java/network/crypta/client/async/USKSchedulingCoordinator.java b/src/main/java/network/crypta/client/async/USKSchedulingCoordinator.java new file mode 100644 index 0000000000..85da97fe31 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKSchedulingCoordinator.java @@ -0,0 +1,173 @@ +package network.crypta.client.async; + +/** + * Coordinates scheduling decisions for a USK polling round. + * + *

This coordinator encapsulates the state required to decide whether a round should register a + * datastore check, schedule network activity, or conclude early when store-only checks are + * complete. Callers provide the current known edition value and an execution context; the + * coordinator updates its internal flags and returns a {@link SchedulePlan} describing the next + * action. The class keeps track of whether a scheduling cycle has started, when DBR hint fetches + * should gate scheduling, and the last value observed at schedule time. + * + *

The coordinator is mutable and synchronizes its public methods to keep the state consistent. + * It is typically owned by a {@link USKFetcher} and invoked from scheduling threads, so callers + * should avoid holding external locks while calling into it. The logic favors correctness over + * immediate scheduling by deferring actions until prerequisite datastore checks or DBR hint fetches + * have finished. + * + *

+ * <ul>
+ *   <li>Tracks whether a scheduling cycle has started and when to defer for DBR hints.
+ *   <li>Decides when to register datastore checks versus scheduling attempts.
+ *   <li>Exposes snapshot flags used to coordinate follow-up scheduling steps.
+ * </ul>
+ */ +final class USKSchedulingCoordinator { + /** Attempt manager used to schedule or inspect polling attempts. */ + private final USKAttemptManager attempts; + + /** Coordinator responsible for datastore store checks. */ + private final USKStoreCheckCoordinator storeChecks; + + /** DBR hint fetch tracker used to decide when to defer scheduling. */ + private final USKDateHintFetches dbrHintFetches; + + /** Whether the owning fetcher should operate in store-only mode. */ + private final boolean checkStoreOnly; + + /** Latest value captured when a scheduling cycle was built. */ + private long valueAtSchedule; + + /** Tracks whether the coordinator has started at least one scheduling cycle. */ + private boolean started; + + /** Tracks whether scheduling must wait until DBR hint fetches finish. */ + private boolean scheduleAfterDBRsDone; + + /** + * Creates a scheduling coordinator for a USK polling round. + * + *

The coordinator holds references to the attempt manager, store check coordinator, and DBR + * hint fetches so it can build a consistent schedule plan. The {@code checkStoreOnly} flag + * influences whether network activity is scheduled or whether the coordinator should conclude + * once datastore checks complete. + * + * @param attempts attempt manager that tracks polling attempts; must be non-null + * @param storeChecks store check coordinator used to register datastore checks; must be non-null + * @param dbrHintFetches DBR hint fetch tracker used to gate scheduling; must be non-null + * @param checkStoreOnly whether the round should avoid network fetches and only check the store + */ + USKSchedulingCoordinator( + USKAttemptManager attempts, + USKStoreCheckCoordinator storeChecks, + USKDateHintFetches dbrHintFetches, + boolean checkStoreOnly) { + this.attempts = attempts; + this.storeChecks = storeChecks; + this.dbrHintFetches = dbrHintFetches; + this.checkStoreOnly = checkStoreOnly; + } + + /** + * Plan returned by {@link #buildSchedulePlan(long, boolean, ClientContext, boolean)}. + * + *

The plan indicates whether a datastore check should be registered immediately, whether the + * caller should conclude the round, and whether store-only checking has completed. The flags are + * deliberately simple and are interpreted by the caller to decide the next scheduling step. + */ + static final class SchedulePlan { + /** Whether to register a datastore check immediately. */ + boolean registerNow; + + /** Whether the caller should stop scheduling and conclude the round. */ + boolean bye; + + /** Whether store-only checking has completed and should be finalized. */ + boolean completeCheckingStore; + + /** Creates an empty plan; fields default to {@code false}. */ + SchedulePlan() {} + } + + /** + * Builds the next scheduling plan for the current polling round. + * + *
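+ * <p>Callers typically interpret the returned plan along these lines (a sketch; the real
+ * follow-up logic lives in the owning fetcher):
+ *
+ * <pre>{@code
+ * USKSchedulingCoordinator.SchedulePlan plan =
+ *     coordinator.buildSchedulePlan(lookedUp, startedDBRs, context, firstLoop);
+ * if (plan.registerNow) {
+ *     // register the follow-up work with the scheduler right away
+ * }
+ * if (plan.completeCheckingStore) {
+ *     // store-only round: finish once outstanding datastore checks are done
+ * }
+ * }</pre>
+ *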

The method records the latest observed edition value, ensures polling attempts are scheduled + * when no attempts are running, and determines whether datastore checks should be registered + * immediately. When DBR hint fetches are in progress, it may defer scheduling until those hints + * are complete. In store-only mode, the returned plan can indicate that checking is complete once + * outstanding datastore checks finish. + * + * @param lookedUp latest edition value observed before scheduling; may be negative for unknown + * @param startedDBRs whether DBR hint fetches have already started for this round + * @param context client context used to schedule new polling attempts; must be non-null + * @param firstLoop whether the current scheduling cycle is the first loop of the round + * @return a schedule plan describing the next action the caller should take + */ + synchronized SchedulePlan buildSchedulePlan( + long lookedUp, boolean startedDBRs, ClientContext context, boolean firstLoop) { + boolean registerNow = false; + boolean completeCheckingStore; + valueAtSchedule = Math.max(lookedUp + 1, valueAtSchedule); + if ((!checkStoreOnly) + && !attempts.hasPendingAttempts() + && !attempts.hasRunningAttempts() + && attempts.hasNoPollingAttempts()) { + attempts.addNewAttempts(lookedUp, context, firstLoop); + } + started = true; + if (lookedUp <= 0 && startedDBRs) { + scheduleAfterDBRsDone = true; + } else if ((!scheduleAfterDBRsDone) || !dbrHintFetches.hasOutstanding()) { + registerNow = !storeChecks.fillKeysWatching(lookedUp, context); + } + completeCheckingStore = + checkStoreOnly && scheduleAfterDBRsDone && !storeChecks.isStoreCheckRunning(); + SchedulePlan plan = new SchedulePlan(); + plan.registerNow = registerNow; + plan.bye = false; + plan.completeCheckingStore = completeCheckingStore; + return plan; + } + + /** + * Returns whether a scheduling cycle has been started. + * + * @return {@code true} once a scheduling plan has been built for this coordinator + */ + synchronized boolean isStarted() { + return started; + } + + /** Resets the started flag so the next call treats the cycle as not yet started. */ + synchronized void resetStarted() { + started = false; + } + + /** + * Updates whether scheduling should wait for DBR hint fetches to complete. + * + * @param value {@code true} to defer scheduling until DBR hint fetches finish + */ + synchronized void setScheduleAfterDBRsDone(boolean value) { + scheduleAfterDBRsDone = value; + } + + /** + * Returns whether scheduling is currently deferred until DBR hint fetches finish. + * + * @return {@code true} when scheduling should wait for DBR hint completion + */ + synchronized boolean scheduleAfterDBRsDone() { + return scheduleAfterDBRsDone; + } + + /** + * Returns the latest value captured at schedule time. 
+ * + * @return the last {@code lookedUp} value recorded when building a schedule plan + */ + synchronized long valueAtSchedule() { + return valueAtSchedule; + } +} diff --git a/src/main/java/network/crypta/client/async/USKStoreCheckCoordinator.java b/src/main/java/network/crypta/client/async/USKStoreCheckCoordinator.java new file mode 100644 index 0000000000..5a38049287 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKStoreCheckCoordinator.java @@ -0,0 +1,572 @@ +package network.crypta.client.async; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import network.crypta.client.FetchContext; +import network.crypta.keys.Key; +import network.crypta.keys.USK; +import network.crypta.node.SendableGet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Coordinates datastore checking and store-check request lifecycle for USK polling. + * + *

This coordinator owns the lifecycle for datastore-only checks that precede or complement + * polling attempts. It registers store checkers with the scheduler, monitors their completion, and + * then decides whether to start polling attempts or conclude in store-only mode. The coordinator is + * constructed with shared dependencies and a callback interface that bridges back into the owning + * fetcher when scheduling and completion decisions are made. + * + *

The class is mutable and synchronizes around its running checker state to prevent concurrent + * registration. Callers generally invoke {@link #fillKeysWatching(long, ClientContext)} and {@link + * #preRegisterStoreChecker(USKStoreCheckerGetter, USKStoreChecker, ClientContext, boolean)} from + * scheduler threads. The design favors correctness and safe cancellation over aggressive + * parallelism; only one store check may run at a time, and callers must respect cancellation flags + * supplied via {@link USKStoreCheckCallbacks}. + * + *

+ * <ul>
+ *   <li>Registers datastore checkers and tracks whether one is active.
+ *   <li>Starts or resumes polling attempts after store checks complete.
+ *   <li>Supports store-only rounds that may terminate without network activity.
+ * </ul>
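+ *
+ * <p>In outline, a polling round interacts with the coordinator roughly as follows (a sketch of
+ * the sequence; the callback into {@code preRegisterStoreChecker} is made by the registered
+ * {@link USKStoreCheckerGetter}):
+ *
+ * <pre>{@code
+ * boolean pending = coordinator.fillKeysWatching(lastKnownSlot, context);
+ * // ...later, the registered getter calls back:
+ * //   coordinator.preRegisterStoreChecker(getter, checker, context, toNetwork);
+ * // which unregisters the getter, starts any queued attempts and re-runs fillKeysWatching.
+ * }</pre>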
+ */ +final class USKStoreCheckCoordinator { + /** Logger for store-check lifecycle events and diagnostics. */ + private static final Logger LOG = LoggerFactory.getLogger(USKStoreCheckCoordinator.class); + + /** Active store checker getter, or {@code null} when no store scan is running. */ + private USKStoreCheckerGetter runningStoreChecker; + + /** Watched key set used to derive datastore checks. */ + private final USKKeyWatchSet watchingKeys; + + /** Attempt manager used to schedule polling attempts after store checks. */ + private final USKAttemptManager attempts; + + /** Parent requester used for scheduling and network accounting. */ + private final ClientRequester parent; + + /** Whether this coordinator should avoid network fetches and only check the store. */ + private final boolean checkStoreOnly; + + /** USK manager used to query the latest known slot. */ + private final USKManager uskManager; + + /** Base USK being checked for datastore availability. */ + private final USK origUSK; + + /** Callback interface used to bridge to the owning fetcher. */ + private final USKStoreCheckCallbacks callbacks; + + /** Whether store checks should run with real-time scheduling bias. */ + private final boolean realTimeFlag; + + /** + * Parameters used to configure {@link USKStoreCheckCoordinator}. + * + *

This bundle captures the stable collaborators required to schedule store checks. It is + * constructed via the nested {@link Builder} to keep constructor signatures small and encourage + * explicit configuration. + */ + static final class Params { + /** Key watch set that supplies datastore checkers. */ + private final USKKeyWatchSet watchingKeys; + + /** Attempt manager that schedules polling attempts after store checks. */ + private final USKAttemptManager attempts; + + /** Parent requester used for network scheduling and priority decisions. */ + private final ClientRequester parent; + + /** Whether the fetcher should perform store-only checks without network activity. */ + private final boolean checkStoreOnly; + + /** Manager used to query the latest known slot values. */ + private final USKManager uskManager; + + /** Base USK that is being checked. */ + private final USK origUSK; + + /** Callback interface used to notify the owning fetcher. */ + private final USKStoreCheckCallbacks callbacks; + + /** Whether store checks should run with real-time bias. */ + private final boolean realTimeFlag; + + /** + * Creates a parameter bundle from the provided builder. + * + * @param builder builder that supplies all required fields + */ + private Params(Builder builder) { + this.watchingKeys = builder.watchingKeys; + this.attempts = builder.attempts; + this.parent = builder.parent; + this.checkStoreOnly = builder.checkStoreOnly; + this.uskManager = builder.uskManager; + this.origUSK = builder.origUSK; + this.callbacks = builder.callbacks; + this.realTimeFlag = builder.realTimeFlag; + } + + /** + * Returns a new builder for assembling {@link Params}. + * + * @return a fresh builder instance with unset fields + */ + static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link Params}. + * + *

Each setter returns the builder to allow chaining. Call {@link #build()} once all fields + * are configured. + */ + static final class Builder { + /** Key watch set that supplies datastore checkers. */ + private USKKeyWatchSet watchingKeys; + + /** Attempt manager that schedules polling attempts after store checks. */ + private USKAttemptManager attempts; + + /** Parent requester used for network scheduling and priority decisions. */ + private ClientRequester parent; + + /** Whether the fetcher should perform store-only checks without network activity. */ + private boolean checkStoreOnly; + + /** Manager used to query the latest known slot values. */ + private USKManager uskManager; + + /** Base USK that is being checked. */ + private USK origUSK; + + /** Callback interface used to notify the owning fetcher. */ + private USKStoreCheckCallbacks callbacks; + + /** Whether store checks should run with real-time bias. */ + private boolean realTimeFlag; + + /** Creates a new builder with unset fields. */ + Builder() {} + + /** + * Sets the key watch set used to derive datastore checkers. + * + * @param watchingKeys watch set used to build store checkers; must be non-null + * @return this builder for method chaining + */ + Builder watchingKeys(USKKeyWatchSet watchingKeys) { + this.watchingKeys = watchingKeys; + return this; + } + + /** + * Sets the attempt manager used to schedule polling attempts. + * + * @param attempts attempt manager to be updated after store checks; must be non-null + * @return this builder for method chaining + */ + Builder attempts(USKAttemptManager attempts) { + this.attempts = attempts; + return this; + } + + /** + * Sets the parent requester used for scheduling decisions. + * + * @param parent requester used to schedule network activity; must be non-null + * @return this builder for method chaining + */ + Builder parent(ClientRequester parent) { + this.parent = parent; + return this; + } + + /** + * Sets whether the coordinator should only check the store. + * + * @param checkStoreOnly {@code true} to avoid network fetches and only check the store + * @return this builder for method chaining + */ + Builder checkStoreOnly(boolean checkStoreOnly) { + this.checkStoreOnly = checkStoreOnly; + return this; + } + + /** + * Sets the USK manager used to query the latest known slot. + * + * @param uskManager manager used to look up slot values; must be non-null + * @return this builder for method chaining + */ + Builder uskManager(USKManager uskManager) { + this.uskManager = uskManager; + return this; + } + + /** + * Sets the base USK being checked. + * + * @param origUSK base USK to check; must be non-null + * @return this builder for method chaining + */ + Builder origUSK(USK origUSK) { + this.origUSK = origUSK; + return this; + } + + /** + * Sets the callbacks used to notify the owning fetcher. + * + * @param callbacks callback interface for completion and scheduling events; must be non-null + * @return this builder for method chaining + */ + Builder callbacks(USKStoreCheckCallbacks callbacks) { + this.callbacks = callbacks; + return this; + } + + /** + * Sets whether scheduling should use real-time bias. + * + * @param realTimeFlag {@code true} to prefer real-time scheduling priorities + * @return this builder for method chaining + */ + Builder realTimeFlag(boolean realTimeFlag) { + this.realTimeFlag = realTimeFlag; + return this; + } + + /** + * Builds the {@link Params} instance from the configured fields. 
+ * + * @return an immutable parameter bundle for the coordinator + */ + Params build() { + return new Params(this); + } + } + } + + /** + * Creates a coordinator using a parameter bundle. + * + *
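+ * <p>A typical construction, using the nested builder (illustrative only; the collaborator
+ * values are supplied by the owning fetcher):
+ *
+ * <pre>{@code
+ * USKStoreCheckCoordinator coordinator =
+ *     new USKStoreCheckCoordinator(
+ *         USKStoreCheckCoordinator.Params.builder()
+ *             .watchingKeys(watchingKeys)
+ *             .attempts(attempts)
+ *             .parent(parent)
+ *             .checkStoreOnly(false)
+ *             .uskManager(uskManager)
+ *             .origUSK(origUSK)
+ *             .callbacks(callbacks)
+ *             .realTimeFlag(realTimeFlag)
+ *             .build());
+ * }</pre>
+ *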

The parameter bundle should contain fully initialized collaborators that remain valid for + * the lifetime of the coordinator. + * + * @param params parameter bundle with collaborators and scheduling flags; must be non-null + */ + USKStoreCheckCoordinator(Params params) { + this.watchingKeys = params.watchingKeys; + this.attempts = params.attempts; + this.parent = params.parent; + this.checkStoreOnly = params.checkStoreOnly; + this.uskManager = params.uskManager; + this.origUSK = params.origUSK; + this.callbacks = params.callbacks; + this.realTimeFlag = params.realTimeFlag; + } + + /** + * Starts or continues datastore checking for watched keys. + * + *
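+ * <p>Callers mostly use the return value to detect the "nothing to check" case (a sketch;
+ * {@code lastKnownSlot} is whatever edition the caller last looked up):
+ *
+ * <pre>{@code
+ * if (!storeChecks.fillKeysWatching(lastKnownSlot, context)) {
+ *     // no datastore checker is needed; proceed straight to normal scheduling
+ * }
+ * }</pre>
+ *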

The coordinator ensures only one store checker is active at a time. If a checker is already + * running, the method returns {@code true} to indicate that no new registration was performed. If + * there are no datastore checkers to run, it returns {@code false} to signal that no store check + * is required. + * + * @param ed latest known edition used to seed datastore checks + * @param context client context used to register the store checker; must not be null + * @return {@code true} when a store check is already running or was started; {@code false} when + * no store check is required + */ + @SuppressWarnings("BooleanMethodIsAlwaysInverted") + boolean fillKeysWatching(long ed, ClientContext context) { + synchronized (this) { + // Do not run a new one until this one has finished. + // USKStoreCheckerGetter itself will automatically call back to fillKeysWatching, so there is + // no + // chance of losing it. + if (runningStoreChecker != null) return true; + USKStoreChecker checker = buildStoreChecker(ed); + if (checker == null) { + if (LOG.isDebugEnabled()) LOG.debug("No datastore checker"); + return false; + } + + runningStoreChecker = new USKStoreCheckerGetter(this, callbacks, parent, checker); + } + try { + context + .getSskFetchScheduler(realTimeFlag) + .register(null, new SendableGet[] {runningStoreChecker}, false, null, false); + } catch (Exception t) { + USKStoreCheckerGetter storeChecker; + synchronized (this) { + storeChecker = runningStoreChecker; + runningStoreChecker = null; + } + LOG.error("Unable to start: {}", t, t); + if (storeChecker != null) { + try { + storeChecker.unregister(context, storeChecker.getPriorityClass()); + } catch (Exception _) { + // Ignore, hopefully it's already unregistered + } + } + } + if (LOG.isDebugEnabled()) LOG.debug("Registered {} for {}", runningStoreChecker, callbacks); + return true; + } + + /** + * Completes registration after a datastore checker finishes its pre-registration phase. + * + *

The method unregisters the checker, marks it complete, then schedules any pending attempts + * based on the datastore results. When running in store-only mode, it may immediately conclude + * the round after DBR handling. + * + * @param storeChecker active store checker getter instance; must not be null + * @param checker datastore checker wrapper used to mark completion; must not be null + * @param context client context used for scheduling and callbacks; must not be null + * @param toNetwork whether the scheduler intended a network sending for the checker + * @return {@code toNetwork} to preserve scheduler semantics; never sends network requests here + */ + @SuppressWarnings("java:S3516") + boolean preRegisterStoreChecker( + USKStoreCheckerGetter storeChecker, + USKStoreChecker checker, + ClientContext context, + boolean toNetwork) { + if (callbacks.isCancelled()) { + storeChecker.unregister(context, storeChecker.getPriorityClass()); + synchronized (this) { + runningStoreChecker = null; + } + if (LOG.isDebugEnabled()) + LOG.debug("StoreChecker preRegister aborted: fetcher cancelled/completed"); + return toNetwork; // cancel network send when scheduler planned to send + // value ignored by scheduler when toNetwork == false + } + + storeChecker.unregister(context, storeChecker.getPriorityClass()); + + USKAttempt[] attemptsToStart; + synchronized (this) { + runningStoreChecker = null; + // Note: optionally start USKAttempts only when a datastore check shows no progress. + attemptsToStart = attempts.snapshotAttemptsToStart(); + attempts.clearAttemptsToStart(); + if (callbacks.isCancelled()) attemptsToStart = new USKAttempt[0]; + } + + checker.checked(); + + if (LOG.isDebugEnabled()) + LOG.debug( + "Checked datastore, finishing registration for {} checkers for {}", + attemptsToStart.length, + origUSK); + + if (attemptsToStart.length > 0) { + parent.toNetwork(context); + callbacks.notifySendingToNetwork(context); + } + + callbacks.processAttemptsAfterStoreCheck(attemptsToStart, context); + + long lastEd = uskManager.lookupLatestSlot(origUSK); + if (!fillKeysWatching(lastEd, context) && checkStoreOnly) { + if (LOG.isDebugEnabled()) LOG.debug("Just checking store, terminating {} ...", callbacks); + if (callbacks.shouldDeferUntilDBRs()) { + callbacks.setScheduleAfterDBRsDone(true); + } else { + callbacks.finishSuccess(context); + } + } + + return toNetwork; // Store checker never sends network requests itself + // Value is ignored when toNetwork == false + } + + /** + * Returns whether a store check is currently running. + * + * @return {@code true} if a store checker getter is active, otherwise {@code false} + */ + boolean isStoreCheckRunning() { + synchronized (this) { + return runningStoreChecker != null; + } + } + + /** + * Cancels any running store checker and unregisters it from the scheduler. + * + *

If no checker is running, the method is a no-op. + * + * @param context client context used to unregister the checker; must not be null + */ + void cancelStoreChecker(ClientContext context) { + USKStoreCheckerGetter checker; + synchronized (this) { + checker = runningStoreChecker; + runningStoreChecker = null; + } + if (checker != null) { + checker.unregister(context, checker.getPriorityClass()); + } + } + + /** + * Builds a store checker for the given edition. + * + * @param ed edition used to select datastore sub-checkers + * @return a store checker instance, or {@code null} if no checks are required + */ + private USKStoreChecker buildStoreChecker(long ed) { + List checkers = watchingKeys.getDatastoreCheckers(ed); + if (checkers == null) return null; + return new USKStoreChecker(checkers); + } + + /** + * Bundles of datastore sub-checkers used to query the local store for candidate editions. + * + *
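+ * <p>The merge step is essentially a de-duplicating concatenation of the sub-checkers' key
+ * arrays, preserving first-seen order (a sketch using only standard collections; the real method
+ * preallocates and trims an array instead):
+ *
+ * <pre>{@code
+ * java.util.LinkedHashSet<Key> merged = new java.util.LinkedHashSet<>();
+ * for (USKKeyWatchSet.KeyList.StoreSubChecker sub : checkers) {
+ *     merged.addAll(java.util.Arrays.asList(sub.keysToCheck));
+ * }
+ * Key[] keys = merged.toArray(new Key[0]);
+ * }</pre>
+ *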

This helper merges keys from multiple sources and forwards completion notifications back to + * the underlying sub-checkers. + */ + final class USKStoreChecker { + + /** Sub-checkers contributing keys to a query in the datastore. */ + final USKKeyWatchSet.KeyList.StoreSubChecker[] checkers; + + /** + * Creates a store checker from a list of sub-checkers. + * + * @param c sub-checkers that contribute keys; must not be null + */ + public USKStoreChecker(List c) { + checkers = c.toArray(new USKKeyWatchSet.KeyList.StoreSubChecker[0]); + } + + /** + * Creates a store checker from an array of sub-checkers. + * + * @param checkers2 sub-checker array to use directly; must not be null + */ + @SuppressWarnings("unused") + public USKStoreChecker(USKKeyWatchSet.KeyList.StoreSubChecker[] checkers2) { + checkers = checkers2; + } + + /** + * Returns the merged list of keys to check in the datastore. + * + * @return array of keys to check; may be empty + */ + public Key[] getKeys() { + if (checkers.length == 0) return new Key[0]; + if (checkers.length == 1) return checkers[0].keysToCheck; + return mergeKeysFromCheckers(); + } + + /** + * Merges keys from all sub-checkers into a deduplicated array. + * + * @return merged array of keys to check in the datastore + */ + private Key[] mergeKeysFromCheckers() { + int x = 0; + for (USKKeyWatchSet.KeyList.StoreSubChecker checker : checkers) { + x += checker.keysToCheck.length; + } + Key[] keys = new Key[x]; + int ptr = 0; + // Note: a more efficient merging algorithm could consider ranges. + HashSet check = new HashSet<>(); + for (USKKeyWatchSet.KeyList.StoreSubChecker checker : checkers) { + for (Key k : checker.keysToCheck) { + if (!check.add(k)) continue; + keys[ptr++] = k; + } + } + if (keys.length != ptr) { + keys = Arrays.copyOf(keys, ptr); + } + return keys; + } + + /** Notifies all sub-checkers that their datastore checks have completed. */ + public void checked() { + for (USKKeyWatchSet.KeyList.StoreSubChecker checker : checkers) { + checker.checked(); + } + } + } + + /** Callbacks used by {@link USKStoreCheckCoordinator} to coordinate with the owning fetcher. */ + interface USKStoreCheckCallbacks { + /** + * Completes the fetcher successfully after store-only checking finishes. + * + * @param context client context used to complete and notify callbacks; must be non-null + */ + void finishSuccess(ClientContext context); + + /** + * Notifies subscribers that network activity is about to begin. + * + * @param context client context used to notify subscribers; must be non-null + */ + void notifySendingToNetwork(ClientContext context); + + /** + * Processes attempts after a store check completes. + * + * @param attempts polling attempts to start or update; may be empty but not null + * @param context client context used for scheduling and callbacks; must be non-null + */ + void processAttemptsAfterStoreCheck(USKAttempt[] attempts, ClientContext context); + + /** + * Determines whether scheduling should be deferred until DBR hints finish. + * + * @return {@code true} to defer scheduling until DBR hint fetches are complete + */ + boolean shouldDeferUntilDBRs(); + + /** + * Updates whether scheduling should wait for DBR hints to finish. + * + * @param value {@code true} to defer scheduling until DBR hint fetches complete + */ + void setScheduleAfterDBRsDone(boolean value); + + /** + * Indicates whether the owning fetcher has been canceled. 
+ * + * @return {@code true} if the fetcher is canceled and should stop scheduling + */ + boolean isCancelled(); + + /** + * Returns the fetch context used for store check operations. + * + * @return fetch context used by the owning fetcher + */ + FetchContext fetcherContext(); + + /** + * Returns the owning fetcher instance. + * + * @return the fetcher that owns this coordinator + */ + USKFetcher fetcher(); + } +} diff --git a/src/main/java/network/crypta/client/async/USKStoreCheckerGetter.java b/src/main/java/network/crypta/client/async/USKStoreCheckerGetter.java index e7b877e156..0d9c4a0d6e 100644 --- a/src/main/java/network/crypta/client/async/USKStoreCheckerGetter.java +++ b/src/main/java/network/crypta/client/async/USKStoreCheckerGetter.java @@ -13,37 +13,44 @@ * A {@link SendableGet} that performs a local-only datastore presence probe for candidate USK * editions. * - *

This getter is created by {@link USKFetcher} when it wants to cheaply answer the question - * "does the datastore already contain any likely next editions?" before attempting any network - * fetch. It exposes a set of candidate {@link Key}s via {@link #listKeys()} and relies on the - * surrounding request machinery to perform local checks only; it does not select a single key to - * send, and it does not initiate network traffic itself. + *

This getter is created by {@link USKStoreCheckCoordinator} when it wants to cheaply answer the + * question "does the datastore already contain any likely next editions?" before attempting any + * network fetch. It exposes a set of candidate {@link Key}s via {@link #listKeys()} and relies on + * the surrounding request machinery to perform local checks only; it does not select a single key + * to send, and it does not initiate network traffic itself. * *

Lifecycle-wise, the instance is intended to be single-shot: {@link #preRegister(ClientContext, - * boolean)} delegates to {@link USKFetcher#preRegisterStoreChecker(USKStoreCheckerGetter, - * USKFetcher.USKStoreChecker, ClientContext, boolean)} and then permanently marks the request as - * done so that subsequent scheduling treats it as canceled. This keeps the store-check wiring + * boolean)} delegates to {@link + * USKStoreCheckCoordinator#preRegisterStoreChecker(USKStoreCheckerGetter, + * USKStoreCheckCoordinator.USKStoreChecker, ClientContext, boolean)} and then permanently marks the + * request as done so that later scheduling treats it as canceled. This keeps the store-check wiring * separate from {@code USKFetcher}'s polling logic, reducing coupling and making the probe behavior * explicit. * *

This class does not perform its own synchronization; it assumes the threading model used by - * the request scheduler and the owning {@link USKFetcher}. + * the request scheduler and the owning {@link USKStoreCheckCoordinator}. * *

 * <ul>
 *   <li>Supplies candidate keys to probe via {@link #listKeys()}.
- *   <li>Delegates registration and accounting to the owning {@link USKFetcher}.
+ *   <li>Delegates registration and accounting to the owning coordinator.
 *   <li>Cancels itself after registration to remain single-shot.
 * </ul>
* - * @see USKFetcher - * @see USKFetcher.USKStoreChecker + * @see USKStoreCheckCoordinator + * @see USKStoreCheckCoordinator.USKStoreChecker */ final class USKStoreCheckerGetter extends SendableGet { - /** Owning fetcher that provides context, policy, and accounting for this probe. */ - private final transient USKFetcher fetcher; + /** Coordinator for store-check lifecycle and callbacks. */ + private final transient USKStoreCheckCoordinator coordinator; - /** Candidate-key provider used to enumerate likely USK edition datastore keys. */ - private final transient USKFetcher.USKStoreChecker checker; + /** Callbacks for fetcher-level state needed by the store check. */ + private final transient USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks; + + /** Candidate-key provider used to list likely USK edition datastore keys. */ + private final transient USKStoreCheckCoordinator.USKStoreChecker checker; + + /** Request the owner supplied at construction and passed to the superclass. */ + private final ClientRequester owner; /** * Tracks whether {@link #preRegister(ClientContext, boolean)} has run and this request is @@ -54,18 +61,24 @@ final class USKStoreCheckerGetter extends SendableGet { /** * Creates a new local-only store-check getter for a single USK polling pass. * - *

The instance delegates most behavior to {@code fetcher} and {@code checker} and is designed - * to be short-lived: once {@link #preRegister(ClientContext, boolean)} completes, the getter - * marks itself done so that the scheduler stops considering it for further work. + *

The instance delegates most behavior to {@code coordinator} and {@code checker} and is + * designed to be short-lived: once {@link #preRegister(ClientContext, boolean)} completes, the + * getter marks itself done so that the scheduler stops considering it for further work. * - * @param fetcher owning {@link USKFetcher} that supplies context and policy. - * @param parent request owner used for scheduling and real-time flag. + * @param coordinator store-check coordinator for lifecycle events. + * @param callbacks fetcher-level callbacks used for context and state. + * @param owner request the owner used for scheduling and real-time flag. * @param checker candidate-key provider used for datastore probing decisions. */ USKStoreCheckerGetter( - USKFetcher fetcher, ClientRequester parent, USKFetcher.USKStoreChecker checker) { - super(parent, parent.realTimeFlag()); - this.fetcher = fetcher; + USKStoreCheckCoordinator coordinator, + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks, + ClientRequester owner, + USKStoreCheckCoordinator.USKStoreChecker checker) { + super(owner, owner.realTimeFlag()); + this.coordinator = coordinator; + this.callbacks = callbacks; + this.owner = owner; this.checker = checker; } @@ -74,14 +87,14 @@ final class USKStoreCheckerGetter extends SendableGet { * *

This implementation reuses the context configured on the owning {@link USKFetcher} and * returns the exact instance stored on the fetcher (no defensive copy). Sharing the context keeps - * datastore behavior and fetch-policy settings consistent between the probe and any subsequent - * USK polling actions. + * datastore behavior and fetch-policy settings consistent between the probe and any later USK + * polling actions. * * @return the fetch context to use for store checks, shared with the owning fetcher. */ @Override public FetchContext getContext() { - return fetcher.ctx; + return callbacks.fetcherContext(); } /** @@ -120,10 +133,11 @@ public ClientKey getKey(SendableRequestItem token) { /** * Lists the candidate datastore keys to probe for likely USK editions. * - *

The returned set is determined by {@link USKFetcher.USKStoreChecker} and represents the - * editions that the owning {@link USKFetcher} considers plausible next steps. The scheduler uses - * this list for local store checking only; this getter never turns these keys into network - * requests directly. This method returns the array provided by the checker without copying it. + *

The returned set is determined by {@link USKStoreCheckCoordinator.USKStoreChecker} and + * represents the editions that the owning {@link USKFetcher} considers plausible next steps. The + * scheduler uses this list for local store checking only; this getter never turns these keys into + * network requests directly. This method returns the array provided by the checker without + * copying it. * * @return an array of candidate {@link Key} instances to probe; may be empty. */ @@ -136,8 +150,8 @@ public Key[] listKeys() { * Handles a failure for this getter. * *

Failures are treated as non-fatal for the local store-check probe. The higher-level {@link - * USKFetcher} logic decides how to proceed (for example, whether to attempt a network fetch), so - * this callback intentionally performs no action. + * USKStoreCheckCoordinator} logic decides how to proceed (for example, whether to attempt a + * network fetch), so this callback intentionally performs no action. * *

The parameters are accepted to satisfy the {@link SendableGet} contract but are otherwise * ignored. @@ -154,10 +168,11 @@ public void onFailure(LowLevelGetException e, SendableRequestItem token, ClientC /** * Registers this getter with the scheduler, delegating the actual work to the owning fetcher. * - *

This method forwards to {@link USKFetcher#preRegisterStoreChecker(USKStoreCheckerGetter, - * USKFetcher.USKStoreChecker, ClientContext, boolean)} and then marks the request as done in a - * {@code finally} block so that {@link #isCancelled()} returns {@code true} afterward. It is - * intended to run once per instance as part of a single store-check pass. + *

This method forwards to {@link + * USKStoreCheckCoordinator#preRegisterStoreChecker(USKStoreCheckerGetter, + * USKStoreCheckCoordinator.USKStoreChecker, ClientContext, boolean)} and then marks the request + * as done in a {@code finally} block so that {@link #isCancelled()} returns {@code true} + * afterward. It is intended to run once per instance as part of a single store-check pass. * * @param context client context used during registration; must not be null. * @param toNetwork whether the scheduler is attempting a network registration; forwarded as-is. @@ -166,18 +181,18 @@ public void onFailure(LowLevelGetException e, SendableRequestItem token, ClientC @Override public boolean preRegister(ClientContext context, boolean toNetwork) { try { - return fetcher.preRegisterStoreChecker(this, checker, context, toNetwork); + return coordinator.preRegisterStoreChecker(this, checker, context, toNetwork); } finally { done = true; } } /** - * Selects a key to send based on local-fetching state. + * Selects a key to send based on the local-fetching state. * *

This getter never selects a network-sendable key. It exists only to drive local store * checking via {@link #listKeys()}, and the input parameters are unused. Returning {@code null} - * prevents any attempt to schedule a network send for this helper request. As a result, the + * prevents the scheduler from ever attempting a network send for this helper request. As a result, the * scheduler sees no sendable work from this getter. * * @param keys keys currently being fetched locally; ignored by this implementation. @@ -201,7 +216,7 @@ public SendableRequestItem chooseKey(KeysFetchingLocally keys, ClientContext con */ @Override public long countAllKeys(ClientContext context) { - return fetcher.countKeys(); + return callbacks.fetcher().countKeys(); } /** @@ -225,14 +240,14 @@ public long countSendableKeys(ClientContext context) { * *

Although this getter does not perform network I/O, it still participates in the same * scheduling and accounting paths as other requests. Selecting the client based on the real-time - * flag of the owning {@code parent} keeps the probe aligned with the rest of the USK polling - * workflow and ensures it is attributed to the correct request queue. + * flag of the owning requester keeps the probe aligned with the rest of the USK polling workflow + * and ensures it is attributed to the correct request queue. * - * @return the request client matching the parent's real-time scheduling mode. + * @return the request client matching the owner's real-time scheduling mode. */ @Override public RequestClient getClient() { - return parent.realTimeFlag() ? USKManager.rcRT : USKManager.rcBulk; + return owner.realTimeFlag() ? USKManager.rcRT : USKManager.rcBulk; } /** @@ -240,14 +255,14 @@ public RequestClient getClient() { * *

The request machinery uses this link to attribute accounting and cancellation. This getter * is a helper object and does not represent an independent client request, so it returns the - * parent requester supplied at construction time. Callers should treat the returned requester as - * the authoritative owner of this probe. + * owner requester supplied at construction time. Callers should treat the returned requester as + * the authoritative owner of this probe and its scheduling. * - * @return the parent requester that owns this store-check probe. + * @return the owner requester that owns this store-check probe. */ @Override public ClientRequester getClientRequest() { - return parent; + return owner; } /** @@ -261,7 +276,7 @@ public ClientRequester getClientRequest() { */ @Override public short getPriorityClass() { - return fetcher.getPriorityClass(); + return callbacks.fetcher().getPriorityClass(); } /** @@ -276,7 +291,7 @@ public short getPriorityClass() { */ @Override public boolean isCancelled() { - return done || fetcher.isCancelled(); + return done || callbacks.isCancelled(); } /** @@ -285,7 +300,7 @@ public boolean isCancelled() { *

USK datastore lookups are performed using SSK-derived keys, so this getter always reports * {@code true} to match the underlying key type expectations of the request machinery. This * classification can influence request routing, accounting, and key-handling behavior. It has no - * side effects and does not vary per instance. + * side effects and does not vary, per instance. * * @return {@code true}, as this getter operates on SSK-derived keys. */ @@ -323,6 +338,6 @@ public long getWakeupTime(ClientContext context, long now) { */ @Override protected ClientGetState getClientGetState() { - return fetcher; + return callbacks.fetcher(); } } diff --git a/src/main/java/network/crypta/client/async/USKSubscriberRegistry.java b/src/main/java/network/crypta/client/async/USKSubscriberRegistry.java new file mode 100644 index 0000000000..e9ede283c9 --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKSubscriberRegistry.java @@ -0,0 +1,210 @@ +package network.crypta.client.async; + +import java.util.HashMap; +import java.util.HashSet; +import network.crypta.keys.USK; + +/** + * Tracks USK subscribers, edition hints, and polling priority preferences. + * + *

The registry maintains a set of {@link USKCallback} subscribers and their associated edition + * hints. It updates the {@link USKKeyWatchSet} with subscriber hints, recalculates polling + * priorities through {@link USKPriorityPolicy}, and exposes snapshot views of registered + * subscribers. Callers generally use it when adding or removing subscribers from a {@link + * USKFetcher} so that the polling attempts remain aligned with the most recent subscriber state. + * + *

The registry is mutable and synchronizes around the subscriber state. It does not synchronize + * accesses to the {@link USKKeyWatchSet} or {@link USKManager}; those collaborators are expected to + * be thread-safe or externally synchronized. Hint updates and priority changes are applied in a + * predictable sequence: update the registry, refresh priorities, and then update watching keys with + * the latest hint snapshot. + * + *

+ * <ul>
+ *   <li>Stores subscriber hints used to bias edition probing.
+ *   <li>Maintains polling priority preferences for normal and progress modes.
+ *   <li>Provides snapshot arrays for use by scheduling and notification paths.
+ * </ul>
+ */ +final class USKSubscriberRegistry { + /** Live subscriber set used for callback updates and snapshots. */ + private final HashSet subscribers = new HashSet<>(); + + /** Edition hint values supplied by subscribers, keyed by callback. */ + private final HashMap subscriberHints = new HashMap<>(); + + /** Watched the key set that consumes subscriber hints. */ + private final USKKeyWatchSet watchingKeys; + + /** USK manager used to look up the latest known slot. */ + private final USKManager uskManager; + + /** Priority policy that aggregates polling preferences. */ + private final USKPriorityPolicy priorityPolicy; + + /** Base USK used for lookup and hint interpretation. */ + private final USK origUSK; + + /** + * Creates a subscriber registry bound to a specific USK fetcher. + * + *

The registry holds the dependencies needed to update watch keys and compute polling + * priorities. It assumes the {@code attempts} manager and {@code uskManager} remain valid for the + * lifetime of the owning fetcher. + * + * @param watchingKeys watch set updated with subscriber hints; must be non-null + * @param uskManager manager used to query latest slot values; must be non-null + * @param attempts attempt manager used by the priority policy; must be non-null + * @param origUSK base USK that anchors hint and lookup calculations; must be non-null + */ + USKSubscriberRegistry( + USKKeyWatchSet watchingKeys, USKManager uskManager, USKAttemptManager attempts, USK origUSK) { + this.watchingKeys = watchingKeys; + this.uskManager = uskManager; + this.priorityPolicy = new USKPriorityPolicy(attempts); + this.origUSK = origUSK; + } + + /** + * Adds a subscriber and updates polling priorities and watch hints. + * + *
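+ * <p>Typical usage from the owning fetcher (a sketch; {@code hint} is the edition the subscriber
+ * already knows about and {@code fetcherName} is only a logging label):
+ *
+ * <pre>{@code
+ * registry.addSubscriber(cb, hint, fetcherCallbacks, fetcherName);
+ * short progress = registry.progressPriority();   // already reflects the new subscriber
+ * }</pre>
+ *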

The subscriber and its hint are stored, then the priority policy is refreshed using the + * provided fetcher callbacks. Finally, the updated hint set is pushed to the watch set so that + * future polling attempts can incorporate the new hint values. + * + * @param cb subscriber callback to register; must be non-null + * @param hint edition hint provided by the subscriber + * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null + * @param fetcherName human-readable fetcher identifier used for debug logging + */ + void addSubscriber( + USKCallback cb, long hint, USKFetcherCallback[] fetcherCallbacks, String fetcherName) { + Long[] hints; + synchronized (this) { + subscribers.add(cb); + subscriberHints.put(cb, hint); + hints = subscriberHints.values().toArray(new Long[0]); + } + updatePriorities(fetcherCallbacks, fetcherName); + watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK)); + } + + /** + * Removes a subscriber and updates polling priorities and watch hints. + * + *

The subscriber and its hint are removed, priorities are refreshed using the provided fetcher + * callbacks, and the remaining hint set is propagated to the watch set. The method is safe to + * call even if the subscriber was not registered. + * + * @param cb subscriber callback to remove; must be non-null + * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null + * @param fetcherName human-readable fetcher identifier used for debug logging + */ + void removeSubscriber(USKCallback cb, USKFetcherCallback[] fetcherCallbacks, String fetcherName) { + Long[] hints; + synchronized (this) { + subscribers.remove(cb); + subscriberHints.remove(cb); + hints = subscriberHints.values().toArray(new Long[0]); + } + updatePriorities(fetcherCallbacks, fetcherName); + watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK)); + } + + /** + * Removes a subscriber without updating polling priorities. + * + *

This is used when the caller is already managing priority changes elsewhere. The method + * still updates the watch set with the remaining hint values. + * + * @param cb subscriber callback to remove; must be non-null + */ + void removeCallback(USKCallback cb) { + Long[] hints; + synchronized (this) { + subscribers.remove(cb); + subscriberHints.remove(cb); + hints = subscriberHints.values().toArray(new Long[0]); + } + watchingKeys.updateSubscriberHints(hints, uskManager.lookupLatestSlot(origUSK)); + } + + /** + * Returns whether any subscribers are registered. + * + * @return {@code true} if at least one subscriber is present + */ + boolean hasSubscribers() { + synchronized (this) { + return !subscribers.isEmpty(); + } + } + + /** + * Returns whether any fetcher callbacks are present. + * + * @param fetcherCallbacks fetcher callbacks to evaluate; must not be null + * @return {@code true} when the array contains at least one callback + */ + boolean hasCallbacks(USKFetcherCallback[] fetcherCallbacks) { + return fetcherCallbacks.length != 0; + } + + /** + * Refreshes priorities and returns the current progress polling priority. + * + *

The method recalculates polling priorities using the provided fetcher callbacks and then + * returns the progress priority, allowing callers to use the updated value immediately. + * + * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null + * @param fetcherName human-readable fetcher identifier used for debug logging + * @return the updated progress polling priority class + */ + short refreshAndGetProgressPollPriority( + USKFetcherCallback[] fetcherCallbacks, String fetcherName) { + updatePriorities(fetcherCallbacks, fetcherName); + return progressPriority(); + } + + /** + * Returns the current progress polling priority class. + * + * @return progress polling priority derived from subscriber preferences + */ + short progressPriority() { + return priorityPolicy.progressPriority(); + } + + /** + * Returns the current normal polling priority class. + * + * @return normal polling priority derived from subscriber preferences + */ + short normalPriority() { + return priorityPolicy.normalPriority(); + } + + /** + * Returns a snapshot of registered subscribers. + * + * @return an array snapshot of subscribers; may be empty but never null + */ + USKCallback[] snapshotSubscribers() { + synchronized (this) { + return subscribers.toArray(new USKCallback[0]); + } + } + + /** + * Updates polling priorities using the current subscriber snapshot. + * + * @param fetcherCallbacks fetcher callbacks that influence polling priorities; must not be null + * @param fetcherName human-readable fetcher identifier used for debug logging + */ + void updatePriorities(USKFetcherCallback[] fetcherCallbacks, String fetcherName) { + USKCallback[] localCallbacks; + synchronized (this) { + localCallbacks = subscribers.toArray(new USKCallback[0]); + } + priorityPolicy.updatePriorities(localCallbacks, fetcherCallbacks, fetcherName); + } +} diff --git a/src/main/java/network/crypta/client/async/USKSuccessPlanner.java b/src/main/java/network/crypta/client/async/USKSuccessPlanner.java new file mode 100644 index 0000000000..560167922b --- /dev/null +++ b/src/main/java/network/crypta/client/async/USKSuccessPlanner.java @@ -0,0 +1,134 @@ +package network.crypta.client.async; + +import java.util.List; +import network.crypta.keys.ClientSSKBlock; + +/** + * Builds plan objects for handling successful or discovered USK editions. + * + *
<p>This helper centralizes the decision-making data needed when a polling attempt succeeds or + * discovers a newer edition. Callers use it to construct small, mutable plan objects that carry + * flags about whether to decode data, which attempts should be canceled, and whether a store check + * should be registered immediately. The planner does not execute any actions itself; it simply + * prepares structured data for the owning {@link USKFetcher} or related coordinators. + * + *
<p>The class is stateless and thread-safe, and it may be reused freely across scheduling cycles. + * Plan instances are mutable data holders and are typically short-lived, created for a single + * scheduling decision, and then discarded. + *
+ * <ul>
+ *   <li>Creates plan objects for successful fetches or found editions.
+ *   <li>Encodes decode and registration decisions in a small data structure.
+ *   <li>Provides a reusable helper for decode-eligibility checks.
+ * </ul>
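+ *
+ * <p>A minimal usage sketch (illustrative only; the call-site variables such as {@code curLatest}
+ * and {@code attemptsToKill} are assumptions, not identifiers introduced by this change):
+ *
+ * <pre>{@code
+ * USKSuccessPlanner planner = new USKSuccessPlanner();
+ * boolean decode = USKSuccessPlanner.shouldDecode(curLatest, lastEd, dontUpdate, block);
+ * USKSuccessPlanner.SuccessPlan plan =
+ *     planner.createSuccessPlan(decode, curLatest, registerNow, attemptsToKill);
+ * // The owning fetcher then acts on plan.decode, plan.registerNow and plan.killAttempts.
+ * }</pre>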
+ */ +final class USKSuccessPlanner { + /** Creates a stateless planner instance. */ + USKSuccessPlanner() {} + + /** + * Plan describing how to handle a successful fetch. + * + *
<p>The plan records whether to decode data, the current latest edition value, and whether + * registration should happen immediately. It also includes any polling attempts that should be + * terminated after successful handling is completed. + */ + static final class SuccessPlan { + /** Whether the caller should decode the associated data block. */ + boolean decode; + + /** Current latest edition value after applying the successful result. */ + long curLatest; + + /** Whether the caller should register follow-up work immediately. */ + boolean registerNow; + + /** Attempts that should be canceled after the success is processed. */ + List<USKAttempt> killAttempts; + + /** Creates an empty success plan with default values. */ + SuccessPlan() {} + } + + /** + * Plan describing how to handle a discovered edition without a full success path. + * + *
<p>The plan records whether to decode data, whether a store check should be registered + * immediately, and which polling attempts should be terminated after handling the discovery. + */ + static final class FoundPlan { + /** Whether the caller should decode the associated data block. */ + boolean decode; + + /** Attempts that should be canceled after the discovery is processed. */ + List<USKAttempt> killAttempts; + + /** Whether the caller should register follow-up work immediately. */ + boolean registerNow; + + /** Creates an empty found plan with default values. */ + FoundPlan() {} + } + + /** + * Creates a plan for handling a successful fetch. + * + *
<p>The returned plan aggregates the caller's decision flags and the list of attempts that + * should be terminated after success handling. The method does not validate the inputs; it simply + * packages them for downstream consumers. + * + * @param decode whether the success path should decode the returned data block + * @param curLatest latest edition value after applying the successful fetch + * @param registerNow whether follow-up registration should occur immediately + * @param killAttempts polling attempts to cancel after success handling; may be empty but not + * null + * @return a success plan populated with the provided values + */ + SuccessPlan createSuccessPlan( + boolean decode, long curLatest, boolean registerNow, List<USKAttempt> killAttempts) { + SuccessPlan plan = new SuccessPlan(); + plan.decode = decode; + plan.curLatest = curLatest; + plan.registerNow = registerNow; + plan.killAttempts = killAttempts; + return plan; + } + + /** + * Creates a plan for handling a newly discovered edition. + * + *
<p>The returned plan captures decode and registration choices along with any polling attempts + * that should be terminated after the discovery is processed. + * + * @param decode whether the discovery path should decode the returned data block + * @param registerNow whether follow-up registration should occur immediately + * @param killAttempts polling attempts to cancel after handling the discovery; may be empty but + * not null + * @return a found plan populated with the provided values + */ + FoundPlan createFoundPlan(boolean decode, boolean registerNow, List<USKAttempt> killAttempts) { + FoundPlan plan = new FoundPlan(); + plan.decode = decode; + plan.registerNow = registerNow; + plan.killAttempts = killAttempts; + return plan; + } + + /** + * Determines whether the given result should be decoded. + * + *
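+ * <p>Illustrative calls (the edition numbers and the {@code block} variable are made-up values
+ * for this example):
+ *
+ * <pre>{@code
+ * shouldDecode(7L, 5L, false, block); // true: found edition is at least as new as the last one
+ * shouldDecode(7L, 5L, true, null);   // false: no-update path without a data block
+ * shouldDecode(4L, 5L, false, block); // false: older than the last known edition
+ * }</pre>
+ *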
<p>
The decision is based on the current latest edition value, the last known edition, and + * whether the caller has requested a no-update path without a data block. A {@code null} block is + * treated as non-decodable when {@code dontUpdate} is set. + * + * @param curLatest current latest edition value tracked by the caller + * @param lastEd last known edition value to compare against + * @param dontUpdate whether the caller is explicitly avoiding updates + * @param block decoded block candidate; may be null when only metadata is available + * @return {@code true} when the result is eligible for decoding + */ + static boolean shouldDecode( + long curLatest, long lastEd, boolean dontUpdate, ClientSSKBlock block) { + return curLatest >= lastEd && !(dontUpdate && block == null); + } +} diff --git a/src/test/java/network/crypta/client/async/USKAttemptManagerTest.java b/src/test/java/network/crypta/client/async/USKAttemptManagerTest.java new file mode 100644 index 0000000000..ac261dd6f6 --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKAttemptManagerTest.java @@ -0,0 +1,617 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.net.MalformedURLException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.stream.Collectors; +import network.crypta.client.ArchiveManager; +import network.crypta.client.FetchContext; +import network.crypta.client.FetchContextOptions; +import network.crypta.client.InsertContext; +import network.crypta.client.InsertContextOptions; +import network.crypta.client.events.SimpleEventProducer; +import network.crypta.client.filter.LinkFilterExceptionProvider; +import network.crypta.clients.fcp.PersistentRequestRoot; +import network.crypta.config.Config; +import network.crypta.crypt.MasterSecret; +import network.crypta.crypt.RandomSource; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.USK; +import network.crypta.node.ClientContextResources; +import network.crypta.node.RequestClient; +import network.crypta.support.MemoryLimitedJobRunner; +import network.crypta.support.PriorityAwareExecutor; +import network.crypta.support.Ticker; +import network.crypta.support.api.LockableRandomAccessBufferFactory; +import network.crypta.support.io.FileRandomAccessBufferFactory; +import network.crypta.support.io.FilenameGenerator; +import network.crypta.support.io.PersistentFileTracker; +import network.crypta.support.io.PersistentTempBucketFactory; +import network.crypta.support.io.TempBucketFactory; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKAttemptManagerTest { + + private 
static final RequestClient TRANSIENT_CLIENT = + new RequestClient() { + @Override + public boolean persistent() { + return false; + } + + @Override + public boolean realTimeFlag() { + return false; + } + }; + + private static final class DirectExecutor implements PriorityAwareExecutor { + @Override + public void execute(Runnable job) { + job.run(); + } + + @Override + public void execute(Runnable job, String jobName) { + job.run(); + } + + @Override + public void execute(Runnable job, String jobName, boolean fromTicker) { + job.run(); + } + + @Override + public int[] waitingThreads() { + return new int[0]; + } + + @Override + public int[] runningThreads() { + return new int[0]; + } + + @Override + public int getWaitingThreadsCount() { + return 0; + } + } + + private static final class DirectTicker implements Ticker { + private final PriorityAwareExecutor executor = new DirectExecutor(); + + @Override + public void queueTimedJob(Runnable job, long offset) { + job.run(); + } + + @Override + public void queueTimedJob( + Runnable job, String name, long offset, boolean runOnTickerAnyway, boolean noDupes) { + job.run(); + } + + @Override + public PriorityAwareExecutor getExecutor() { + return executor; + } + + @Override + public void removeQueuedJob(Runnable job) { + // no-op + } + + @Override + public void queueTimedJobAbsolute( + Runnable runner, String name, long time, boolean runOnTickerAnyway, boolean noDupes) { + runner.run(); + } + } + + private static final class TestRequester extends ClientRequester { + private final ClientBaseCallback callback; + private final network.crypta.keys.FreenetURI uri; + private int toNetworkCalls; + private boolean cancelled; + + private TestRequester(network.crypta.keys.FreenetURI uri, RequestClient client) { + super((short) 1, client); + this.uri = uri; + this.callback = + new ClientBaseCallback() { + @Override + public void onResume(ClientContext context) { + // no-op + } + + @Override + public RequestClient getRequestClient() { + return client; + } + }; + } + + @Override + public void onTransition( + ClientGetState oldState, ClientGetState newState, ClientContext context) { + // no-op + } + + @Override + public void cancel(ClientContext context) { + cancelled = true; + } + + @Override + public network.crypta.keys.FreenetURI getURI() { + return uri; + } + + @Override + public boolean isFinished() { + return cancelled; + } + + @Override + protected void innerNotifyClients(ClientContext context) { + // no-op + } + + @Override + protected void innerToNetwork(ClientContext context) { + toNetworkCalls++; + } + + @Override + protected ClientBaseCallback getCallback() { + return callback; + } + + int toNetworkCalls() { + return toNetworkCalls; + } + } + + private static FetchContext newFetchContext() { + return new FetchContext( + FetchContextOptions.builder() + .limits(16 * 1024L, 16 * 1024L, 4096) + .archiveLimits(1, 0, 0, true) + .retryLimits(0, 0, 2) + .splitfileLimits(true, 0, 0) + .behavior(false, false, false) + .clientOptions(new SimpleEventProducer(), true, false) + .filterOverrides(null, null, null) + .build()); + } + + private static InsertContext newInsertContext() { + return new InsertContext( + InsertContextOptions.builder() + .retryLimits(0, 0) + .splitfileSegmentLimits(0, 0) + .clientOptions(new SimpleEventProducer(), true, false, false) + .compressorDescriptor(null) + .redundancy(0, 0) + .compatibility(InsertContext.CompatibilityMode.COMPAT_CURRENT) + .build()); + } + + private static ClientContext minimalContext(USKManager uskManager, 
RandomSource randomSource) { + return new ClientContext( + 1L, + new ClientContextRuntime( + mock(ClientLayerPersister.class), + new DirectExecutor(), + mock(MemoryLimitedJobRunner.class), + new DirectTicker(), + randomSource, + new Random(123), + mock(MasterSecret.class)), + new ClientContextStorageFactories( + mock(PersistentTempBucketFactory.class), + mock(TempBucketFactory.class), + mock(PersistentFileTracker.class), + mock(FilenameGenerator.class), + mock(FilenameGenerator.class), + mock(FileRandomAccessBufferFactory.class), + mock(FileRandomAccessBufferFactory.class)), + new ClientContextRafFactories( + mock(LockableRandomAccessBufferFactory.class), + mock(LockableRandomAccessBufferFactory.class)), + new ClientContextServices( + new ClientContextResources(mock(ArchiveManager.class), mock(HealingQueue.class)), + uskManager, + mock(network.crypta.support.compress.RealCompressor.class), + mock(DatastoreChecker.class), + mock(PersistentRequestRoot.class), + mock(LinkFilterExceptionProvider.class)), + new ClientContextDefaults(newFetchContext(), newInsertContext(), mock(Config.class))); + } + + private static USK newUSK() throws MalformedURLException { + byte[] pubKeyHash = new byte[NodeSSK.PUBKEY_HASH_SIZE]; + byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH]; + byte[] extras = + new byte[] { + NodeSSK.SSK_VERSION, 0, Key.ALGO_AES_PCFB_256_SHA256, 0, (byte) KeyBlock.HASH_SHA256 + }; + return new USK(pubKeyHash, cryptoKey, extras, "site", 0L); + } + + private static USKKeyWatchSet.Lookup lookup(USK usk, long edition, boolean ignoreStore) { + USKKeyWatchSet.Lookup lookup = new USKKeyWatchSet.Lookup(); + lookup.val = edition; + lookup.key = usk.getSSK(edition); + lookup.ignoreStore = ignoreStore; + lookup.label = "test"; + return lookup; + } + + private static USKAttemptManager newManager( + USKAttemptCallbacks callbacks, + USKManager uskManager, + USKKeyWatchSet watchingKeys, + boolean checkStoreOnly, + boolean keepLastData, + USK usk, + ClientRequester parent) { + USKAttemptContext attemptContext = + new USKAttemptContext(callbacks, usk, newFetchContext(), newFetchContext(), parent, false); + return new USKAttemptManager( + attemptContext, uskManager, watchingKeys, checkStoreOnly, keepLastData); + } + + @Test + void cancelBefore_whenRunningAndPollingBeforeCutoff_removesAndReturns() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, mock(RandomSource.class)); + + USKKeyWatchSet.Lookup runningOld = lookup(usk, 1L, false); + USKKeyWatchSet.Lookup runningNew = lookup(usk, 4L, false); + USKKeyWatchSet.Lookup pollingOld = lookup(usk, 2L, true); + USKKeyWatchSet.Lookup pollingNew = lookup(usk, 5L, true); + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch( + Arrays.asList(runningOld, runningNew), Arrays.asList(pollingOld, pollingNew)); + + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + + manager.addNewAttempts(0L, context, true); + manager.clearAttemptsToStart(); + + List toCancel = 
manager.cancelBefore(3L); + + assertEquals(2, toCancel.size()); + Set cancelledNumbers = + toCancel.stream().map(attempt -> attempt.number).collect(Collectors.toSet()); + assertEquals(Set.of(1L, 2L), cancelledNumbers); + assertEquals(1, manager.runningAttemptCount()); + assertEquals(1, manager.pollingAttemptCount()); + } + + @Test + void finishCancelBefore_whenAttemptsProvided_invokesCancel() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + ClientContext context = mock(ClientContext.class); + USKAttempt attempt = mock(USKAttempt.class); + + manager.finishCancelBefore(List.of(attempt), context); + + verify(attempt).cancel(context); + } + + @Test + void shouldAddRandomEditions_delegatesToCallbacks() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + RandomSource randomSource = mock(RandomSource.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, randomSource); + + when(callbacks.shouldAddRandomEditions(randomSource, true)).thenReturn(true); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + + assertTrue(manager.shouldAddRandomEditions(context, true)); + verify(callbacks).shouldAddRandomEditions(randomSource, true); + } + + @Test + void addNewAttempts_whenStoreOnly_doesNotStage() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, mock(RandomSource.class)); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch(List.of(lookup(usk, 1L, false)), List.of(lookup(usk, 2L, true))); + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, true, false, usk, parent); + + manager.addNewAttempts(0L, context, true); + + assertFalse(manager.hasPendingAttempts()); + assertEquals(0, manager.runningAttemptCount()); + assertEquals(0, manager.pollingAttemptCount()); + } + + @Test + void addNewAttempts_whenDuplicateEditions_skipsDuplicate() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, mock(RandomSource.class)); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch(List.of(lookup(usk, 3L, false)), List.of(lookup(usk, 4L, true))); + when(callbacks.shouldAddRandomEditions(any(Random.class), 
anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + + manager.addNewAttempts(0L, context, true); + assertEquals(2, manager.snapshotAttemptsToStart().length); + manager.clearAttemptsToStart(); + + manager.addNewAttempts(0L, context, true); + + assertFalse(manager.hasPendingAttempts()); + assertEquals(1, manager.runningAttemptCount()); + assertEquals(1, manager.pollingAttemptCount()); + } + + @Test + void addNewAttempts_whenNegativeEdition_throwsIllegalArgumentException() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, mock(RandomSource.class)); + + USKKeyWatchSet.Lookup negative = new USKKeyWatchSet.Lookup(); + negative.val = -1L; + negative.key = usk.getSSK(0L); + negative.ignoreStore = false; + negative.label = "negative"; + USKKeyWatchSet.ToFetch plan = new USKKeyWatchSet.ToFetch(List.of(negative), List.of()); + + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + + assertThrows(IllegalArgumentException.class, () -> manager.addNewAttempts(0L, context, true)); + } + + @Test + void registerAttempts_whenNewerSchedulesAndNotifiesNetwork() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext addContext = minimalContext(uskManager, mock(RandomSource.class)); + ClientContext scheduleContext = mock(ClientContext.class); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch( + List.of(lookup(usk, 10L, false)), List.of(lookup(usk, 11L, true))); + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + manager.addNewAttempts(0L, addContext, true); + + USKAttempt[] attempts = manager.snapshotAttemptsToStart(); + List checkers = new ArrayList<>(); + for (USKAttempt attempt : attempts) { + USKChecker checker = mock(USKChecker.class); + attempt.checker = checker; + checkers.add(checker); + } + + when(uskManager.lookupLatestSlot(usk)).thenReturn(9L); + + manager.registerAttempts( + new USKAttemptManager.USKAttemptRegistrationParams(scheduleContext, true, 15L)); + + assertEquals(1, parent.toNetworkCalls()); + assertFalse(manager.hasPendingAttempts()); + for (USKChecker checker : checkers) { + verify(checker).schedule(scheduleContext); + } + } + + @Test + void registerAttempts_whenObsolete_removesFromMaps() throws Exception { + USKManager uskManager = 
mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext addContext = minimalContext(uskManager, mock(RandomSource.class)); + ClientContext scheduleContext = mock(ClientContext.class); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch(List.of(lookup(usk, 5L, false)), List.of()); + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + manager.addNewAttempts(0L, addContext, true); + + USKAttempt attempt = manager.snapshotAttemptsToStart()[0]; + USKChecker checker = mock(USKChecker.class); + attempt.checker = checker; + + when(uskManager.lookupLatestSlot(usk)).thenReturn(5L); + + manager.registerAttempts( + new USKAttemptManager.USKAttemptRegistrationParams(scheduleContext, true, 5L)); + + assertEquals(1, parent.toNetworkCalls()); + assertEquals(0, manager.runningAttemptCount()); + verify(checker, never()).schedule(scheduleContext); + } + + @Test + void registerAttempts_whenKeepLastDataAndNoLastRequestData_schedulesSuggestedEdition() + throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext addContext = minimalContext(uskManager, mock(RandomSource.class)); + ClientContext scheduleContext = mock(ClientContext.class); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch(List.of(lookup(usk, 12L, false)), List.of()); + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, true, usk, parent); + manager.addNewAttempts(0L, addContext, true); + + USKAttempt attempt = manager.snapshotAttemptsToStart()[0]; + USKChecker checker = mock(USKChecker.class); + attempt.checker = checker; + + when(uskManager.lookupLatestSlot(usk)).thenReturn(12L); + + manager.registerAttempts( + new USKAttemptManager.USKAttemptRegistrationParams(scheduleContext, false, 12L)); + + verify(checker).schedule(scheduleContext); + } + + @Test + void reloadPollParameters_whenPollingAttemptsPresent_refreshesChecker() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, mock(RandomSource.class)); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch(List.of(), List.of(lookup(usk, 7L, true))); + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + 
USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + manager.addNewAttempts(0L, context, true); + manager.clearAttemptsToStart(); + + USKAttempt pollingAttempt = manager.snapshotPollingAttempts()[0]; + USKChecker checker = mock(USKChecker.class); + pollingAttempt.checker = checker; + + manager.reloadPollParameters(); + + verify(checker).onChangedFetchContext(); + } + + @Test + void runningAttemptsDescription_includesCancelledAndSucceededFlags() throws Exception { + USKManager uskManager = mock(USKManager.class); + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptCallbacks callbacks = mock(USKAttemptCallbacks.class); + USK usk = newUSK(); + TestRequester parent = new TestRequester(usk.getURI(), TRANSIENT_CLIENT); + ClientContext context = minimalContext(uskManager, mock(RandomSource.class)); + + USKKeyWatchSet.ToFetch plan = + new USKKeyWatchSet.ToFetch(List.of(lookup(usk, 2L, false)), List.of()); + when(callbacks.shouldAddRandomEditions(any(Random.class), anyBoolean())).thenReturn(false); + when(watchingKeys.getEditionsToFetch( + anyLong(), any(Random.class), anyList(), anyBoolean(), anyBoolean())) + .thenReturn(plan); + + USKAttemptManager manager = + newManager(callbacks, uskManager, watchingKeys, false, false, usk, parent); + manager.addNewAttempts(0L, context, true); + manager.clearAttemptsToStart(); + + USKAttempt attempt = manager.snapshotRunningAttempts()[0]; + attempt.cancelled = true; + attempt.succeeded = true; + + String description = manager.runningAttemptsDescription(); + + assertTrue(description.contains("2")); + assertTrue(description.contains("(cancelled)")); + assertTrue(description.contains("(succeeded)")); + } +} diff --git a/src/test/java/network/crypta/client/async/USKAttemptTest.java b/src/test/java/network/crypta/client/async/USKAttemptTest.java new file mode 100644 index 0000000000..bcb3427c0a --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKAttemptTest.java @@ -0,0 +1,264 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import network.crypta.client.FetchContext; +import network.crypta.client.FetchContextOptions; +import network.crypta.client.events.SimpleEventProducer; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.USK; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKAttemptTest { + + private static final short PROGRESS_PRIORITY = 7; + private static final short NORMAL_PRIORITY = 3; + private static final short PARENT_PRIORITY = 9; + + @Mock private USKAttemptCallbacks callbacks; + @Mock private ClientRequester parent; + @Mock private ClientContext context; + + private FetchContext fetchContext; + private USK 
usk; + + @BeforeEach + void setUp() throws Exception { + byte[] pubKeyHash = new byte[NodeSSK.PUBKEY_HASH_SIZE]; + byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH]; + byte[] extras = + new byte[] { + NodeSSK.SSK_VERSION, 0, Key.ALGO_AES_PCFB_256_SHA256, 0, (byte) KeyBlock.HASH_SHA256 + }; + usk = new USK(pubKeyHash, cryptoKey, extras, "site", 0L); + + fetchContext = + new FetchContext( + FetchContextOptions.builder() + .limits(16 * 1024L, 16 * 1024L, 4096) + .archiveLimits(1, 0, 0, true) + .retryLimits(0, 0, 2) + .splitfileLimits(true, 0, 0) + .behavior(false, false, false) + .clientOptions(new SimpleEventProducer(), true, false) + .filterOverrides(null, null, null) + .build()); + } + + @Test + void onSuccess_whenCalled_marksSucceededAndNotifiesCallbacks() { + USKAttempt attempt = newAttempt(false); + ClientSSKBlock block = mock(ClientSSKBlock.class); + + attempt.onSuccess(block, context); + + assertTrue(attempt.succeeded); + assertNull(attempt.checker); + verify(callbacks).onSuccess(attempt, false, block, context); + } + + @Test + void onDNF_whenCalled_marksDnfAndNotifiesCallbacks() { + USKAttempt attempt = newAttempt(false); + + attempt.onDNF(context); + + assertTrue(attempt.dnf); + assertNull(attempt.checker); + verify(callbacks).onDNF(attempt, context); + } + + @Test + void onFatalAuthorError_whenCalled_reportsDontUpdateSuccess() { + USKAttempt attempt = newAttempt(false); + + attempt.onFatalAuthorError(context); + + assertNull(attempt.checker); + verify(callbacks).onSuccess(attempt, true, null, context); + } + + @Test + void onNetworkError_whenCalled_reportsDnf() { + USKAttempt attempt = newAttempt(false); + + attempt.onNetworkError(context); + + assertNull(attempt.checker); + verify(callbacks).onDNF(attempt, context); + } + + @Test + void onCancelled_whenCalledTwice_notifiesOnce() { + USKAttempt attempt = newAttempt(false); + + attempt.onCancelled(context); + attempt.onCancelled(context); + + assertNull(attempt.checker); + verify(callbacks, times(1)).onCancelled(attempt, context); + } + + @Test + void cancel_whenCheckerPresent_invokesCheckerCancelAndNotifiesOnce() { + USKAttempt attempt = newAttempt(false); + USKChecker checker = mock(USKChecker.class); + attempt.checker = checker; + + attempt.cancel(context); + + assertTrue(attempt.cancelled); + assertNull(attempt.checker); + verify(checker).cancel(context); + verify(callbacks, times(1)).onCancelled(attempt, context); + } + + @Test + void schedule_whenCheckerPresent_delegatesToChecker() { + USKAttempt attempt = newAttempt(false); + USKChecker checker = mock(USKChecker.class); + attempt.checker = checker; + + attempt.schedule(context); + + verify(checker).schedule(context); + } + + @Test + void schedule_whenCheckerMissing_doesNothing() { + USKAttempt attempt = newAttempt(false); + attempt.checker = null; + + attempt.schedule(context); + + assertNull(attempt.checker); + } + + @Test + void reloadPollParameters_whenCheckerPresent_refreshesChecker() { + USKAttempt attempt = newAttempt(false); + USKChecker checker = mock(USKChecker.class); + attempt.checker = checker; + + attempt.reloadPollParameters(); + + verify(checker).onChangedFetchContext(); + } + + @Test + void reloadPollParameters_whenCheckerMissing_doesNothing() { + USKAttempt attempt = newAttempt(false); + attempt.checker = null; + + attempt.reloadPollParameters(); + + assertNull(attempt.checker); + } + + @Test + void getPriority_whenBackgroundForeverNotInCooldown_returnsProgressPriority() { + USKAttempt attempt = newAttempt(true); + 
when(callbacks.isBackgroundPoll()).thenReturn(true); + when(callbacks.getProgressPollPriority()).thenReturn(PROGRESS_PRIORITY); + + short priority = attempt.getPriority(); + + assertEquals(PROGRESS_PRIORITY, priority); + } + + @Test + void getPriority_whenBackgroundForeverInCooldown_returnsNormalPriority() { + USKAttempt attempt = newAttempt(true); + when(callbacks.isBackgroundPoll()).thenReturn(true); + when(callbacks.getNormalPollPriority()).thenReturn(NORMAL_PRIORITY); + + attempt.onEnterFiniteCooldown(context); + + short priority = attempt.getPriority(); + + assertEquals(NORMAL_PRIORITY, priority); + verify(callbacks).onEnterFiniteCooldown(context); + } + + @Test + void getPriority_whenBackgroundNonForever_returnsNormalPriority() { + USKAttempt attempt = newAttempt(false); + when(callbacks.isBackgroundPoll()).thenReturn(true); + when(callbacks.getNormalPollPriority()).thenReturn(NORMAL_PRIORITY); + + short priority = attempt.getPriority(); + + assertEquals(NORMAL_PRIORITY, priority); + } + + @Test + void getPriority_whenNotBackground_returnsParentPriority() { + USKAttempt attempt = newAttempt(false); + when(callbacks.isBackgroundPoll()).thenReturn(false); + when(parent.getPriorityClass()).thenReturn(PARENT_PRIORITY); + + short priority = attempt.getPriority(); + + assertEquals(PARENT_PRIORITY, priority); + } + + @Test + void everInCooldown_whenNeverTriggered_returnsFalse() { + USKAttempt attempt = newAttempt(false); + + assertFalse(attempt.everInCooldown()); + } + + @Test + void toString_whenForeverIncludesUriAndFlag() { + USKAttempt attempt = newAttempt(true); + + String description = attempt.toString(); + + assertNotNull(description); + assertTrue(description.contains("USKAttempt for")); + assertTrue(description.contains(usk.getURI().toString())); + assertTrue(description.contains("(forever)")); + } + + @Test + void toString_whenOneOffOmitsForeverFlag() { + USKAttempt attempt = newAttempt(false); + + String description = attempt.toString(); + + assertNotNull(description); + assertTrue(description.contains("USKAttempt for")); + assertTrue(description.contains(usk.getURI().toString())); + assertFalse(description.contains("(forever)")); + } + + private USKAttempt newAttempt(boolean forever) { + USKAttemptContext attemptContext = + new USKAttemptContext(callbacks, usk, fetchContext, fetchContext, parent, false); + USKKeyWatchSet.Lookup lookup = new USKKeyWatchSet.Lookup(); + lookup.val = 11L; + lookup.key = usk.getSSK(lookup.val); + lookup.ignoreStore = false; + lookup.label = "test"; + return new USKAttempt(attemptContext, lookup, forever); + } +} diff --git a/src/test/java/network/crypta/client/async/USKCompletionCoordinatorTest.java b/src/test/java/network/crypta/client/async/USKCompletionCoordinatorTest.java new file mode 100644 index 0000000000..75f026496d --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKCompletionCoordinatorTest.java @@ -0,0 +1,269 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static 
org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.net.MalformedURLException; +import java.util.Arrays; +import network.crypta.crypt.RandomSource; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.USK; +import network.crypta.node.Node; +import network.crypta.node.NodeClientCore; +import network.crypta.node.RequestStarter; +import network.crypta.support.api.Bucket; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@SuppressWarnings("java:S100") +@ExtendWith(MockitoExtension.class) +class USKCompletionCoordinatorTest { + + @Mock private USKCompletionHandler completionHandler; + @Mock private USKManager uskManager; + @Mock private ClientRequester parent; + @Mock private ClientContext context; + + private USK usk; + + @BeforeEach + void setUp() throws Exception { + usk = newUSK(); + } + + @Test + void applyDecodedData_whenDecodeFalse_doesNothing() { + USKCompletionCoordinator coordinator = newCoordinator(false); + ClientSSKBlock block = mock(ClientSSKBlock.class); + + coordinator.applyDecodedData(false, block, context); + + //noinspection resource + verify(completionHandler, never()).decodeBlockIfNeeded(any(Boolean.class), any(), any(), any()); + verify(completionHandler, never()).applyDecodedData(any(Boolean.class), any(), any()); + } + + @Test + void applyDecodedData_whenDecodeTrue_decodesAndApplies() { + USKCompletionCoordinator coordinator = newCoordinator(false); + ClientSSKBlock block = mock(ClientSSKBlock.class); + Bucket bucket = mock(Bucket.class); + when(completionHandler.decodeBlockIfNeeded(true, block, context, parent)).thenReturn(bucket); + + coordinator.applyDecodedData(true, block, context); + + //noinspection resource + verify(completionHandler).decodeBlockIfNeeded(true, block, context, parent); + verify(completionHandler).applyDecodedData(true, block, bucket); + } + + @Test + void applyFoundDecodedData_whenCalled_delegates() { + USKCompletionCoordinator coordinator = newCoordinator(false); + byte[] data = new byte[] {1, 2, 3}; + + coordinator.applyFoundDecodedData(true, true, (short) 7, data, context); + + verify(completionHandler).applyFoundDecodedData(true, true, (short) 7, data, context); + } + + @Test + void hasLastRequestData_whenHandlerReportsTrue_returnsTrue() { + when(completionHandler.hasLastRequestData()).thenReturn(true); + USKCompletionCoordinator coordinator = newCoordinator(false); + + assertTrue(coordinator.hasLastRequestData()); + } + + @Test + void clearLastRequestData_whenCalled_delegates() { + USKCompletionCoordinator coordinator = newCoordinator(false); + + coordinator.clearLastRequestData(); + + verify(completionHandler).clearLastRequestData(); + } + + @Test + void releaseLastDataBytes_whenCalled_returnsHandlerValue() { + byte[] expected = new byte[] {9, 4}; + when(completionHandler.releaseLastDataBytes()).thenReturn(expected); + USKCompletionCoordinator coordinator = newCoordinator(false); + + assertSame(expected, coordinator.releaseLastDataBytes()); + } + + @Test + void lastCompressionCodec_whenCalled_returnsHandlerValue() { + when(completionHandler.lastCompressionCodec()).thenReturn((short) 11); + USKCompletionCoordinator 
coordinator = newCoordinator(false); + + assertEquals((short) 11, coordinator.lastCompressionCodec()); + } + + @Test + void lastWasMetadata_whenCalled_returnsHandlerValue() { + when(completionHandler.lastWasMetadata()).thenReturn(true); + USKCompletionCoordinator coordinator = newCoordinator(false); + + assertTrue(coordinator.lastWasMetadata()); + } + + @Test + void completeCallbacks_whenEditionMissing_callsFailureAndCleansUp() throws Exception { + KeyListenerTracker tracker = mock(KeyListenerTracker.class); + ClientRequestScheduler scheduler = newScheduler(context, tracker); + when(context.getSskFetchScheduler(false)).thenReturn(scheduler); + when(uskManager.lookupLatestSlot(usk)).thenReturn(-1L); + + USKFetcher fetcher = mock(USKFetcher.class); + USKFetcherCallback callback = mock(USKFetcherCallback.class); + USKCompletionCoordinator coordinator = newCoordinator(false); + + coordinator.completeCallbacks(context, fetcher, new USKFetcherCallback[] {callback}); + + verify(uskManager).unsubscribe(usk, fetcher); + verify(uskManager).onFinished(fetcher); + verify(tracker).removePendingKeys((KeyListener) fetcher); + verify(callback).onFailure(context); + verify(callback, never()).onFoundEdition(any()); + verify(completionHandler).releaseLastDataBytes(); + verify(completionHandler).lastCompressionCodec(); + verify(completionHandler).lastWasMetadata(); + } + + @Test + void completeCallbacks_whenEditionFound_callsFoundEditionWithRetainedData() throws Exception { + KeyListenerTracker tracker = mock(KeyListenerTracker.class); + ClientRequestScheduler scheduler = newScheduler(context, tracker); + when(context.getSskFetchScheduler(true)).thenReturn(scheduler); + when(uskManager.lookupLatestSlot(usk)).thenReturn(5L); + byte[] data = new byte[] {1, 2}; + when(completionHandler.releaseLastDataBytes()).thenReturn(data); + when(completionHandler.lastCompressionCodec()).thenReturn((short) 3); + when(completionHandler.lastWasMetadata()).thenReturn(true); + + USKFetcher fetcher = mock(USKFetcher.class); + USKFetcherCallback callback = mock(USKFetcherCallback.class); + USKCompletionCoordinator coordinator = newCoordinator(true); + + coordinator.completeCallbacks(context, fetcher, new USKFetcherCallback[] {callback}); + + ArgumentCaptor captor = ArgumentCaptor.forClass(USKFoundEdition.class); + verify(callback).onFoundEdition(captor.capture()); + verify(callback, never()).onFailure(any()); + + USKFoundEdition found = captor.getValue(); + assertEquals(5L, found.edition()); + assertEquals(usk.copy(5L), found.key()); + assertSame(context, found.context()); + assertTrue(found.metadata()); + assertEquals((short) 3, found.codec()); + assertArrayEquals(data, found.data()); + assertFalse(found.newKnownGood()); + assertFalse(found.newSlotToo()); + } + + @Test + void completeCallbacks_whenCallbackThrows_continuesToNext() throws Exception { + KeyListenerTracker tracker = mock(KeyListenerTracker.class); + ClientRequestScheduler scheduler = newScheduler(context, tracker); + when(context.getSskFetchScheduler(false)).thenReturn(scheduler); + when(uskManager.lookupLatestSlot(usk)).thenReturn(-1L); + + USKFetcher fetcher = mock(USKFetcher.class); + USKFetcherCallback throwing = mock(USKFetcherCallback.class); + USKFetcherCallback next = mock(USKFetcherCallback.class); + doThrow(new RuntimeException("boom")).when(throwing).onFailure(context); + + USKCompletionCoordinator coordinator = newCoordinator(false); + + assertDoesNotThrow( + () -> + coordinator.completeCallbacks( + context, fetcher, new USKFetcherCallback[] {throwing, 
next})); + + verify(next).onFailure(context); + } + + @Test + void finishCancelled_whenCalled_notifiesAllCallbacks() { + USKCompletionCoordinator coordinator = newCoordinator(true); + USKFetcherCallback first = mock(USKFetcherCallback.class); + USKFetcherCallback second = mock(USKFetcherCallback.class); + + coordinator.finishCancelled(context, new USKFetcherCallback[] {first, second}); + + verify(first).onCancelled(context); + verify(second).onCancelled(context); + } + + private USKCompletionCoordinator newCoordinator(boolean realTimeFlag) { + return new USKCompletionCoordinator(completionHandler, uskManager, usk, parent, realTimeFlag); + } + + private static final String SCHED_TRANSIENT_FIELD = "schedTransient"; + private static final String DEFAULT_SITE = "site"; + private static final long DEFAULT_EDITION = 3L; + + private static ClientRequestScheduler newScheduler( + ClientContext context, KeyListenerTracker tracker) throws Exception { + RandomSource random = mock(RandomSource.class); + RequestStarter starter = mock(RequestStarter.class); + Node node = mock(Node.class); + NodeClientCore core = mock(NodeClientCore.class); + DatastoreChecker datastoreChecker = mock(DatastoreChecker.class); + when(core.getStoreChecker()).thenReturn(datastoreChecker); + ClientRequestScheduler scheduler = + new ClientRequestScheduler( + new ClientRequestScheduler.SchedulerMode(false, true, false), + random, + starter, + node, + core, + "test", + context); + setSchedTransient(scheduler, tracker); + return scheduler; + } + + private static void setSchedTransient( + ClientRequestScheduler scheduler, KeyListenerTracker tracker) throws Exception { + Field field = scheduler.getClass().getDeclaredField(SCHED_TRANSIENT_FIELD); + field.setAccessible(true); + field.set(scheduler, tracker); + } + + private static USK newUSK() throws MalformedURLException { + byte[] pubKeyHash = new byte[NodeSSK.PUBKEY_HASH_SIZE]; + byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH]; + Arrays.fill(pubKeyHash, (byte) 0x11); + Arrays.fill(cryptoKey, (byte) 0x22); + byte[] extra = new byte[5]; + extra[0] = NodeSSK.SSK_VERSION; + extra[1] = 0; + extra[2] = Key.ALGO_AES_PCFB_256_SHA256; + extra[3] = 0; + extra[4] = (byte) KeyBlock.HASH_SHA256; + return new USK(pubKeyHash, cryptoKey, extra, DEFAULT_SITE, DEFAULT_EDITION); + } +} diff --git a/src/test/java/network/crypta/client/async/USKCompletionHandlerTest.java b/src/test/java/network/crypta/client/async/USKCompletionHandlerTest.java new file mode 100644 index 0000000000..ce500f0f33 --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKCompletionHandlerTest.java @@ -0,0 +1,289 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.io.OutputStream; +import java.lang.reflect.Field; +import network.crypta.keys.ClientSSKBlock; +import network.crypta.keys.KeyDecodeException; +import 
network.crypta.support.api.Bucket; +import network.crypta.support.api.BucketFactory; +import network.crypta.support.io.ArrayBucket; +import network.crypta.support.io.TempBucketFactory; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; + +@SuppressWarnings("java:S100") +@ExtendWith(MockitoExtension.class) +class USKCompletionHandlerTest { + + @Test + void decodeBlockIfNeeded_whenDecodeFalse_returnsNull() { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + ClientContext context = mock(ClientContext.class); + ClientRequester parent = mock(ClientRequester.class); + + //noinspection ConstantValue + Bucket result = handler.decodeBlockIfNeeded(false, block, context, parent); + + //noinspection ConstantValue + assertNull(result); + verifyNoInteractions(block, context, parent); + } + + @Test + void decodeBlockIfNeeded_whenBlockNull_returnsNull() { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientContext context = mock(ClientContext.class); + ClientRequester parent = mock(ClientRequester.class); + + //noinspection ConstantValue + Bucket result = handler.decodeBlockIfNeeded(true, null, context, parent); + + //noinspection ConstantValue + assertNull(result); + verifyNoInteractions(context, parent); + } + + @Test + @SuppressWarnings("resource") + void decodeBlockIfNeeded_whenDecodeTrue_returnsBucket() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + BucketFactory bucketFactory = mock(BucketFactory.class); + ClientContext context = mock(ClientContext.class); + ClientRequester parent = mock(ClientRequester.class); + Bucket bucket = mock(Bucket.class); + when(parent.persistent()).thenReturn(true); + when(context.getBucketFactory(true)).thenReturn(bucketFactory); + when(block.decode(bucketFactory, 1025, true)).thenReturn(bucket); + + Bucket result = handler.decodeBlockIfNeeded(true, block, context, parent); + + assertSame(bucket, result); + verify(context).getBucketFactory(true); + verify(block).decode(bucketFactory, 1025, true); + } + + @Test + void decodeBlockIfNeeded_whenDecodeThrowsKeyDecodeException_returnsNull() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + BucketFactory bucketFactory = mock(BucketFactory.class); + ClientContext context = mock(ClientContext.class); + ClientRequester parent = mock(ClientRequester.class); + when(parent.persistent()).thenReturn(false); + when(context.getBucketFactory(false)).thenReturn(bucketFactory); + when(block.decode(bucketFactory, 1025, true)).thenThrow(new KeyDecodeException("bad")); + + Bucket result = handler.decodeBlockIfNeeded(true, block, context, parent); + + assertNull(result); + } + + @Test + void decodeBlockIfNeeded_whenDecodeThrowsIOException_returnsNull() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + BucketFactory bucketFactory = mock(BucketFactory.class); + ClientContext context = mock(ClientContext.class); + ClientRequester parent = mock(ClientRequester.class); + when(parent.persistent()).thenReturn(true); + when(context.getBucketFactory(true)).thenReturn(bucketFactory); + when(block.decode(bucketFactory, 1025, true)).thenThrow(new IOException("io")); + + Bucket result = 
handler.decodeBlockIfNeeded(true, block, context, parent); + + assertNull(result); + } + + @Test + void applyDecodedData_whenDecodeFalse_doesNotChangeState() { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + Bucket data = mock(Bucket.class); + + handler.applyDecodedData(false, block, data); + + assertEquals(0, handler.lastCompressionCodec()); + assertFalse(handler.lastWasMetadata()); + assertFalse(handler.hasLastRequestData()); + verifyNoInteractions(block, data); + } + + @Test + void applyDecodedData_whenKeepLastDataTrue_replacesPreviousBucket() { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + Bucket first = mock(Bucket.class); + Bucket second = mock(Bucket.class); + when(block.getCompressionCodec()).thenReturn((short) 5); + when(block.isMetadata()).thenReturn(true); + + handler.applyDecodedData(true, block, first); + handler.applyDecodedData(true, block, second); + + assertTrue(handler.hasLastRequestData()); + assertEquals((short) 5, handler.lastCompressionCodec()); + assertTrue(handler.lastWasMetadata()); + verify(first).free(); + verify(second, never()).free(); + } + + @Test + void applyDecodedData_whenKeepLastDataFalse_freesProvidedBucket() { + USKCompletionHandler handler = new USKCompletionHandler(false); + ClientSSKBlock block = mock(ClientSSKBlock.class); + Bucket data = mock(Bucket.class); + when(block.getCompressionCodec()).thenReturn((short) 2); + when(block.isMetadata()).thenReturn(false); + + handler.applyDecodedData(true, block, data); + + verify(data).free(); + assertFalse(handler.hasLastRequestData()); + assertEquals((short) 2, handler.lastCompressionCodec()); + assertFalse(handler.lastWasMetadata()); + } + + @Test + void applyDecodedData_whenBlockNull_resetsCodecAndMetadata() { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientSSKBlock block = mock(ClientSSKBlock.class); + Bucket data = mock(Bucket.class); + when(block.getCompressionCodec()).thenReturn((short) 9); + when(block.isMetadata()).thenReturn(true); + + handler.applyDecodedData(true, block, data); + handler.applyDecodedData(true, null, null); + + assertEquals((short) -1, handler.lastCompressionCodec()); + assertFalse(handler.lastWasMetadata()); + assertFalse(handler.hasLastRequestData()); + } + + @Test + void applyFoundDecodedData_whenDecodeFalse_doesNothing() { + USKCompletionHandler handler = new USKCompletionHandler(true); + ClientContext context = mock(ClientContext.class); + + handler.applyFoundDecodedData(false, true, (short) 3, new byte[] {1}, context); + + assertEquals(0, handler.lastCompressionCodec()); + assertFalse(handler.lastWasMetadata()); + assertFalse(handler.hasLastRequestData()); + } + + @Test + void applyFoundDecodedData_whenKeepLastDataTrue_storesBucketAndReleasesBytes() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + TempBucketFactory tempBucketFactory = mock(TempBucketFactory.class); + when(tempBucketFactory.makeBucket(anyLong())).thenAnswer(_ -> new ArrayBucket()); + ClientContext context = mock(ClientContext.class); + setField(context, "tempBucketFactory", tempBucketFactory); + byte[] data = new byte[] {1, 2, 3}; + + handler.applyFoundDecodedData(true, true, (short) 4, data, context); + + assertTrue(handler.hasLastRequestData()); + assertEquals((short) 4, handler.lastCompressionCodec()); + assertTrue(handler.lastWasMetadata()); + assertArrayEquals(data, handler.releaseLastDataBytes()); + 
assertFalse(handler.hasLastRequestData()); + } + + @Test + void applyFoundDecodedData_whenBucketCreationFails_keepsNoDataButUpdatesFlags() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + TempBucketFactory tempBucketFactory = mock(TempBucketFactory.class); + when(tempBucketFactory.makeBucket(anyLong())).thenThrow(new IOException("nope")); + ClientContext context = mock(ClientContext.class); + setField(context, "tempBucketFactory", tempBucketFactory); + + handler.applyFoundDecodedData(true, false, (short) 7, new byte[] {9}, context); + + assertEquals((short) 7, handler.lastCompressionCodec()); + assertFalse(handler.lastWasMetadata()); + assertFalse(handler.hasLastRequestData()); + } + + @Test + void releaseLastDataBytes_whenNoData_returnsNull() { + USKCompletionHandler handler = new USKCompletionHandler(true); + + assertNull(handler.releaseLastDataBytes()); + } + + @Test + void releaseLastDataBytes_whenBucketReadFails_returnsNullAndFrees() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + Bucket bucket = mock(Bucket.class); + when(bucket.size()).thenReturn(2L); + when(bucket.getInputStreamUnbuffered()).thenThrow(new IOException("boom")); + setField(handler, "lastRequestData", bucket); + + assertNull(handler.releaseLastDataBytes()); + + verify(bucket).free(); + assertFalse(handler.hasLastRequestData()); + } + + @Test + void clearLastRequestData_whenBucketExists_freesAndClears() { + USKCompletionHandler handler = new USKCompletionHandler(true); + Bucket bucket = mock(Bucket.class); + setField(handler, "lastRequestData", bucket); + + handler.clearLastRequestData(); + + verify(bucket).free(); + assertFalse(handler.hasLastRequestData()); + } + + @Test + void releaseLastDataBytes_whenBucketPresent_returnsBytesAndClears() throws Exception { + USKCompletionHandler handler = new USKCompletionHandler(true); + ArrayBucket bucket = new ArrayBucket(); + byte[] data = new byte[] {4, 5}; + try (OutputStream os = bucket.getOutputStreamUnbuffered()) { + os.write(data); + } + setField(handler, "lastRequestData", bucket); + + assertArrayEquals(data, handler.releaseLastDataBytes()); + + assertFalse(handler.hasLastRequestData()); + } + + private static void setField(Object target, String fieldName, Object value) { + try { + Field field = target.getClass().getDeclaredField(fieldName); + field.setAccessible(true); + field.set(target, value); + } catch (NoSuchFieldException _) { + try { + Field field = target.getClass().getSuperclass().getDeclaredField(fieldName); + field.setAccessible(true); + field.set(target, value); + } catch (ReflectiveOperationException ex) { + throw new AssertionError(ex); + } + } catch (ReflectiveOperationException e) { + throw new AssertionError(e); + } + } +} diff --git a/src/test/java/network/crypta/client/async/USKKeyWatchSetTest.java b/src/test/java/network/crypta/client/async/USKKeyWatchSetTest.java new file mode 100644 index 0000000000..ddbdb32d55 --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKKeyWatchSetTest.java @@ -0,0 +1,187 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static 
org.mockito.Mockito.when; + +import java.net.MalformedURLException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.SSKBlock; +import network.crypta.keys.SSKVerifyException; +import network.crypta.keys.USK; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKKeyWatchSetTest { + + @Test + void getEditionsToFetch_whenAlreadyRunningAndSuggestedAhead_deduplicatesAndRemovesRunning() + throws Exception { + // Arrange + USK usk = newUsk(createPubKeyHash((byte) 1), cryptoKey((byte) 3), 5L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 2L, 2, false); + USKKeyWatchSet.Lookup runningLookup = new USKKeyWatchSet.Lookup(); + runningLookup.val = 3L; + List<USKKeyWatchSet.Lookup> alreadyRunning = new ArrayList<>(List.of(runningLookup)); + + // Act + USKKeyWatchSet.ToFetch result = + watchSet.getEditionsToFetch(2L, secureRandom(), alreadyRunning, false, true); + + // Assert + assertTrue(alreadyRunning.isEmpty()); + assertEquals(List.of(4L, 5L), editions(result.fetch)); + assertEquals(0, result.poll.length); + } + + @Test + void updateSubscriberHints_whenPersistentAndSuggestedAhead_tracksSurvivingHints() + throws Exception { + // Arrange + USK usk = newUsk(createPubKeyHash((byte) 2), cryptoKey((byte) 7), 10L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + watchSet.addHintEdition(8L, 5L); + + // Act + watchSet.updateSubscriberHints(new Long[] {6L, 6L, 4L, 9L}, 5L); + + // Assert + assertEquals((1L + 4L) * USKKeyWatchSet.WATCH_KEYS, watchSet.size()); + long sizeBefore = watchSet.size(); + watchSet.addHintEdition(4L, 5L); + assertEquals(sizeBefore, watchSet.size()); + } + + @Test + void definitelyWantKey_whenKeyNotNodeSsk_returnsMinusOne() throws Exception { + // Arrange + USK usk = newUsk(createPubKeyHash((byte) 6), cryptoKey((byte) 11), 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + Key key = mock(Key.class); + + // Act + short priority = watchSet.definitelyWantKey(key, 0L, (short) 2); + + // Assert + assertEquals(-1, priority); + } + + @Test + void probablyWantKey_whenPubKeyHashMismatch_returnsFalse() throws Exception { + // Arrange + USK usk = newUsk(createPubKeyHash((byte) 8), cryptoKey((byte) 12), 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + USK otherUsk = newUsk(createPubKeyHash((byte) 9), cryptoKey((byte) 12), 0L); + NodeSSK key = nodeKeyForEditionZero(otherUsk); + + // Act + boolean wanted = watchSet.probablyWantKey(key, 0L); + + // Assert + assertFalse(wanted); + } + + @Test + void getDatastoreCheckers_whenHintsPresent_returnsCheckers() throws Exception { + // Arrange + USK usk = newUsk(createPubKeyHash((byte) 10), cryptoKey((byte) 13), 2L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + + // Act + List<USKKeyWatchSet.KeyList.StoreSubChecker> checkers = watchSet.getDatastoreCheckers(0L); + + // Assert + assertNotNull(checkers); + assertEquals(2, checkers.size()); + assertEquals(USKKeyWatchSet.WATCH_KEYS, checkers.get(0).keysToCheck.length); + assertEquals(USKKeyWatchSet.WATCH_KEYS, checkers.get(1).keysToCheck.length); + } + + @Test + void decode_whenDocnameMismatch_throwsVerifyException() throws Exception { + // Arrange + USK 
usk = newUsk(createPubKeyHash((byte) 11), cryptoKey((byte) 14), 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + ClientSSK csk = usk.getSSK(0L); + byte[] mismatched = copyEhDocname(csk); + + mismatched[0] ^= 0x01; + NodeSSK nodeKey = new NodeSSK(usk.getPubKeyHash(), mismatched, Key.ALGO_AES_PCFB_256_SHA256); + SSKBlock block = mock(SSKBlock.class); + when(block.getKey()).thenReturn(nodeKey); + + // Act + Assert + assertThrows(SSKVerifyException.class, () -> watchSet.decode(block, 0L)); + } + + @Test + void matchBlock_whenNonSskBlock_returnsNull() throws Exception { + // Arrange + USK usk = newUsk(createPubKeyHash((byte) 13), cryptoKey((byte) 22), 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + NodeSSK key = nodeKeyForEditionZero(usk); + KeyBlock block = mock(KeyBlock.class); + + // Act + USKKeyWatchSet.MatchedBlock matched = watchSet.matchBlock(key, block, 0L); + + // Assert + assertNull(matched); + } + + private static byte[] createPubKeyHash(byte seed) { + byte[] data = new byte[NodeSSK.PUBKEY_HASH_SIZE]; + Arrays.fill(data, seed); + return data; + } + + private static List editions(USKKeyWatchSet.Lookup[] lookups) { + return Arrays.stream(lookups).map(lookup -> lookup.val).sorted().toList(); + } + + private static NodeSSK nodeKeyForEditionZero(USK usk) { + ClientSSK csk = usk.getSSK(0L); + return new NodeSSK(usk.getPubKeyHash(), csk.ehDocname, Key.ALGO_AES_PCFB_256_SHA256); + } + + private static byte[] copyEhDocname(ClientSSK csk) { + assertNotNull(csk); + assertNotNull(csk.ehDocname); + return csk.ehDocname.clone(); + } + + private static USK newUsk(byte[] pubKeyHash, byte[] cryptoKey, long suggestedEdition) + throws MalformedURLException { + byte[] extras = + new byte[] { + NodeSSK.SSK_VERSION, 0, Key.ALGO_AES_PCFB_256_SHA256, 0, (byte) KeyBlock.HASH_SHA256 + }; + return new USK(pubKeyHash, cryptoKey, extras, "site", suggestedEdition); + } + + private static Random secureRandom() { + return new SecureRandom(); + } + + private static byte[] cryptoKey(byte seed) { + byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH]; + Arrays.fill(cryptoKey, seed); + return cryptoKey; + } +} diff --git a/src/test/java/network/crypta/client/async/USKPollingRoundTest.java b/src/test/java/network/crypta/client/async/USKPollingRoundTest.java new file mode 100644 index 0000000000..a626da4eca --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKPollingRoundTest.java @@ -0,0 +1,504 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +import java.io.Serial; +import java.net.MalformedURLException; +import java.util.Random; +import network.crypta.client.ArchiveManager; +import network.crypta.client.FetchContext; +import network.crypta.client.FetchContextOptions; +import network.crypta.client.InsertContext; +import network.crypta.client.InsertContextOptions; +import network.crypta.client.events.SimpleEventProducer; +import network.crypta.client.filter.LinkFilterExceptionProvider; +import network.crypta.clients.fcp.PersistentRequestRoot; +import network.crypta.config.Config; +import network.crypta.crypt.MasterSecret; +import 
network.crypta.crypt.RandomSource; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.USK; +import network.crypta.node.ClientContextResources; +import network.crypta.support.MemoryLimitedJobRunner; +import network.crypta.support.PriorityAwareExecutor; +import network.crypta.support.Ticker; +import network.crypta.support.api.LockableRandomAccessBufferFactory; +import network.crypta.support.compress.RealCompressor; +import network.crypta.support.io.FileRandomAccessBufferFactory; +import network.crypta.support.io.FilenameGenerator; +import network.crypta.support.io.PersistentFileTracker; +import network.crypta.support.io.PersistentTempBucketFactory; +import network.crypta.support.io.TempBucketFactory; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKPollingRoundTest { + + private static final class FixedRandomSource extends RandomSource { + @Serial private static final long serialVersionUID = 1L; + + @Override + public int nextInt(int bound) { + return 0; + } + + @Override + public int acceptEntropy( + network.crypta.crypt.EntropySource source, long data, int entropyGuess) { + return 0; + } + + @Override + public int acceptTimerEntropy(network.crypta.crypt.EntropySource timer) { + return 0; + } + + @Override + public int acceptTimerEntropy(network.crypta.crypt.EntropySource fnpTimingSource, double bias) { + return 0; + } + + @Override + public int acceptEntropyBytes( + network.crypta.crypt.EntropySource myPacketDataSource, + byte[] buf, + int offset, + int length, + double bias) { + return 0; + } + + @Override + public void close() { + // No-op for deterministic tests. 
+ } + } + + private static ClientContext minimalContext(RandomSource randomSource) { + return new ClientContext( + 1L, + new ClientContextRuntime( + mock(ClientLayerPersister.class), + mock(PriorityAwareExecutor.class), + mock(MemoryLimitedJobRunner.class), + mock(Ticker.class), + randomSource, + new Random(123), + mock(MasterSecret.class)), + new ClientContextStorageFactories( + mock(PersistentTempBucketFactory.class), + mock(TempBucketFactory.class), + mock(PersistentFileTracker.class), + mock(FilenameGenerator.class), + mock(FilenameGenerator.class), + mock(FileRandomAccessBufferFactory.class), + mock(FileRandomAccessBufferFactory.class)), + new ClientContextRafFactories( + mock(LockableRandomAccessBufferFactory.class), + mock(LockableRandomAccessBufferFactory.class)), + new ClientContextServices( + new ClientContextResources(mock(ArchiveManager.class), mock(HealingQueue.class)), + mock(USKManager.class), + mock(RealCompressor.class), + mock(DatastoreChecker.class), + mock(PersistentRequestRoot.class), + mock(LinkFilterExceptionProvider.class)), + new ClientContextDefaults(newFetchContext(), newInsertContext(), mock(Config.class))); + } + + private static FetchContext newFetchContext() { + return new FetchContext( + FetchContextOptions.builder() + .limits(0, 0, 0) + .archiveLimits(1, 0, 0, true) + .retryLimits(0, 0, 0) + .splitfileLimits(false, 0, 0) + .behavior(false, false, false) + .clientOptions(new SimpleEventProducer(), false, false) + .filterOverrides(null, null, null) + .build()); + } + + private static InsertContext newInsertContext() { + return new InsertContext( + InsertContextOptions.builder() + .retryLimits(0, 0) + .splitfileSegmentLimits(0, 0) + .clientOptions(new SimpleEventProducer(), false, false, false) + .compressorDescriptor(null) + .redundancy(0, 0) + .compatibility(InsertContext.CompatibilityMode.COMPAT_CURRENT) + .build()); + } + + private static USK newUSK(long suggestedEdition) throws MalformedURLException { + byte[] pubKeyHash = new byte[NodeSSK.PUBKEY_HASH_SIZE]; + byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH]; + byte[] extras = + new byte[] { + NodeSSK.SSK_VERSION, 0, Key.ALGO_AES_PCFB_256_SHA256, 0, (byte) KeyBlock.HASH_SHA256 + }; + return new USK(pubKeyHash, cryptoKey, extras, "site", suggestedEdition); + } + + private static USKPollingRound newRound( + USKAttemptManager attempts, + USKStoreCheckCoordinator storeChecks, + USKDateHintFetches dbrHintFetches, + USKSubscriberRegistry subscribers, + USKManager uskManager, + USK usk, + long sleepTime, + boolean firstLoop, + long origSleepTime, + long maxSleepTime) { + USKPollingRoundContext context = + new USKPollingRoundContext( + attempts, storeChecks, dbrHintFetches, subscribers, uskManager, usk, false); + return new USKPollingRound(context, sleepTime, firstLoop, origSleepTime, maxSleepTime); + } + + @ParameterizedTest + @CsvSource({"true,false", "false,true"}) + void resolvePollingAttemptsIfAllChecksDone_whenCancelledOrCompleted_returnsNotReady( + boolean cancelled, boolean completed) throws Exception { + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + + USKPollingRound.PollingResolution res = + round.resolvePollingAttemptsIfAllChecksDone(cancelled, completed); + + assertFalse(res.ready); + assertEquals(0, res.attempts.length); + } + + @Test + void 
resolvePollingAttemptsIfAllChecksDone_whenStoreCheckRunning_returnsNotReady() + throws Exception { + USKStoreCheckCoordinator storeChecks = mock(USKStoreCheckCoordinator.class); + when(storeChecks.isStoreCheckRunning()).thenReturn(true); + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + storeChecks, + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + false, + 50L, + 500L); + + USKPollingRound.PollingResolution res = + round.resolvePollingAttemptsIfAllChecksDone(false, false); + + assertFalse(res.ready); + assertEquals(0, res.attempts.length); + } + + @Test + void resolvePollingAttemptsIfAllChecksDone_whenRunningAttempts_returnsNotReady() + throws Exception { + USKAttemptManager attempts = mock(USKAttemptManager.class); + when(attempts.hasRunningAttempts()).thenReturn(true); + USKPollingRound round = + newRound( + attempts, + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + + USKPollingRound.PollingResolution res = + round.resolvePollingAttemptsIfAllChecksDone(false, false); + + assertFalse(res.ready); + assertEquals(0, res.attempts.length); + } + + @Test + void resolvePollingAttemptsIfAllChecksDone_whenNoPollingAttempts_returnsNotReady() + throws Exception { + USKAttemptManager attempts = mock(USKAttemptManager.class); + when(attempts.hasNoPollingAttempts()).thenReturn(true); + USKPollingRound round = + newRound( + attempts, + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + + USKPollingRound.PollingResolution res = + round.resolvePollingAttemptsIfAllChecksDone(false, false); + + assertFalse(res.ready); + assertEquals(0, res.attempts.length); + } + + @Test + void resolvePollingAttemptsIfAllChecksDone_whenHintsOutstanding_returnsNotReady() + throws Exception { + USKDateHintFetches dbrHintFetches = mock(USKDateHintFetches.class); + when(dbrHintFetches.hasOutstanding()).thenReturn(true); + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + dbrHintFetches, + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + + USKPollingRound.PollingResolution res = + round.resolvePollingAttemptsIfAllChecksDone(false, false); + + assertFalse(res.ready); + assertEquals(0, res.attempts.length); + } + + @Test + void resolvePollingAttemptsIfAllChecksDone_whenReady_returnsSnapshot() throws Exception { + USKAttemptManager attempts = mock(USKAttemptManager.class); + USKAttempt[] snapshot = new USKAttempt[] {mock(USKAttempt.class)}; + when(attempts.snapshotPollingAttempts()).thenReturn(snapshot); + USKPollingRound round = + newRound( + attempts, + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + + USKPollingRound.PollingResolution res = + round.resolvePollingAttemptsIfAllChecksDone(false, false); + + assertTrue(res.ready); + assertSame(snapshot, res.attempts); + } + + @Test + void checkFinishedForNow_whenAttemptNeverCooled_doesNotNotify() throws Exception { + USKAttemptManager attempts = mock(USKAttemptManager.class); + USKAttempt attempt = mock(USKAttempt.class); + 
when(attempts.snapshotPollingAttempts()).thenReturn(new USKAttempt[] {attempt}); + USKSubscriberRegistry subscribers = mock(USKSubscriberRegistry.class); + when(attempt.everInCooldown()).thenReturn(false); + USKPollingRound round = + newRound( + attempts, + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + subscribers, + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + ClientContext context = mock(ClientContext.class); + + round.checkFinishedForNow(context, false, false); + + verifyNoInteractions(subscribers); + } + + @Test + void checkFinishedForNow_whenAllAttemptsCooled_notifiesSubscribers() throws Exception { + USKAttemptManager attempts = mock(USKAttemptManager.class); + USKAttempt attempt = mock(USKAttempt.class); + when(attempt.everInCooldown()).thenReturn(true); + when(attempts.snapshotPollingAttempts()).thenReturn(new USKAttempt[] {attempt}); + USKSubscriberRegistry subscribers = mock(USKSubscriberRegistry.class); + USKProgressCallback callback = mock(USKProgressCallback.class); + USKCallback otherCallback = mock(USKCallback.class); + when(subscribers.snapshotSubscribers()).thenReturn(new USKCallback[] {callback, otherCallback}); + USKPollingRound round = + newRound( + attempts, + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + subscribers, + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + ClientContext context = mock(ClientContext.class); + + round.checkFinishedForNow(context, false, false); + + verify(callback).onRoundFinished(context); + verifyNoInteractions(otherCallback); + } + + @ParameterizedTest + @CsvSource({"true,false", "false,true"}) + void notifyFinishedForNow_whenCancelledOrCompleted_skipsCallbacks( + boolean cancelled, boolean completed) throws Exception { + USKSubscriberRegistry subscribers = mock(USKSubscriberRegistry.class); + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + subscribers, + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + ClientContext context = mock(ClientContext.class); + + round.notifyFinishedForNow(context, cancelled, completed); + + verifyNoInteractions(subscribers); + } + + @Test + void rescheduleBackgroundPoll_whenNoProgress_doublesAndCapsSleepTime() throws Exception { + USKManager manager = mock(USKManager.class); + USK usk = newUSK(10L); + when(manager.lookupLatestSlot(usk)).thenReturn(10L); + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + manager, + usk, + 60L, + true, + 30L, + 100L); + ClientContext context = minimalContext(new FixedRandomSource()); + + long delay = round.rescheduleBackgroundPoll(context, 10L); + + assertEquals(0L, delay); + assertEquals(100L, round.sleepTime()); + assertTrue(round.firstLoop()); + } + + @Test + void rescheduleBackgroundPoll_whenNoProgress_keepsBackoffUnderMax() throws Exception { + USKManager manager = mock(USKManager.class); + USK usk = newUSK(10L); + when(manager.lookupLatestSlot(usk)).thenReturn(10L); + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + manager, + usk, + 40L, + true, + 30L, + 200L); + ClientContext context = minimalContext(new FixedRandomSource()); + + long delay = round.rescheduleBackgroundPoll(context, 10L); + 
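+ // No new edition is found here (lookupLatestSlot still returns 10), so the round is expected to double its sleep of 60 and clamp it to the configured maximum of 100.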
+ assertEquals(0L, delay); + assertEquals(80L, round.sleepTime()); + assertTrue(round.firstLoop()); + } + + @Test + void rescheduleBackgroundPoll_whenProgressDetected_resetsSleepTimeAndFirstLoop() + throws Exception { + USKManager manager = mock(USKManager.class); + USK usk = newUSK(10L); + when(manager.lookupLatestSlot(usk)).thenReturn(15L); + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + manager, + usk, + 40L, + true, + 20L, + 200L); + ClientContext context = minimalContext(new FixedRandomSource()); + + long delay = round.rescheduleBackgroundPoll(context, 12L); + + assertEquals(0L, delay); + assertEquals(20L, round.sleepTime()); + assertFalse(round.firstLoop()); + } + + @Test + void setFirstLoop_whenInvoked_updatesState() throws Exception { + USKPollingRound round = + newRound( + mock(USKAttemptManager.class), + mock(USKStoreCheckCoordinator.class), + mock(USKDateHintFetches.class), + mock(USKSubscriberRegistry.class), + mock(USKManager.class), + newUSK(0L), + 100L, + true, + 50L, + 500L); + + round.setFirstLoop(false); + + //noinspection ConstantValue + assertFalse(round.firstLoop()); + } +} diff --git a/src/test/java/network/crypta/client/async/USKPriorityPolicyTest.java b/src/test/java/network/crypta/client/async/USKPriorityPolicyTest.java new file mode 100644 index 0000000000..f88d3fc1ea --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKPriorityPolicyTest.java @@ -0,0 +1,95 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.stream.Stream; +import network.crypta.node.RequestStarter; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKPriorityPolicyTest { + + @Mock private USKAttemptManager attempts; + + private USKPriorityPolicy policy; + + @BeforeEach + void setUp() { + policy = new USKPriorityPolicy(attempts); + } + + @Test + void normalPriority_whenNewPolicy_returnsDefaults() { + assertEquals(RequestStarter.PREFETCH_PRIORITY_CLASS, policy.normalPriority()); + assertEquals(RequestStarter.UPDATE_PRIORITY_CLASS, policy.progressPriority()); + } + + @Test + void updatePriorities_whenCallbacksProvideMinValues_updatesToMinimumAcrossArrays() { + USKCallback subscriberA = mock(USKCallback.class); + USKCallback subscriberB = mock(USKCallback.class); + USKFetcherCallback fetcherCallback = mock(USKFetcherCallback.class); + when(subscriberA.getPollingPriorityNormal()).thenReturn((short) 4); + when(subscriberA.getPollingPriorityProgress()).thenReturn((short) 4); + when(subscriberB.getPollingPriorityNormal()).thenReturn((short) 2); + when(subscriberB.getPollingPriorityProgress()).thenReturn((short) 5); + when(fetcherCallback.getPollingPriorityNormal()).thenReturn((short) 3); + when(fetcherCallback.getPollingPriorityProgress()).thenReturn((short) 1); + + 
policy.updatePriorities( + new USKCallback[] {subscriberA, subscriberB}, + new USKFetcherCallback[] {fetcherCallback}, + "fetcher"); + + assertEquals(2, policy.normalPriority()); + assertEquals(1, policy.progressPriority()); + verify(subscriberA).getPollingPriorityNormal(); + verify(subscriberA).getPollingPriorityProgress(); + verify(subscriberB).getPollingPriorityNormal(); + verify(subscriberB).getPollingPriorityProgress(); + verify(fetcherCallback).getPollingPriorityNormal(); + verify(fetcherCallback).getPollingPriorityProgress(); + verify(attempts).reloadPollParameters(); + } + + @Test + void updatePriorities_whenNoCallbacks_resetsToDefaultsAndReloads() { + USKCallback subscriber = mock(USKCallback.class); + when(subscriber.getPollingPriorityNormal()).thenReturn((short) 2); + when(subscriber.getPollingPriorityProgress()).thenReturn((short) 1); + + policy.updatePriorities(new USKCallback[] {subscriber}, new USKFetcherCallback[0], "fetcher"); + policy.updatePriorities(new USKCallback[0], new USKFetcherCallback[0], "fetcher"); + + assertEquals(RequestStarter.PREFETCH_PRIORITY_CLASS, policy.normalPriority()); + assertEquals(RequestStarter.UPDATE_PRIORITY_CLASS, policy.progressPriority()); + verify(attempts, times(2)).reloadPollParameters(); + } + + @ParameterizedTest + @MethodSource("nullCallbackInputs") + void updatePriorities_whenNullCallbacks_throwsNullPointerException( + USKCallback[] subscribers, USKFetcherCallback[] fetcherCallbacks) { + assertThrows( + NullPointerException.class, + () -> policy.updatePriorities(subscribers, fetcherCallbacks, "fetcher")); + } + + private static Stream nullCallbackInputs() { + return Stream.of( + Arguments.of(null, new USKFetcherCallback[0]), Arguments.of(new USKCallback[0], null)); + } +} diff --git a/src/test/java/network/crypta/client/async/USKSchedulingCoordinatorTest.java b/src/test/java/network/crypta/client/async/USKSchedulingCoordinatorTest.java new file mode 100644 index 0000000000..fc9e03d2e7 --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKSchedulingCoordinatorTest.java @@ -0,0 +1,138 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKSchedulingCoordinatorTest { + + @Mock private USKAttemptManager attempts; + @Mock private USKStoreCheckCoordinator storeChecks; + @Mock private USKDateHintFetches dbrHintFetches; + @Mock private ClientContext context; + + @Test + void buildSchedulePlan_whenNoAttemptsAndStoreChecksNeeded_addsAttemptsAndRegisters() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + when(attempts.hasPendingAttempts()).thenReturn(false); + when(attempts.hasRunningAttempts()).thenReturn(false); + when(attempts.hasNoPollingAttempts()).thenReturn(true); + when(storeChecks.fillKeysWatching(3L, context)).thenReturn(false); + + USKSchedulingCoordinator.SchedulePlan plan = + coordinator.buildSchedulePlan(3L, false, context, true); + + verify(attempts).addNewAttempts(3L, context, true); + verify(storeChecks).fillKeysWatching(3L, context); + 
assertTrue(plan.registerNow); + assertFalse(plan.completeCheckingStore); + assertTrue(coordinator.isStarted()); + assertFalse(coordinator.scheduleAfterDBRsDone()); + assertEquals(4L, coordinator.valueAtSchedule()); + } + + @Test + void buildSchedulePlan_whenStoreCheckAlreadyRunning_doesNotRegister() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + when(storeChecks.fillKeysWatching(5L, context)).thenReturn(true); + + USKSchedulingCoordinator.SchedulePlan plan = + coordinator.buildSchedulePlan(5L, false, context, false); + + verify(storeChecks).fillKeysWatching(5L, context); + assertFalse(plan.registerNow); + } + + @Test + void buildSchedulePlan_whenStartedDBRsWithUnknownEdition_defersScheduling() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + + USKSchedulingCoordinator.SchedulePlan plan = + coordinator.buildSchedulePlan(0L, true, context, false); + + assertFalse(plan.registerNow); + assertTrue(coordinator.scheduleAfterDBRsDone()); + verify(storeChecks, never()).fillKeysWatching(0L, context); + } + + @Test + void buildSchedulePlan_whenDeferredAndDBRsOutstanding_skipsStoreCheck() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + coordinator.setScheduleAfterDBRsDone(true); + when(dbrHintFetches.hasOutstanding()).thenReturn(true); + + USKSchedulingCoordinator.SchedulePlan plan = + coordinator.buildSchedulePlan(2L, false, context, false); + + verify(dbrHintFetches).hasOutstanding(); + verify(storeChecks, never()).fillKeysWatching(2L, context); + assertFalse(plan.registerNow); + } + + @Test + void buildSchedulePlan_whenDeferredAndDBRsComplete_registersStoreCheck() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + coordinator.setScheduleAfterDBRsDone(true); + when(dbrHintFetches.hasOutstanding()).thenReturn(false); + when(storeChecks.fillKeysWatching(7L, context)).thenReturn(false); + + USKSchedulingCoordinator.SchedulePlan plan = + coordinator.buildSchedulePlan(7L, false, context, false); + + verify(dbrHintFetches).hasOutstanding(); + verify(storeChecks).fillKeysWatching(7L, context); + assertTrue(plan.registerNow); + } + + @Test + void buildSchedulePlan_whenStoreOnlyAndChecksFinished_marksCompleteCheckingStore() { + USKSchedulingCoordinator coordinator = newCoordinator(true); + coordinator.setScheduleAfterDBRsDone(true); + when(storeChecks.fillKeysWatching(9L, context)).thenReturn(true); + when(storeChecks.isStoreCheckRunning()).thenReturn(false); + + USKSchedulingCoordinator.SchedulePlan plan = + coordinator.buildSchedulePlan(9L, false, context, false); + + verify(attempts, never()).addNewAttempts(9L, context, false); + assertTrue(plan.completeCheckingStore); + assertFalse(plan.registerNow); + } + + @Test + void valueAtSchedule_whenCalledMultipleTimes_tracksMaxValue() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + when(storeChecks.fillKeysWatching(4L, context)).thenReturn(true); + when(storeChecks.fillKeysWatching(1L, context)).thenReturn(true); + + coordinator.buildSchedulePlan(4L, false, context, false); + coordinator.buildSchedulePlan(1L, false, context, false); + + assertEquals(5L, coordinator.valueAtSchedule()); + } + + @Test + void resetStarted_whenCalled_clearsStartedFlag() { + USKSchedulingCoordinator coordinator = newCoordinator(false); + when(storeChecks.fillKeysWatching(6L, context)).thenReturn(true); + + coordinator.buildSchedulePlan(6L, false, context, false); + coordinator.resetStarted(); + + assertFalse(coordinator.isStarted()); + } + + private USKSchedulingCoordinator 
newCoordinator(boolean checkStoreOnly) { + return new USKSchedulingCoordinator(attempts, storeChecks, dbrHintFetches, checkStoreOnly); + } +} diff --git a/src/test/java/network/crypta/client/async/USKStoreCheckCoordinatorTest.java b/src/test/java/network/crypta/client/async/USKStoreCheckCoordinatorTest.java new file mode 100644 index 0000000000..d30b47d239 --- /dev/null +++ b/src/test/java/network/crypta/client/async/USKStoreCheckCoordinatorTest.java @@ -0,0 +1,472 @@ +package network.crypta.client.async; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Constructor; +import java.net.MalformedURLException; +import java.util.Arrays; +import java.util.List; +import network.crypta.keys.ClientSSK; +import network.crypta.keys.Key; +import network.crypta.keys.KeyBlock; +import network.crypta.keys.NodeSSK; +import network.crypta.keys.USK; +import network.crypta.node.SendableGet; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.mockito.ArgumentCaptor; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +@SuppressWarnings("java:S100") +class USKStoreCheckCoordinatorTest { + + @Test + @DisplayName("fillKeysWatching_whenCheckerAlreadyRunning_returnsTrueWithoutRegistering") + void fillKeysWatching_whenCheckerAlreadyRunning_returnsTrueWithoutRegistering() throws Exception { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USK origUsk = mock(USK.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + USKStoreCheckCoordinator coordinator = + newCoordinator(watchingKeys, attempts, parent, false, uskManager, origUsk, callbacks, true); + ClientContext context = mock(ClientContext.class); + setRunningStoreChecker(coordinator, mock(USKStoreCheckerGetter.class)); + + // Act + boolean result = coordinator.fillKeysWatching(5L, context); + + // Assert + assertTrue(result); + verifyNoInteractions(watchingKeys); + } + + @Test + @DisplayName("fillKeysWatching_whenNoDatastoreCheckers_returnsFalse") + void fillKeysWatching_whenNoDatastoreCheckers_returnsFalse() { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USK origUsk = mock(USK.class); + 
USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + when(watchingKeys.getDatastoreCheckers(10L)).thenReturn(null); + + USKStoreCheckCoordinator coordinator = + newCoordinator( + watchingKeys, attempts, parent, false, uskManager, origUsk, callbacks, false); + ClientContext context = mock(ClientContext.class); + + // Act + boolean result = coordinator.fillKeysWatching(10L, context); + + // Assert + assertFalse(result); + } + + @Test + @DisplayName("fillKeysWatching_whenRegisterThrows_clearsRunningChecker") + void fillKeysWatching_whenRegisterThrows_clearsRunningChecker() throws Exception { + // Arrange + USK usk = newUsk((byte) 1, (byte) 2, 1L); + USKKeyWatchSet watchingKeys = new USKKeyWatchSet(usk, 0L, 1, false); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + + USKStoreCheckCoordinator coordinator = + newCoordinator(watchingKeys, attempts, parent, false, uskManager, usk, callbacks, true); + ClientContext context = mock(ClientContext.class); + ClientRequestScheduler scheduler = mock(ClientRequestScheduler.class); + when(context.getSskFetchScheduler(true)).thenReturn(scheduler); + doThrow(new IllegalStateException("boom")) + .when(scheduler) + .register(eq(null), any(SendableGet[].class), eq(false), eq(null), eq(false)); + + // Act + boolean result = coordinator.fillKeysWatching(0L, context); + + // Assert + assertTrue(result); + assertFalse(coordinator.isStoreCheckRunning()); + } + + @Test + @DisplayName("preRegisterStoreChecker_whenCancelled_unregistersAndStops") + void preRegisterStoreChecker_whenCancelled_unregistersAndStops() { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USK origUsk = mock(USK.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + when(callbacks.isCancelled()).thenReturn(true); + + USKStoreCheckCoordinator coordinator = + newCoordinator(watchingKeys, attempts, parent, false, uskManager, origUsk, callbacks, true); + USKStoreCheckerGetter storeChecker = mock(USKStoreCheckerGetter.class); + when(storeChecker.getPriorityClass()).thenReturn((short) 2); + USKStoreCheckCoordinator.USKStoreChecker checker = + mock(USKStoreCheckCoordinator.USKStoreChecker.class); + ClientContext context = mock(ClientContext.class); + + // Act + boolean result = coordinator.preRegisterStoreChecker(storeChecker, checker, context, true); + + // Assert + //noinspection ConstantValue + assertTrue(result); + assertFalse(coordinator.isStoreCheckRunning()); + verify(storeChecker).unregister(context, (short) 2); + verifyNoInteractions(attempts); + verifyNoInteractions(parent); + verifyNoInteractions(checker); + } + + @Test + @DisplayName("preRegisterStoreChecker_whenAttemptsAvailable_sendsToNetworkAndProcesses") + void preRegisterStoreChecker_whenAttemptsAvailable_sendsToNetworkAndProcesses() { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = 
mock(USKManager.class); + USK origUsk = mock(USK.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + when(callbacks.isCancelled()).thenReturn(false); + + ClientContext context = mock(ClientContext.class); + USKStoreCheckCoordinator coordinator = + spy( + newCoordinator( + watchingKeys, attempts, parent, false, uskManager, origUsk, callbacks, true)); + doReturn(true).when(coordinator).fillKeysWatching(eq(42L), same(context)); + when(uskManager.lookupLatestSlot(origUsk)).thenReturn(42L); + + USKStoreCheckerGetter storeChecker = mock(USKStoreCheckerGetter.class); + when(storeChecker.getPriorityClass()).thenReturn((short) 1); + USKStoreCheckCoordinator.USKStoreChecker checker = + mock(USKStoreCheckCoordinator.USKStoreChecker.class); + USKAttempt[] attemptsToStart = new USKAttempt[] {mock(USKAttempt.class)}; + when(attempts.snapshotAttemptsToStart()).thenReturn(attemptsToStart); + + // Act + boolean result = coordinator.preRegisterStoreChecker(storeChecker, checker, context, false); + + // Assert + //noinspection ConstantValue + assertFalse(result); + verify(storeChecker).unregister(context, (short) 1); + verify(attempts).snapshotAttemptsToStart(); + verify(attempts).clearAttemptsToStart(); + verify(checker).checked(); + verify(parent).toNetwork(context); + verify(callbacks).notifySendingToNetwork(context); + verify(callbacks).processAttemptsAfterStoreCheck(attemptsToStart, context); + verify(uskManager).lookupLatestSlot(origUsk); + verify(coordinator).fillKeysWatching(42L, context); + } + + @ParameterizedTest(name = "defer={0}") + @CsvSource({"true", "false"}) + @DisplayName("preRegisterStoreChecker_whenStoreOnlyAndChecksFinished_finishesOrDefers") + void preRegisterStoreChecker_whenStoreOnlyAndChecksFinished_finishesOrDefers(boolean defer) { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USK origUsk = mock(USK.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + when(callbacks.isCancelled()).thenReturn(false); + when(callbacks.shouldDeferUntilDBRs()).thenReturn(defer); + + ClientContext context = mock(ClientContext.class); + USKStoreCheckCoordinator coordinator = + spy( + newCoordinator( + watchingKeys, attempts, parent, true, uskManager, origUsk, callbacks, true)); + doReturn(false).when(coordinator).fillKeysWatching(eq(9L), same(context)); + when(uskManager.lookupLatestSlot(origUsk)).thenReturn(9L); + USKAttempt[] attemptsToStart = new USKAttempt[0]; + when(attempts.snapshotAttemptsToStart()).thenReturn(attemptsToStart); + + USKStoreCheckerGetter storeChecker = mock(USKStoreCheckerGetter.class); + when(storeChecker.getPriorityClass()).thenReturn((short) 4); + USKStoreCheckCoordinator.USKStoreChecker checker = + mock(USKStoreCheckCoordinator.USKStoreChecker.class); + + // Act + boolean result = coordinator.preRegisterStoreChecker(storeChecker, checker, context, true); + + // Assert + //noinspection ConstantValue + assertTrue(result); + verify(callbacks).processAttemptsAfterStoreCheck(attemptsToStart, context); + if (defer) { + verify(callbacks).setScheduleAfterDBRsDone(true); + verify(callbacks, never()).finishSuccess(context); + } else { + verify(callbacks).finishSuccess(context); + verify(callbacks, 
never()).setScheduleAfterDBRsDone(true); + } + } + + @Test + @DisplayName("preRegisterStoreChecker_whenCancelledAfterSnapshot_ignoresAttempts") + void preRegisterStoreChecker_whenCancelledAfterSnapshot_ignoresAttempts() { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USK origUsk = mock(USK.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + when(callbacks.isCancelled()).thenReturn(false, true); + + ClientContext context = mock(ClientContext.class); + USKStoreCheckCoordinator coordinator = + spy( + newCoordinator( + watchingKeys, attempts, parent, false, uskManager, origUsk, callbacks, true)); + doReturn(true).when(coordinator).fillKeysWatching(eq(4L), same(context)); + when(uskManager.lookupLatestSlot(origUsk)).thenReturn(4L); + + USKStoreCheckerGetter storeChecker = mock(USKStoreCheckerGetter.class); + when(storeChecker.getPriorityClass()).thenReturn((short) 3); + USKStoreCheckCoordinator.USKStoreChecker checker = + mock(USKStoreCheckCoordinator.USKStoreChecker.class); + USKAttempt[] attemptsToStart = new USKAttempt[] {mock(USKAttempt.class)}; + when(attempts.snapshotAttemptsToStart()).thenReturn(attemptsToStart); + + // Act + coordinator.preRegisterStoreChecker(storeChecker, checker, context, false); + + // Assert + ArgumentCaptor<USKAttempt[]> captor = ArgumentCaptor.forClass(USKAttempt[].class); + verify(callbacks).processAttemptsAfterStoreCheck(captor.capture(), same(context)); + assertEquals(0, captor.getValue().length); + verify(parent, never()).toNetwork(context); + verify(callbacks, never()).notifySendingToNetwork(context); + } + + @Test + @DisplayName("cancelStoreChecker_whenRunning_unregistersAndClears") + void cancelStoreChecker_whenRunning_unregistersAndClears() throws Exception { + // Arrange + USKKeyWatchSet watchingKeys = mock(USKKeyWatchSet.class); + USKAttemptManager attempts = mock(USKAttemptManager.class); + ClientRequester parent = mock(ClientRequester.class); + USKManager uskManager = mock(USKManager.class); + USK origUsk = mock(USK.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); + USKStoreCheckCoordinator coordinator = + newCoordinator(watchingKeys, attempts, parent, false, uskManager, origUsk, callbacks, true); + + USKStoreCheckerGetter storeChecker = mock(USKStoreCheckerGetter.class); + when(storeChecker.getPriorityClass()).thenReturn((short) 7); + setRunningStoreChecker(coordinator, storeChecker); + ClientContext context = mock(ClientContext.class); + + // Act + coordinator.cancelStoreChecker(context); + + // Assert + assertFalse(coordinator.isStoreCheckRunning()); + verify(storeChecker).unregister(context, (short) 7); + } + + @Test + @DisplayName("uskStoreChecker_getKeys_whenSingleChecker_returnsOriginalArray") + void uskStoreChecker_getKeys_whenSingleChecker_returnsOriginalArray() throws Exception { + // Arrange + USK usk = newUsk((byte) 9, (byte) 10, 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + NodeSSK key = nodeKeyForEdition(usk, 0L); + USKKeyWatchSet.KeyList.StoreSubChecker subChecker = + newStoreSubChecker(watchSet, new NodeSSK[] {key}); + USKStoreCheckCoordinator coordinator = + newCoordinator( + watchSet, + mock(USKAttemptManager.class), + mock(ClientRequester.class), + false, + 
mock(USKManager.class), + usk, + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class), + true); + + USKStoreCheckCoordinator.USKStoreChecker checker = + coordinator.new USKStoreChecker(List.of(subChecker)); + + // Act + Key[] keys = checker.getKeys(); + + // Assert + assertSame(subChecker.keysToCheck, keys); + } + + @Test + @DisplayName("uskStoreChecker_getKeys_whenMultipleCheckers_deduplicates") + void uskStoreChecker_getKeys_whenMultipleCheckers_deduplicates() throws Exception { + // Arrange + USK usk = newUsk((byte) 12, (byte) 13, 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + NodeSSK key1 = nodeKeyForEdition(usk, 0L); + NodeSSK key2 = nodeKeyForEdition(usk, 1L); + NodeSSK key3 = nodeKeyForEdition(usk, 2L); + USKKeyWatchSet.KeyList.StoreSubChecker first = + newStoreSubChecker(watchSet, new NodeSSK[] {key1, key2}); + USKKeyWatchSet.KeyList.StoreSubChecker second = + newStoreSubChecker(watchSet, new NodeSSK[] {key2, key3}); + + USKStoreCheckCoordinator coordinator = + newCoordinator( + watchSet, + mock(USKAttemptManager.class), + mock(ClientRequester.class), + false, + mock(USKManager.class), + usk, + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class), + true); + + USKStoreCheckCoordinator.USKStoreChecker checker = + coordinator.new USKStoreChecker(List.of(first, second)); + + // Act + Key[] keys = checker.getKeys(); + + // Assert + assertArrayEquals(new Key[] {key1, key2, key3}, keys); + } + + @Test + @DisplayName("uskStoreChecker_checked_whenCalled_notifiesSubCheckers") + void uskStoreChecker_checked_whenCalled_notifiesSubCheckers() throws Exception { + // Arrange + USK usk = newUsk((byte) 20, (byte) 21, 0L); + USKKeyWatchSet watchSet = new USKKeyWatchSet(usk, 0L, 1, false); + NodeSSK key = nodeKeyForEdition(usk, 0L); + USKKeyWatchSet.KeyList.StoreSubChecker realChecker = + newStoreSubChecker(watchSet, new NodeSSK[] {key}); + USKKeyWatchSet.KeyList.StoreSubChecker subChecker = spy(realChecker); + + USKStoreCheckCoordinator coordinator = + newCoordinator( + watchSet, + mock(USKAttemptManager.class), + mock(ClientRequester.class), + false, + mock(USKManager.class), + usk, + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class), + true); + + USKStoreCheckCoordinator.USKStoreChecker checker = + coordinator.new USKStoreChecker(List.of(subChecker)); + + // Act + checker.checked(); + + // Assert + verify(subChecker).checked(); + } + + private static USKStoreCheckCoordinator newCoordinator( + USKKeyWatchSet watchingKeys, + USKAttemptManager attempts, + ClientRequester parent, + boolean checkStoreOnly, + USKManager uskManager, + USK origUsk, + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks, + boolean realTimeFlag) { + USKStoreCheckCoordinator.Params params = + USKStoreCheckCoordinator.Params.builder() + .watchingKeys(watchingKeys) + .attempts(attempts) + .parent(parent) + .checkStoreOnly(checkStoreOnly) + .uskManager(uskManager) + .origUSK(origUsk) + .callbacks(callbacks) + .realTimeFlag(realTimeFlag) + .build(); + return new USKStoreCheckCoordinator(params); + } + + private static USK newUsk(byte pubKeySeed, byte cryptoSeed, long suggestedEdition) + throws MalformedURLException { + byte[] pubKeyHash = new byte[NodeSSK.PUBKEY_HASH_SIZE]; + byte[] cryptoKey = new byte[ClientSSK.CRYPTO_KEY_LENGTH]; + byte[] extras = + new byte[] { + NodeSSK.SSK_VERSION, 0, Key.ALGO_AES_PCFB_256_SHA256, 0, (byte) KeyBlock.HASH_SHA256 + }; + Arrays.fill(pubKeyHash, pubKeySeed); + Arrays.fill(cryptoKey, cryptoSeed); + return new USK(pubKeyHash, cryptoKey, 
extras, "site", suggestedEdition); + } + + private static NodeSSK nodeKeyForEdition(USK usk, long edition) { + ClientSSK clientKey = usk.getSSK(edition); + return new NodeSSK(usk.getPubKeyHash(), clientKey.ehDocname, Key.ALGO_AES_PCFB_256_SHA256); + } + + @SuppressWarnings("java:S3011") + private static USKKeyWatchSet.KeyList.StoreSubChecker newStoreSubChecker( + USKKeyWatchSet watchSet, NodeSSK[] keys) { + USKKeyWatchSet.KeyList keyList = watchSet.new KeyList(0L); + try { + Constructor<USKKeyWatchSet.KeyList.StoreSubChecker> constructor = + USKKeyWatchSet.KeyList.StoreSubChecker.class.getDeclaredConstructor( + USKKeyWatchSet.KeyList.class, NodeSSK[].class, long.class, long.class); + constructor.setAccessible(true); + return constructor.newInstance(keyList, keys, 0L, (long) keys.length); + } catch (ReflectiveOperationException e) { + throw new AssertionError("Unable to build StoreSubChecker for test", e); + } + } + + @SuppressWarnings("java:S3011") + private static void setRunningStoreChecker( + USKStoreCheckCoordinator coordinator, USKStoreCheckerGetter checker) throws Exception { + java.lang.reflect.Field field = + USKStoreCheckCoordinator.class.getDeclaredField("runningStoreChecker"); + field.setAccessible(true); + field.set(coordinator, checker); + } +} diff --git a/src/test/java/network/crypta/client/async/USKStoreCheckerGetterTest.java b/src/test/java/network/crypta/client/async/USKStoreCheckerGetterTest.java index 9a220a49f5..3e98d16fd1 100644 --- a/src/test/java/network/crypta/client/async/USKStoreCheckerGetterTest.java +++ b/src/test/java/network/crypta/client/async/USKStoreCheckerGetterTest.java @@ -37,11 +37,14 @@ class USKStoreCheckerGetterTest { void getContext_whenFetcherHasContext_returnsFetcherContext() { // Arrange FetchContext expectedContext = mock(FetchContext.class); - USKFetcher fetcher = mockFetcherWithContext(expectedContext); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, expectedContext); + USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class); ClientRequester parent = mock(ClientRequester.class); - USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class); + USKStoreCheckCoordinator.USKStoreChecker checker = + mock(USKStoreCheckCoordinator.USKStoreChecker.class); - USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker); + USKStoreCheckerGetter getter = + new USKStoreCheckerGetter(coordinator, callbacks, parent, checker); // Act FetchContext actualContext = getter.getContext(); @@ -95,13 +98,17 @@ void chooseKey_whenCalled_returnsNull() { @DisplayName("listKeys_whenCheckerReturnsArray_returnsSameArray") void listKeys_whenCheckerReturnsArray_returnsSameArray() { // Arrange - USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class)); + USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class); + USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = + mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class); ClientRequester parent = mock(ClientRequester.class); - USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class); + USKStoreCheckCoordinator.USKStoreChecker checker = + mock(USKStoreCheckCoordinator.USKStoreChecker.class); Key[] expectedKeys = new Key[] {mock(Key.class), mock(Key.class)}; when(checker.getKeys()).thenReturn(expectedKeys); - USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker); + USKStoreCheckerGetter getter = + new USKStoreCheckerGetter(coordinator, callbacks, parent, checker); // Act Key[] 
actualKeys = getter.listKeys();
@@ -117,11 +124,15 @@ void countAllKeys_whenFetcherReturnsCount_delegatesToFetcherCountKeys() {
     // Arrange
     USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
     when(fetcher.countKeys()).thenReturn(123L);
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(fetcher, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
     ClientContext context = mock(ClientContext.class);
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     long count = getter.countAllKeys(context);
 
@@ -174,11 +185,14 @@ void isSSK_whenCalled_returnsTrue() {
   @DisplayName("getClientRequest_whenCalled_returnsParent")
   void getClientRequest_whenCalled_returnsParent() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     ClientRequester clientRequest = getter.getClientRequest();
@@ -192,10 +206,14 @@ void getClientRequest_whenCalled_returnsParent() {
   void getClientGetState_whenCalled_returnsFetcher() {
     // Arrange
     USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(fetcher, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     ClientGetState state = getter.getClientGetState();
@@ -210,10 +228,14 @@ void getPriorityClass_whenFetcherReturnsValue_delegatesToFetcher() {
     // Arrange
     USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
     when(fetcher.getPriorityClass()).thenReturn((short) 7);
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(fetcher, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     short priority = getter.getPriorityClass();
@@ -227,12 +249,15 @@ void getPriorityClass_whenFetcherReturnsValue_delegatesToFetcher() {
   @DisplayName("getClient_whenParentIsRealTime_returnsRcRt")
   void getClient_whenParentIsRealTime_returnsRcRt() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
     ClientRequester parent = mock(ClientRequester.class);
     when(parent.realTimeFlag()).thenReturn(true);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     RequestClient client = getter.getClient();
@@ -245,12 +270,15 @@ void getClient_whenParentIsRealTime_returnsRcRt() {
   @DisplayName("getClient_whenParentIsBulk_returnsRcBulk")
   void getClient_whenParentIsBulk_returnsRcBulk() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
     ClientRequester parent = mock(ClientRequester.class);
     when(parent.realTimeFlag()).thenReturn(false);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     RequestClient client = getter.getClient();
@@ -263,38 +291,44 @@ void getClient_whenParentIsBulk_returnsRcBulk() {
   @DisplayName("isCancelled_whenNotDoneAndFetcherNotCancelled_returnsFalse")
   void isCancelled_whenNotDoneAndFetcherNotCancelled_returnsFalse() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
-    when(fetcher.isCancelled()).thenReturn(false);
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
+    when(callbacks.isCancelled()).thenReturn(false);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     boolean cancelled = getter.isCancelled();
 
     // Assert
     assertFalse(cancelled);
-    verify(fetcher).isCancelled();
+    verify(callbacks).isCancelled();
   }
 
   @Test
   @DisplayName("isCancelled_whenFetcherCancelled_returnsTrue")
   void isCancelled_whenFetcherCancelled_returnsTrue() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
-    when(fetcher.isCancelled()).thenReturn(true);
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
+    when(callbacks.isCancelled()).thenReturn(true);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     boolean cancelled = getter.isCancelled();
 
     // Assert
     assertTrue(cancelled);
-    verify(fetcher).isCancelled();
+    verify(callbacks).isCancelled();
   }
 
   @ParameterizedTest(name = "toNetwork={0}, delegateReturn={1}")
@@ -302,13 +336,16 @@ void isCancelled_whenFetcherCancelled_returnsTrue() {
   @DisplayName("preRegister_whenCalled_delegatesAndMarksDone")
   void preRegister_whenCalled_delegatesAndMarksDone(boolean toNetwork, boolean delegateReturn) {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
     ClientContext context = mock(ClientContext.class);
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
-    when(fetcher.preRegisterStoreChecker(any(), same(checker), same(context), eq(toNetwork)))
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
+    when(coordinator.preRegisterStoreChecker(any(), same(checker), same(context), eq(toNetwork)))
         .thenReturn(delegateReturn);
 
     // Act
@@ -317,7 +354,7 @@ void preRegister_whenCalled_delegatesAndMarksDone(boolean toNetwork, boolean del
     // Assert
     assertEquals(delegateReturn, actualReturn);
     assertTrue(getter.isCancelled(), "preRegister must mark the SendableGet as done in all cases");
-    verify(fetcher)
+    verify(coordinator)
         .preRegisterStoreChecker(same(getter), same(checker), same(context), eq(toNetwork));
   }
 
@@ -325,13 +362,16 @@ void preRegister_whenCalled_delegatesAndMarksDone(boolean toNetwork, boolean del
   @DisplayName("preRegister_whenFetcherThrows_propagatesAndMarksDone")
   void preRegister_whenFetcherThrows_propagatesAndMarksDone() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
     ClientContext context = mock(ClientContext.class);
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
-    when(fetcher.preRegisterStoreChecker(any(), same(checker), same(context), eq(true)))
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
+    when(coordinator.preRegisterStoreChecker(any(), same(checker), same(context), eq(true)))
        .thenThrow(new IllegalStateException("boom"));
 
     // Act
@@ -347,38 +387,59 @@ void preRegister_whenFetcherThrows_propagatesAndMarksDone() {
   @DisplayName("onFailure_whenCalled_doesNotThrowAndDoesNotMarkDone")
   void onFailure_whenCalled_doesNotThrowAndDoesNotMarkDone() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
-    when(fetcher.isCancelled()).thenReturn(false);
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
+    when(callbacks.isCancelled()).thenReturn(false);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
-    USKStoreCheckerGetter getter = new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckerGetter getter =
+        new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
 
     // Act
     getter.onFailure(mock(LowLevelGetException.class), null, null);
 
     // Assert
     assertFalse(getter.isCancelled(), "onFailure is expected to be a no-op for store checking");
-    verify(fetcher).isCancelled();
+    verify(callbacks).isCancelled();
   }
 
   @Test
   @DisplayName("constructor_whenParentIsNull_throwsNullPointerException")
   void constructor_whenParentIsNull_throwsNullPointerException() {
     // Arrange
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
 
     // Act + Assert
     assertThrows(
-        NullPointerException.class, () -> new USKStoreCheckerGetter(fetcher, null, checker));
+        NullPointerException.class,
+        () -> new USKStoreCheckerGetter(coordinator, callbacks, null, checker));
+  }
+
+  private static USKStoreCheckCoordinator.USKStoreCheckCallbacks newCallbacks(
+      USKFetcher fetcher, FetchContext context) {
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks =
+        mock(USKStoreCheckCoordinator.USKStoreCheckCallbacks.class);
+    if (fetcher != null) {
+      when(callbacks.fetcher()).thenReturn(fetcher);
+    }
+    if (context != null) {
+      when(callbacks.fetcherContext()).thenReturn(context);
+    }
+    return callbacks;
   }
 
   private static USKStoreCheckerGetter newGetter() {
-    USKFetcher fetcher = mockFetcherWithContext(mock(FetchContext.class));
+    USKStoreCheckCoordinator coordinator = mock(USKStoreCheckCoordinator.class);
+    USKStoreCheckCoordinator.USKStoreCheckCallbacks callbacks = newCallbacks(null, null);
     ClientRequester parent = mock(ClientRequester.class);
-    USKFetcher.USKStoreChecker checker = mock(USKFetcher.USKStoreChecker.class);
-    return new USKStoreCheckerGetter(fetcher, parent, checker);
+    USKStoreCheckCoordinator.USKStoreChecker checker =
+        mock(USKStoreCheckCoordinator.USKStoreChecker.class);
+    return new USKStoreCheckerGetter(coordinator, callbacks, parent, checker);
   }
 
   @SuppressWarnings("java:S3011")
diff --git a/src/test/java/network/crypta/client/async/USKSubscriberRegistryTest.java b/src/test/java/network/crypta/client/async/USKSubscriberRegistryTest.java
new file mode 100644
index 0000000000..67ac6245f7
--- /dev/null
+++ b/src/test/java/network/crypta/client/async/USKSubscriberRegistryTest.java
@@ -0,0 +1,149 @@
+package network.crypta.client.async;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.Arrays;
+import network.crypta.keys.USK;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+@SuppressWarnings("java:S100")
+class USKSubscriberRegistryTest {
+
+  @Mock private USKKeyWatchSet watchingKeys;
+  @Mock private USKManager uskManager;
+  @Mock private USKAttemptManager attempts;
+  @Mock private USK origUSK;
+
+  private USKSubscriberRegistry registry;
+
+  @BeforeEach
+  void setUp() {
+    registry = new USKSubscriberRegistry(watchingKeys, uskManager, attempts, origUSK);
+  }
+
+  @Test
+  void addSubscriber_whenNewSubscriber_updatesHintsAndPriorities() {
+    USKCallback subscriber = mockCallback((short) 3, (short) 2);
+    USKFetcherCallback fetcherCallback = mockFetcherCallback((short) 4, (short) 1);
+    when(uskManager.lookupLatestSlot(origUSK)).thenReturn(11L);
+
+    registry.addSubscriber(subscriber, 5L, new USKFetcherCallback[] {fetcherCallback}, "fetcher");
+
+    ArgumentCaptor<Long[]> hintsCaptor = ArgumentCaptor.forClass(Long[].class);
+    verify(watchingKeys).updateSubscriberHints(hintsCaptor.capture(), eq(11L));
+    Long[] hints = hintsCaptor.getValue();
+    assertEquals(1, hints.length);
+    assertEquals(5L, hints[0]);
+    assertTrue(registry.hasSubscribers());
+    assertEquals(3, registry.normalPriority());
+    assertEquals(1, registry.progressPriority());
+    verify(attempts).reloadPollParameters();
+  }
+
+  @Test
+  void removeSubscriber_whenPresent_updatesHintsAndPriorities() {
+    USKCallback first = mockCallback((short) 4, (short) 3);
+    USKCallback second = mockCallback((short) 2, (short) 2);
+    when(uskManager.lookupLatestSlot(origUSK)).thenReturn(7L);
+    registry.addSubscriber(first, 3L, new USKFetcherCallback[0], "fetcher");
+    registry.addSubscriber(second, 9L, new USKFetcherCallback[0], "fetcher");
+    reset(watchingKeys, attempts);
+    when(uskManager.lookupLatestSlot(origUSK)).thenReturn(7L);
+
+    registry.removeSubscriber(first, new USKFetcherCallback[0], "fetcher");
+
+    ArgumentCaptor<Long[]> hintsCaptor = ArgumentCaptor.forClass(Long[].class);
+    verify(watchingKeys).updateSubscriberHints(hintsCaptor.capture(), eq(7L));
+    Long[] hints = hintsCaptor.getValue();
+    assertEquals(1, hints.length);
+    assertEquals(9L, hints[0]);
+    assertEquals(2, registry.normalPriority());
+    verify(attempts).reloadPollParameters();
+  }
+
+  @Test
+  void removeCallback_whenCalled_updatesHintsWithoutReloadingPriorities() {
+    USKCallback subscriber = mockCallback((short) 5, (short) 4);
+    when(uskManager.lookupLatestSlot(origUSK)).thenReturn(2L);
+    registry.addSubscriber(subscriber, 12L, new USKFetcherCallback[0], "fetcher");
+    reset(watchingKeys, attempts);
+    when(uskManager.lookupLatestSlot(origUSK)).thenReturn(2L);
+
+    registry.removeCallback(subscriber);
+
+    ArgumentCaptor<Long[]> hintsCaptor = ArgumentCaptor.forClass(Long[].class);
+    verify(watchingKeys).updateSubscriberHints(hintsCaptor.capture(), eq(2L));
+    Long[] hints = hintsCaptor.getValue();
+    assertEquals(0, hints.length);
+    verify(attempts, never()).reloadPollParameters();
+  }
+
+  @Test
+  void snapshotSubscribers_whenCalled_returnsRegisteredSnapshot() {
+    USKCallback first = mockCallback((short) 2, (short) 2);
+    USKCallback second = mockCallback((short) 3, (short) 1);
+    registry.addSubscriber(first, 1L, new USKFetcherCallback[0], "fetcher");
+    registry.addSubscriber(second, 2L, new USKFetcherCallback[0], "fetcher");
+
+    USKCallback[] snapshot = registry.snapshotSubscribers();
+
+    assertEquals(2, snapshot.length);
+    assertTrue(Arrays.asList(snapshot).contains(first));
+    assertTrue(Arrays.asList(snapshot).contains(second));
+  }
+
+  @Test
+  void refreshAndGetProgressPollPriority_whenCalled_updatesAndReturnsCurrentPriority() {
+    USKCallback subscriber = mockCallback((short) 5, (short) 4);
+    USKFetcherCallback fetcherCallback = mockFetcherCallback((short) 6, (short) 2);
+    registry.addSubscriber(subscriber, 6L, new USKFetcherCallback[0], "fetcher");
+    reset(attempts);
+
+    short priority =
+        registry.refreshAndGetProgressPollPriority(
+            new USKFetcherCallback[] {fetcherCallback}, "fetcher");
+
+    assertEquals(2, priority);
+    assertEquals(2, registry.progressPriority());
+    verify(attempts).reloadPollParameters();
+  }
+
+  @ParameterizedTest
+  @CsvSource({"0,false", "2,true"})
+  void hasCallbacks_whenArraySizeProvided_returnsExpected(int size, boolean expected) {
+    USKFetcherCallback[] callbacks = new USKFetcherCallback[size];
+
+    boolean result = registry.hasCallbacks(callbacks);
+
+    assertEquals(expected, result);
+  }
+
+  private static USKCallback mockCallback(short normal, short progress) {
+    USKCallback callback = mock(USKCallback.class);
+    when(callback.getPollingPriorityNormal()).thenReturn(normal);
+    when(callback.getPollingPriorityProgress()).thenReturn(progress);
+    return callback;
+  }
+
+  private static USKFetcherCallback mockFetcherCallback(short normal, short progress) {
+    USKFetcherCallback callback = mock(USKFetcherCallback.class);
+    when(callback.getPollingPriorityNormal()).thenReturn(normal);
+    when(callback.getPollingPriorityProgress()).thenReturn(progress);
+    return callback;
+  }
+}
diff --git a/src/test/java/network/crypta/client/async/USKSuccessPlannerTest.java b/src/test/java/network/crypta/client/async/USKSuccessPlannerTest.java
new file mode 100644
index 0000000000..8010fde6c6
--- /dev/null
+++ b/src/test/java/network/crypta/client/async/USKSuccessPlannerTest.java
@@ -0,0 +1,73 @@
+package network.crypta.client.async;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+
+import java.util.List;
+import java.util.stream.Stream;
+import network.crypta.keys.ClientSSKBlock;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+@SuppressWarnings("java:S100")
+class USKSuccessPlannerTest {
+
+  @Test
+  void createSuccessPlan_whenCalled_populatesFields() {
+    // Arrange
+    USKSuccessPlanner planner = new USKSuccessPlanner();
+    List<USKAttempt> killAttempts = List.of(org.mockito.Mockito.mock(USKAttempt.class));
+
+    // Act
+    USKSuccessPlanner.SuccessPlan plan = planner.createSuccessPlan(true, 7L, false, killAttempts);
+
+    // Assert
+    assertEquals(true, plan.decode);
+    assertEquals(7L, plan.curLatest);
+    assertEquals(false, plan.registerNow);
+    assertSame(killAttempts, plan.killAttempts);
+  }
+
+  @Test
+  void createFoundPlan_whenCalled_populatesFields() {
+    // Arrange
+    USKSuccessPlanner planner = new USKSuccessPlanner();
+    List<USKAttempt> killAttempts = List.of(org.mockito.Mockito.mock(USKAttempt.class));
+
+    // Act
+    USKSuccessPlanner.FoundPlan plan = planner.createFoundPlan(false, true, killAttempts);
+
+    // Assert
+    assertEquals(false, plan.decode);
+    assertEquals(true, plan.registerNow);
+    assertSame(killAttempts, plan.killAttempts);
+  }
+
+  @ParameterizedTest
+  @MethodSource("decodeCases")
+  void shouldDecode_whenEvaluated_returnsExpectedDecision(
+      long curLatest, long lastEd, boolean dontUpdate, boolean blockPresent, boolean expected) {
+    // Arrange
+    ClientSSKBlock block = blockPresent ? org.mockito.Mockito.mock(ClientSSKBlock.class) : null;
+
+    // Act
+    boolean decision = USKSuccessPlanner.shouldDecode(curLatest, lastEd, dontUpdate, block);
+
+    // Assert
+    assertEquals(expected, decision);
+  }
+
+  private static Stream<Arguments> decodeCases() {
+    return Stream.of(
+        Arguments.of(5L, 7L, false, true, false),
+        Arguments.of(7L, 7L, false, false, true),
+        Arguments.of(9L, 7L, false, true, true),
+        Arguments.of(9L, 7L, true, true, true),
+        Arguments.of(9L, 7L, true, false, false));
+  }
+}
diff --git a/src/test/java/network/crypta/node/simulator/BootstrapSeedTestTest.java b/src/test/java/network/crypta/node/simulator/BootstrapSeedTestTest.java
index 1c1ddac339..ac4d51b6a5 100644
--- a/src/test/java/network/crypta/node/simulator/BootstrapSeedTestTest.java
+++ b/src/test/java/network/crypta/node/simulator/BootstrapSeedTestTest.java
@@ -50,7 +50,7 @@ class BootstrapSeedTestTest {
   void main_whenSeednodesFileMissing_expectExitNoSeednodes() {
     SubprocessResult result =
         assertTimeoutPreemptively(
-            Duration.ofSeconds(5), () -> runBootstrapSeedTestInSubprocess(tempDir, "missing"));
+            Duration.ofSeconds(15), () -> runBootstrapSeedTestInSubprocess(tempDir, "missing"));
 
     assertEquals(
         expectedProcessExitCode(BootstrapSeedTest.EXIT_NO_SEEDNODES),
@@ -65,7 +65,7 @@ void main_whenSeednodesFileMissing_expectExitNoSeednodes() {
   void main_whenSeednodesFileEmpty_expectExitNoSeednodes() {
     SubprocessResult result =
         assertTimeoutPreemptively(
-            Duration.ofSeconds(5), () -> runBootstrapSeedTestInSubprocess(tempDir, "empty"));
+            Duration.ofSeconds(15), () -> runBootstrapSeedTestInSubprocess(tempDir, "empty"));
 
     assertEquals(
         expectedProcessExitCode(BootstrapSeedTest.EXIT_NO_SEEDNODES),
@@ -80,7 +80,7 @@ void main_whenSeednodesFileEmpty_expectExitNoSeednodes() {
   void main_whenGlobalTestInitThrows_expectExitThrewSomething() {
     SubprocessResult result =
         assertTimeoutPreemptively(
-            Duration.ofSeconds(5), () -> runBootstrapSeedTestInSubprocess(tempDir, "throw-init"));
+            Duration.ofSeconds(15), () -> runBootstrapSeedTestInSubprocess(tempDir, "throw-init"));
 
     assertEquals(
         expectedProcessExitCode(BootstrapSeedTest.EXIT_THREW_SOMETHING),