From 28e50d984085cd29d4b82fa13896b278c9f3be32 Mon Sep 17 00:00:00 2001
From: windweller
Date: Tue, 28 Oct 2025 13:33:33 -0400
Subject: [PATCH 1/5] add a worker primitive
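Worker wraps a step function of the form (state, input) -> (output,
next_state) and runs it without blocking the event loop: coroutine
functions are awaited directly, synchronous functions are pushed to a
worker thread. A minimal usage sketch (the step function below is
illustrative, not part of this commit):

    import asyncio
    from opto.features.priority_search.primitives import Worker

    async def step(state, x):
        # toy transition: fold the input into the state
        return state + x, state + x

    async def demo():
        w = Worker(step, name="adder")
        output, next_state = await w(0, 5)  # workers are directly awaitable
        print(output, next_state, w.duration)

    asyncio.run(demo())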
+ """ + if self.start_time is None: + return None + if self.end_time is None: + return time.time() - self.start_time + return self.end_time - self.start_time + + def __repr__(self): + status = "running" if self.is_running else "done" if self.is_done else "pending" + return f"<{self.name} [{status}]>" From 0daf45fe21a3b29a2ee9d682da551054042b39b0 Mon Sep 17 00:00:00 2001 From: chinganc Date: Wed, 5 Nov 2025 00:25:51 +0000 Subject: [PATCH 2/5] Add generic async controller --- opto/features/async_search/controller.py | 89 ++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 opto/features/async_search/controller.py diff --git a/opto/features/async_search/controller.py b/opto/features/async_search/controller.py new file mode 100644 index 00000000..4f6dd101 --- /dev/null +++ b/opto/features/async_search/controller.py @@ -0,0 +1,89 @@ +import asyncio + + +class Controller: + """ Asynchronous Controller to manage multiple workers processing tasks concurrently. + """ + async def run(self, num_workers, *args, **kwargs): + + await self.init(*args, **kwargs) + + # start all workers concurrently using asyncio.gather + worker_tasks = await asyncio.gather(*(self.create_new_task() for _ in range(num_workers))) + worker_tasks = list(worker_tasks) + + while worker_tasks: + done, pending = await asyncio.wait(worker_tasks, return_when=asyncio.FIRST_COMPLETED) + for task in done: + worker_tasks.remove(task) + result = task.result() + await self.update(result) + if self.should_stop(): + for p in pending: + p.cancel() + return await self.post_process() + else: + new_task = await self.create_new_task() + worker_tasks.append(new_task) + + async def create_new_task(self): + new_worker = await self.create_worker() + new_task = await self.create_task() + return asyncio.create_task(new_worker(new_task)) + + async def init(self , *args, **kwargs): + # Initialize any required state before starting the controller + pass + + async def post_process(self): + # Final processing after all tasks are done + pass + + async def create_worker(self): + # return a coroutine function that can be called with a task + raise NotImplementedError + + async def create_task(self): + raise NotImplementedError + + async def update(self, result): + # process the result and update internal state + raise NotImplementedError + + def should_stop(self): + # return True if stopping condition met else False + raise NotImplementedError + + +if __name__ == "__main__": + + class TestController(Controller): + + async def run(self, num_workers): + self.i = 0 # counter for tasks + return await super().run(num_workers) + + async def create_worker(self): + async def worker(task): + await asyncio.sleep(task) + return f"Completed task with sleep {task}" + return worker + + async def create_task(self): + return 1 # simple task: sleep for 1 second + + async def update(self, result): + self.i += 1 + print(self.i, result) + + def should_stop(self): + if self.i >= 5: + return True + return False # never stop for testing + + import time + controller = TestController() + st = time.time() + asyncio.run(controller.run(num_workers=3)) + used_time = time.time() - st + assert used_time < 2 + 0.5, "Should be less than 2 + eps seconds" \ No newline at end of file From ac120313d7f627beae358855909a9a0bb0ff8c7a Mon Sep 17 00:00:00 2001 From: chinganc Date: Wed, 3 Dec 2025 04:03:55 +0000 Subject: [PATCH 3/5] Add a draft of async search --- opto/features/async_search/async_search.py | 358 +++++++++++++++++++++ 1 file changed, 358 insertions(+) create mode 
---
 opto/features/async_search/controller.py | 89 ++++++++++++++++++++++
 1 file changed, 89 insertions(+)
 create mode 100644 opto/features/async_search/controller.py

diff --git a/opto/features/async_search/controller.py b/opto/features/async_search/controller.py
new file mode 100644
index 00000000..4f6dd101
--- /dev/null
+++ b/opto/features/async_search/controller.py
@@ -0,0 +1,89 @@
+import asyncio
+
+
+class Controller:
+    """ Asynchronous controller that manages multiple workers processing tasks concurrently.
+    """
+    async def run(self, num_workers, *args, **kwargs):
+
+        await self.init(*args, **kwargs)
+
+        # start all workers concurrently using asyncio.gather
+        worker_tasks = await asyncio.gather(*(self.create_new_task() for _ in range(num_workers)))
+        worker_tasks = list(worker_tasks)
+
+        while worker_tasks:
+            done, pending = await asyncio.wait(worker_tasks, return_when=asyncio.FIRST_COMPLETED)
+            for task in done:
+                worker_tasks.remove(task)
+                result = task.result()
+                await self.update(result)
+                if self.should_stop():
+                    for p in pending:
+                        p.cancel()
+                    return await self.post_process()
+                else:
+                    new_task = await self.create_new_task()
+                    worker_tasks.append(new_task)
+
+    async def create_new_task(self):
+        new_worker = await self.create_worker()
+        new_task = await self.create_task()
+        return asyncio.create_task(new_worker(new_task))
+
+    async def init(self, *args, **kwargs):
+        # Initialize any required state before starting the controller
+        pass
+
+    async def post_process(self):
+        # Final processing after all tasks are done
+        pass
+
+    async def create_worker(self):
+        # return a coroutine function that can be called with a task
+        raise NotImplementedError
+
+    async def create_task(self):
+        raise NotImplementedError
+
+    async def update(self, result):
+        # process the result and update internal state
+        raise NotImplementedError
+
+    def should_stop(self):
+        # return True if the stopping condition is met, else False
+        raise NotImplementedError
+
+
+if __name__ == "__main__":
+
+    class TestController(Controller):
+
+        async def run(self, num_workers):
+            self.i = 0  # counter for completed tasks
+            return await super().run(num_workers)
+
+        async def create_worker(self):
+            async def worker(task):
+                await asyncio.sleep(task)
+                return f"Completed task with sleep {task}"
+            return worker
+
+        async def create_task(self):
+            return 1  # simple task: sleep for 1 second
+
+        async def update(self, result):
+            self.i += 1
+            print(self.i, result)
+
+        def should_stop(self):
+            if self.i >= 5:
+                return True
+            return False  # keep going until 5 tasks have completed
+
+    import time
+    controller = TestController()
+    st = time.time()
+    asyncio.run(controller.run(num_workers=3))
+    used_time = time.time() - st
+    assert used_time < 2 + 0.5, "Should be less than 2 + eps seconds"
\ No newline at end of file

From ac120313d7f627beae358855909a9a0bb0ff8c7a Mon Sep 17 00:00:00 2001
From: chinganc
Date: Wed, 3 Dec 2025 04:03:55 +0000
Subject: [PATCH 3/5] Add a draft of async search
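AsyncSearch combines the Trainer interface with the async Controller:
each in-flight task is either an evaluation pass over the test set or
a worker that samples a proposal and runs a local update. Subclasses
supply three hooks; a rough sketch of the intended shape (GreedySearch
and its bodies are illustrative, not part of this commit):

    class GreedySearch(AsyncSearch):
        async def get_task_state(self):
            # always propose the current agent, with no extra task state
            return self.agent, {}

        async def per_worker_update(self, samples, **kwargs):
            # e.g. run one optimizer step on this worker's samples
            return samples, {}

        async def main_update(self, result):
            # e.g. commit the best parameters seen so far
            return {}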
---
 opto/features/async_search/async_search.py | 358 +++++++++++++++++++++
 1 file changed, 358 insertions(+)
 create mode 100644 opto/features/async_search/async_search.py

diff --git a/opto/features/async_search/async_search.py b/opto/features/async_search/async_search.py
new file mode 100644
index 00000000..8a64072b
--- /dev/null
+++ b/opto/features/async_search/async_search.py
@@ -0,0 +1,358 @@
+import asyncio
+from tqdm.asyncio import tqdm
+import numpy as np
+from typing import Union, List, Dict, Any, Optional, Tuple
+import uuid
+
+from opto import trace
+from opto.trace.errors import ExecutionError
+from opto.optimizers.optimizer import Optimizer
+from opto.trainer.algorithms.algorithm import Trainer
+from opto.features.async_search.controller import Controller
+from opto.features.async_search.async_sampler import AsyncSampler as Sampler
+from opto.features.priority_search.sampler import BatchRollout
+from opto.features.priority_search.search_template import Samples
+from opto.trainer.loader import DataLoader
+from opto.trainer.utils import safe_mean
+
+
+WORKER_TASK = "worker_task"
+EVAL_TASK = "eval_task"
+
+def check_optimizer_parameters(optimizer: Optimizer, agent: trace.Module):
+    """ Check if the optimizer's parameters are the same as the agent's parameters. """
+    assert isinstance(optimizer, Optimizer), "optimizer must be an instance of Optimizer."
+    agent_params = set(agent.parameters())
+    optimizer_params = set(optimizer.parameters)
+    assert agent_params == optimizer_params, "Optimizer parameters do not match agent parameters."
+
+
+def as_async(func):
+    """Decorator to make a synchronous function asynchronous."""
+    async def wrapper(*args, **kwargs):
+        loop = asyncio.get_running_loop()
+        return await loop.run_in_executor(None, lambda: func(*args, **kwargs))
+    return wrapper
+
+@as_async
+def evaluate_agent(agent, guide, x, info, min_score=None):
+    try:
+        output = agent(x).data
+        score = guide.metric(x, output, info)
+    except ExecutionError:
+        score = min_score
+    return score
+
+
+async def evaluate(agent, guide, inputs, infos, min_score=None, num_samples=1, description=None):
+    """ Evaluate the agent on the inputs and return the scores
+
+    Args:
+        agent: The agent to evaluate
+        guide: The guide to use for evaluation
+        inputs: List of inputs to evaluate on
+        infos: List of additional information for each input
+        min_score: Minimum score to return when an exception occurs
+        num_samples: Number of samples to use to evaluate each input
+        description: Description to display in the progress bar
+    """
+    assert len(inputs) == len(infos), "Inputs and infos must have the same length"
+    N = len(inputs)
+    # Use provided description or generate a default one
+    eval_description = description or f"Evaluating {N} examples"
+    # repeat each index num_samples times
+    indices = [i for i in range(N) for _ in range(num_samples)]
+    # Run the evaluations concurrently, with a progress bar
+    scores = await tqdm.gather(*(evaluate_agent(agent, guide, inputs[i], infos[i], min_score) for i in indices), desc=eval_description)
+    scores = np.array(scores)
+    if num_samples > 1:
+        # scores will be of length N * num_samples
+        # Reshape scores into an array of shape (N, num_samples)
+        scores = scores.reshape(N, num_samples)
+    return scores
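+
+# AsyncSearch schedules two kinds of tasks through the Controller loop:
+#   EVAL_TASK   -- periodically score the current agent on the test set
+#   WORKER_TASK -- sample a proposal, roll it out, and run a local update
+# Task ids have the form "<type>:<iteration>[:uuid]" so that finished
+# results can be routed to the right handler in process_result().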
+
+class AsyncSearch(Trainer, Controller):
+
+    def __init__(self,
+                 agent: trace.Module,
+                 optimizer: Union[Optimizer, List[Optimizer]],
+                 num_threads: int = None,
+                 logger: Optional[Any] = None,
+                 *args, **kwargs):
+        super().__init__(agent, num_threads=num_threads, logger=logger, *args, **kwargs)
+
+        if not isinstance(optimizer, list):
+            optimizer = [optimizer]
+        assert len(optimizer) > 0, "Optimizers list is empty."
+        for opt in optimizer:
+            check_optimizer_parameters(opt, agent)
+        self._optimizers = optimizer
+
+        self.n_iters = 0
+        self.n_epochs = 0
+        self.n_samples = 0
+        self._optimizer_index = -1
+        self.train_sampler = None
+        self._train_scores = []  # scores of the agent observed during training
+        self._train_num_samples = []  # number of samples used to compute each score
+
+    @property
+    def optimizer(self):
+        # round-robin over the available optimizers
+        self._optimizer_index = (self._optimizer_index + 1) % len(self._optimizers)
+        return self._optimizers[self._optimizer_index]
+
+    # This is the synchronous interface
+    def train(self, *args, num_threads=None, **kwargs):
+        self.num_threads = num_threads or self.num_threads
+        return asyncio.run(self.run(self.num_threads, *args, **kwargs))
+
+    async def async_train(self, *args, num_threads=None, **kwargs):
+        self.num_threads = num_threads or self.num_threads
+        return await self.run(self.num_threads, *args, **kwargs)
+
+    # TODO clean up
+    async def init(self, *,
+                   guide,  # guide to provide feedback
+                   train_dataset,  # dataset of (x, info) pairs to train the agent
+                   # training loop
+                   batch_size=1,  # batch size for updating the agent
+                   num_batches=1,  # number of batches to use from the dataset in each iteration
+                   score_range=None,  # (min_score, max_score) range of valid scores
+                   num_epochs=1,  # number of training epochs (int or None)
+                   num_steps=None,  # number of training steps (int or None)
+                   verbose=False,  # whether to print the output of the agent
+                   # evaluation
+                   test_dataset=None,  # dataset of (x, info) pairs to evaluate the agent; if None, use train_dataset
+                   test_guide=None,  # guide to provide scores for the test set; if None, use guide
+                   test_frequency: Union[int, None] = 1,  # frequency of evaluation; NOTE set test_frequency < 0 to skip the first evaluation
+                   num_test_samples: int = 1,  # number of samples to use to evaluate each input
+                   # logging
+                   log_frequency=None,  # frequency of logging
+                   save_frequency: Union[int, None] = None,  # frequency of saving the agent
+                   save_path: str = "async_search_checkpoints/",  # path to save the agent
+                   **kwargs
+                   ):
+        """Initializes the state for the asynchronous search."""
+        self.guide = guide
+        self.train_dataset = train_dataset
+        # training loop
+        self.batch_size = batch_size
+        self.num_batches = num_batches
+        self._score_range = score_range or (-np.inf, np.inf)
+        assert len(self._score_range) == 2, "score_range must be a tuple (min_score, max_score)."
+        assert self._score_range[1] >= self._score_range[0], "score_range must satisfy min_score <= max_score."
+        self.num_epochs = num_epochs
+        self.num_steps = num_steps if num_steps is not None else 0
+        self.verbose = verbose
+        # evaluation
+        self.test_dataset = test_dataset or train_dataset
+        self.test_guide = test_guide or guide
+        self.test_frequency = test_frequency
+        self.num_test_samples = num_test_samples
+        # logging
+        self.log_frequency = log_frequency or test_frequency
+        self.save_frequency = save_frequency
+        self.save_path = save_path
+
+        if self.train_sampler is None:
+            self.train_sampler = Sampler(
+                DataLoader(train_dataset, batch_size=batch_size),
+                guide,
+                score_range=self._score_range
+            )
+        else:
+            self.train_sampler.loader.dataset = train_dataset  # update the train dataset in the sampler
+
+        # for managing tasks
+        self._eval_tasks = set()
+        self._worker_tasks = dict()
+
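+    # Bookkeeping note: n_iters advances by 1/num_threads per completed
+    # worker task (see process_result), so one full iteration corresponds
+    # to num_threads finished worker tasks; n_epochs is read back from
+    # the train sampler after each sample() call.
+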
+    def should_stop(self) -> bool:
+        """Determine whether the training should stop based on the number of epochs or steps."""
+        if self.n_epochs < self.num_epochs or self.n_iters < self.num_steps:
+            return False
+        return True
+
+    async def create_worker(self):
+        # create_task below already returns a ready-to-run coroutine, so
+        # the worker handed to the Controller is just an identity wrapper
+        return lambda coro: coro
+
+    async def create_task(self):
+        # Check if it's time for evaluation
+        if (self.test_frequency is not None) and (self.n_iters % self.test_frequency == 0):
+            if not (self.n_iters == 0 and self.test_frequency < 0):
+                # skip the first evaluation if test_frequency < 0
+                task_id = EVAL_TASK + ':' + str(self.n_iters)
+                if task_id not in self._eval_tasks:
+                    # this is to prevent duplicate eval tasks
+                    self._eval_tasks.add(task_id)
+                    return self.eval_task(task_id=task_id)
+
+        # Otherwise, run a worker task
+        print(f"Epoch: {self.n_epochs}. Iteration: {self.n_iters}")
+        task_id = WORKER_TASK + ':' + str(self.n_iters) + ':' + uuid.uuid4().hex
+        proposal, task_state = await self.get_task_state()
+        self._worker_tasks[task_id] = (proposal, task_state)
+        return self.worker_task(task_id=task_id,
+                                proposal=proposal,
+                                **task_state)
+
+    async def eval_task(self, task_id):
+        info_test = await self.evaluate_agent(self.test_dataset, self.test_guide)
+        return task_id, info_test
+
+    async def worker_task(self, task_id, proposal, **kwargs):
+        samples, info_sample = await self.sample(proposal, verbose=self.verbose)
+        result, info_update = await self.per_worker_update(samples, **kwargs)
+        return task_id, (result, info_sample, info_update)
+
+    async def update(self, result):
+        # Controller.run delivers each finished task's result here
+        return await self.process_result(result)
+
+    async def process_result(self, result):
+        task_id, result = result
+        if task_id.startswith(EVAL_TASK):
+            # evaluation task
+            self._eval_tasks.remove(task_id)
+            info_test = result
+            self.log(info_test, prefix="Test/")
+            return
+
+        # Worker task is done
+        self._worker_tasks.pop(task_id, None)  # no longer in flight
+        result, info_sample, info_per_worker_update = result
+        info_main_update = await self.main_update(result)
+
+        # Save a checkpoint periodically
+        if (self.save_frequency is not None and self.save_frequency > 0) and self.n_iters % self.save_frequency == 0:
+            self.save(self.save_path)
+
+        # Log information
+        assert 'mean_score' in info_sample, "info_sample must contain 'mean_score'."
+        assert 'n_epochs' in info_sample, "info_sample must contain 'n_epochs'."
+
+        self._train_scores.append(info_sample['mean_score'])  # so that the running mean can be computed
+        self._train_num_samples.append(info_sample['num_samples'])
+        self.n_samples += info_sample['num_samples']  # update the number of samples processed
+
+        if self.n_iters % self.log_frequency == 0:
+            # running average over all minibatches, weighted by sample count
+            avg_train_score = np.sum(np.array(self._train_scores) * np.array(self._train_num_samples)) / np.sum(self._train_num_samples)
+            self.logger.log('Algo/Average train score', avg_train_score, self.n_iters, color='blue')
+            self.log(info_main_update, prefix="Main Update/")
+            self.log(info_per_worker_update, prefix="Per Worker Update/")
+            self.log(info_sample, prefix="Sample/")
+            self.logger.log('Algo/Number of training samples', self.n_samples, self.n_iters, color='blue')
+            # Log parameters
+            for p in self.agent.parameters():
+                self.logger.log(f"Parameter/{p.name}", p.data, self.n_iters, color='red')
+
+        # Update counters
+        self.n_epochs = info_sample['n_epochs']
+        self.n_iters += 1 / self.num_threads  # each worker contributes 1/num_threads of an iteration
""" + # Use provided num_threads or fall back to self.num_threads + test_scores = await evaluate(self.agent, guide, test_dataset['inputs'], test_dataset['infos'], + min_score=self.min_score, + num_samples=self.num_test_samples, + description=f"Evaluating agent") + test_score = safe_mean(test_scores) + # check if the test_score is within the score range + if hasattr(self, '_score_range') and not (self.min_score <= test_score <= self.max_score): + print(f"Warning: Test score {test_score} is out of the range {self._score_range}.") + return {'test_score': test_score} + + + def log(self, info_log: Dict[str, Any], prefix=""): + """Log information from the algorithm.""" + for key, value in info_log.items(): + if value is not None and self.logger: + try: + self.logger.log(f"{prefix}{key}", value, self.n_iters) + except Exception as e: + print(f"Logging failed for key {key}: {e}") + + # # TODO + def save(self, save_path): + # save agent, data iterator, optimizer states, and other necessary information to resume training + # save self._worker_tasks + pass + + # # TODO + # @classmethod + # def load(cls, load_path: str): + + # # TODO + # def resume(self, *, + # model: trace.Module, + # train_dataset: dict , + # validate_dataset = None, + # test_dataset = None, + # **kwargs): + + + # Unimplemented methods that should be implemented by subclasses + async def main_update(self, result) -> Tuple[Dict[trace.Parameter, Any], Dict[str, Any]]: + """ Update the agent based on the provided result from all workers. + + Return: + update_dict (dict of Parameter: Any): A dictionary containing the updated parameters of the agent. + info_log (dict of str: Any): A dictionary containing logging information about the update process. + """ + + raise NotImplementedError("The update method should be implemented by subclasses.") + + async def get_task_state(self) -> Tuple[trace.Module, dict[str, Any]]: + """ Get the proposal and state needed for the worker task. + + Returns: + proposal (trace.Module): A proposed parameters (trace.Module) for the worker task. + task_state (dict): A dictionary containing any additional state needed for the worker task. + """ + # return proposal and state needed for the worker task + raise NotImplementedError("The update method should be implemented by subclasses.") + + @classmethod + async def per_worker_update(samples: Samples, **kwargs) -> Tuple[Any, dict[str, Any]]: + """ Update the agent based on the provided samples. + Args: + samples (list): A list of samples collected by the proposal returned by get_task_state using training dataset. + + Returns: + results (Any): The results of the update process, which will be passed to main_update. + info_log (dict of str: Any): A dictionary containing logging information about the update process. 
+ """ + raise NotImplementedError("The update method should be implemented by subclasses.") From c103b176e8c09b526ef7389be66007b04abf18e0 Mon Sep 17 00:00:00 2001 From: windweller Date: Thu, 4 Dec 2025 15:45:06 -0800 Subject: [PATCH 4/5] add a mockup design --- opto/features/async_search/mockup.py | 141 +++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 opto/features/async_search/mockup.py diff --git a/opto/features/async_search/mockup.py b/opto/features/async_search/mockup.py new file mode 100644 index 00000000..b28f0122 --- /dev/null +++ b/opto/features/async_search/mockup.py @@ -0,0 +1,141 @@ +import asyncio +import random +from dataclasses import dataclass +from typing import Any +from enum import Enum + + +class TaskStatus(Enum): + PENDING = "pending" + RUNNING = "running" + COMPLETED = "completed" + FAILED = "failed" + + +@dataclass +class Task: + id: int + status: TaskStatus = TaskStatus.PENDING + result: Any = None + + +@dataclass +class TaskResult: + task_id: int + status: TaskStatus + result: Any + duration: float + + +class Worker: + def __init__(self, worker_id: int): + self.worker_id = worker_id + self.is_busy = False + + async def execute(self, task: Task) -> TaskResult: + """Execute a task by sleeping for a random duration (1-10 seconds).""" + self.is_busy = True + task.status = TaskStatus.RUNNING + + sleep_duration = random.uniform(1, 10) + print(f"Worker {self.worker_id}: Starting task {task.id}, will take {sleep_duration:.2f}s") + + try: + await asyncio.sleep(sleep_duration) + task.status = TaskStatus.COMPLETED + task.result = f"Task {task.id} completed by worker {self.worker_id}" + print(f"Worker {self.worker_id}: Completed task {task.id}") + + return TaskResult( + task_id=task.id, + status=TaskStatus.COMPLETED, + result=task.result, + duration=sleep_duration + ) + except Exception as e: + task.status = TaskStatus.FAILED + return TaskResult( + task_id=task.id, + status=TaskStatus.FAILED, + result=str(e), + duration=sleep_duration + ) + finally: + self.is_busy = False + + +class Master: + def __init__(self, num_workers: int = 3): + self.workers = [Worker(i) for i in range(num_workers)] + self.task_counter = 0 + self.results: list[TaskResult] = [] + self._worker_available = asyncio.Condition() + + def create_task(self) -> Task: + """Create a new task with a unique ID.""" + task = Task(id=self.task_counter) + self.task_counter += 1 + return task + + async def dispatch_single(self, task: Task) -> TaskResult: + """Dispatch a single task to the first available worker.""" + worker = await self._wait_for_available_worker() # Actually waits for a free worker + print(f"Master: Assigning task {task.id} to worker {worker.worker_id}") + result = await worker.execute(task) + self.results.append(result) + await self._notify_worker_available() # Signal that this worker is free again + return result + + async def dispatch_batch(self, num_tasks: int) -> list[TaskResult]: + """Dispatch multiple tasks and run them concurrently.""" + tasks = [self.create_task() for _ in range(num_tasks)] + + async def run_task(task: Task) -> TaskResult: + worker = self.workers[task.id % len(self.workers)] + return await worker.execute(task) + + results = await asyncio.gather(*[run_task(task) for task in tasks]) + self.results.extend(results) + return list(results) + + def get_results(self) -> list[TaskResult]: + """Get all completed task results.""" + return self.results + + def _get_available_worker(self) -> Worker | None: + for worker in self.workers: + if not worker.is_busy: 
---
 opto/features/async_search/mockup.py | 141 +++++++++++++++++++++
 1 file changed, 141 insertions(+)
 create mode 100644 opto/features/async_search/mockup.py

diff --git a/opto/features/async_search/mockup.py b/opto/features/async_search/mockup.py
new file mode 100644
index 00000000..b28f0122
--- /dev/null
+++ b/opto/features/async_search/mockup.py
@@ -0,0 +1,141 @@
+import asyncio
+import random
+from dataclasses import dataclass
+from typing import Any
+from enum import Enum
+
+
+class TaskStatus(Enum):
+    PENDING = "pending"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+
+
+@dataclass
+class Task:
+    id: int
+    status: TaskStatus = TaskStatus.PENDING
+    result: Any = None
+
+
+@dataclass
+class TaskResult:
+    task_id: int
+    status: TaskStatus
+    result: Any
+    duration: float
+
+
+class Worker:
+    def __init__(self, worker_id: int):
+        self.worker_id = worker_id
+        self.is_busy = False
+
+    async def execute(self, task: Task) -> TaskResult:
+        """Execute a task by sleeping for a random duration (1-10 seconds)."""
+        self.is_busy = True
+        task.status = TaskStatus.RUNNING
+
+        sleep_duration = random.uniform(1, 10)
+        print(f"Worker {self.worker_id}: Starting task {task.id}, will take {sleep_duration:.2f}s")
+
+        try:
+            await asyncio.sleep(sleep_duration)
+            task.status = TaskStatus.COMPLETED
+            task.result = f"Task {task.id} completed by worker {self.worker_id}"
+            print(f"Worker {self.worker_id}: Completed task {task.id}")
+
+            return TaskResult(
+                task_id=task.id,
+                status=TaskStatus.COMPLETED,
+                result=task.result,
+                duration=sleep_duration
+            )
+        except Exception as e:
+            task.status = TaskStatus.FAILED
+            return TaskResult(
+                task_id=task.id,
+                status=TaskStatus.FAILED,
+                result=str(e),
+                duration=sleep_duration
+            )
+        finally:
+            self.is_busy = False
+
+
+class Master:
+    def __init__(self, num_workers: int = 3):
+        self.workers = [Worker(i) for i in range(num_workers)]
+        self.task_counter = 0
+        self.results: list[TaskResult] = []
+        self._worker_available = asyncio.Condition()
+
+    def create_task(self) -> Task:
+        """Create a new task with a unique ID."""
+        task = Task(id=self.task_counter)
+        self.task_counter += 1
+        return task
+
+    async def dispatch_single(self, task: Task) -> TaskResult:
+        """Dispatch a single task to the first available worker."""
+        worker = await self._wait_for_available_worker()  # blocks until a worker is free
+        print(f"Master: Assigning task {task.id} to worker {worker.worker_id}")
+        result = await worker.execute(task)
+        self.results.append(result)
+        await self._notify_worker_available()  # signal that this worker is free again
+        return result
+
+    async def dispatch_batch(self, num_tasks: int) -> list[TaskResult]:
+        """Dispatch multiple tasks and run them concurrently."""
+        tasks = [self.create_task() for _ in range(num_tasks)]
+
+        async def run_task(task: Task) -> TaskResult:
+            # round-robin assignment; a worker may run several tasks at once in this mockup
+            worker = self.workers[task.id % len(self.workers)]
+            return await worker.execute(task)
+
+        results = await asyncio.gather(*[run_task(task) for task in tasks])
+        self.results.extend(results)
+        return list(results)
+
+    def get_results(self) -> list[TaskResult]:
+        """Get all completed task results."""
+        return self.results
+
+    def _get_available_worker(self) -> Worker | None:
+        for worker in self.workers:
+            if not worker.is_busy:
+                return worker
+        return None
+
+    async def _wait_for_available_worker(self) -> Worker:
+        async with self._worker_available:
+            while True:
+                worker = self._get_available_worker()
+                if worker:
+                    return worker
+                await self._worker_available.wait()
+
+    async def _notify_worker_available(self):
+        async with self._worker_available:
+            self._worker_available.notify_all()
+
+
+async def main():
+    print("=== Master/Worker Async System Demo ===\n")
+
+    master = Master(num_workers=3)
+
+    print("Dispatching 5 tasks concurrently...\n")
+    results = await master.dispatch_batch(5)
+
+    print("\n=== Results ===")
+    for result in results:
+        print(f"Task {result.task_id}: {result.status.value}, duration: {result.duration:.2f}s")
+
+    total_time = max(r.duration for r in results)
+    print(f"\nTotal wall-clock time: ~{total_time:.2f}s (tasks ran concurrently)")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

From 45309075c29ee0be768db486fcb5f96682a0bbcd Mon Sep 17 00:00:00 2001
From: windweller
Date: Sat, 27 Dec 2025 11:18:28 -0500
Subject: [PATCH 5/5] manual update python version

---
 .github/workflows/ci.yml         | 2 +-
 .github/workflows/python-app.yml | 4 ++--
 pyproject.toml                   | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7889b69d..622c9626 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -49,7 +49,7 @@ jobs:
       # 6) Set up Python & install dependencies
       - uses: actions/setup-python@v5
-        with: { python-version: "3.10" }
+        with: { python-version: "3.13" }
       - name: Install Python deps
         run: |
           pip install -e .
diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index 8074be85..a111e34f 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -19,10 +19,10 @@ jobs:
     steps:
     - uses: actions/checkout@v4
-    - name: Set up Python 3.10
+    - name: Set up Python 3.13
       uses: actions/setup-python@v3
       with:
-        python-version: "3.10"
+        python-version: "3.13"
     - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
diff --git a/pyproject.toml b/pyproject.toml
index 829af4e5..2312a403 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,7 +18,7 @@ keywords = ["trace", "opto", "AutoDiff"]
 classifiers = [
     "Development Status :: 4 - Beta",
     "Intended Audience :: Developers",
-    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.13",
 ]

 [project.optional-dependencies]