diff --git a/examples/clusters_example.py b/examples/clusters_example.py new file mode 100644 index 0000000..8aeadf0 --- /dev/null +++ b/examples/clusters_example.py @@ -0,0 +1,141 @@ +""" +Example demonstrating how to use the Clusters API. + +This example shows how to: +- Create a new compute cluster +- List all clusters +- Get a specific cluster by ID +- Get cluster nodes +- Delete a cluster +""" + +import os +import time + +from verda import VerdaClient +from verda.constants import Actions, Locations + +# Get credentials from environment variables +CLIENT_ID = os.environ.get('VERDA_CLIENT_ID') +CLIENT_SECRET = os.environ.get('VERDA_CLIENT_SECRET') +BASE_URL = os.environ.get('VERDA_BASE_URL', 'https://api.verda.com/v1') + +# Create client +verda = VerdaClient(CLIENT_ID, CLIENT_SECRET, base_url=BASE_URL) + + +def create_cluster_example(): + """Create a new compute cluster.""" + # Get SSH keys + ssh_keys = [key.id for key in verda.ssh_keys.get()] + + # Check if cluster type is available + if not verda.clusters.is_available('16B200', Locations.FIN_03): + raise ValueError('Cluster type 16B200 is not available in FIN_03') + + # Get available images for cluster type + images = verda.clusters.get_cluster_images('16B200') + if 'ubuntu-22.04-cuda-12.9-cluster' not in images: + raise ValueError('Ubuntu 22.04 CUDA 12.9 cluster image is not supported for 16B200') + + # Create a 16B200 cluster + cluster = verda.clusters.create( + hostname='my-compute-cluster', + cluster_type='16B200', + image='ubuntu-22.04-cuda-12.9-cluster', + description='Example compute cluster for distributed training', + ssh_key_ids=ssh_keys, + location=Locations.FIN_03, + shared_volume_name='my-shared-volume', + shared_volume_size=30000, + wait_for_status=None, + ) + + print(f'Creating cluster: {cluster.id}') + print(f'Cluster hostname: {cluster.hostname}') + print(f'Cluster status: {cluster.status}') + print(f'Cluster cluster_type: {cluster.cluster_type}') + print(f'Location: {cluster.location}') + + # 
Wait for cluster to enter RUNNING status + while cluster.status != verda.constants.cluster_status.RUNNING: + time.sleep(2) + print(f'Waiting for cluster to enter RUNNING status... (status: {cluster.status})') + cluster = verda.clusters.get_by_id(cluster.id) + + print(f'Public IP: {cluster.ip}') + print('Cluster is now running and ready to use!') + + return cluster + + +def list_clusters_example(): + """List all clusters.""" + # Get all clusters + clusters = verda.clusters.get() + + print(f'\nFound {len(clusters)} cluster(s):') + for cluster in clusters: + print( + f' - {cluster.hostname} ({cluster.id}): {cluster.status} - {len(cluster.worker_nodes)} nodes' + ) + + # Get clusters with specific status + running_clusters = verda.clusters.get(status=verda.constants.cluster_status.RUNNING) + print(f'\nFound {len(running_clusters)} running cluster(s)') + + return clusters + + +def get_cluster_by_id_example(cluster_id: str): + """Get a specific cluster by ID.""" + cluster = verda.clusters.get_by_id(cluster_id) + + print('\nCluster details:') + print(f' ID: {cluster.id}') + print(f' Name: {cluster.hostname}') + print(f' Description: {cluster.description}') + print(f' Status: {cluster.status}') + print(f' Cluster type: {cluster.cluster_type}') + print(f' Created at: {cluster.created_at}') + print(f' Public IP: {cluster.ip}') + print(f' Worker nodes: {len(cluster.worker_nodes)}') + + return cluster + + +def delete_cluster_example(cluster_id: str): + """Delete a cluster.""" + print(f'\nDeleting cluster {cluster_id}...') + + verda.clusters.action(cluster_id, Actions.DELETE) + + print('Cluster deleted successfully') + + +def main(): + """Run all cluster examples.""" + print('=== Clusters API Example ===\n') + + # Create a new cluster + print('1. Creating a new cluster...') + cluster = create_cluster_example() + cluster_id = cluster.id + + # List all clusters + print('\n2. Listing all clusters...') + list_clusters_example() + + # Get cluster by ID + print('\n3. 
Getting cluster details...') + get_cluster_by_id_example(cluster_id) + + # Delete the cluster + print('\n4. Deleting the cluster...') + delete_cluster_example(cluster_id) + + print('\n=== Example completed successfully ===') + + +if __name__ == '__main__': + main() diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py index 3f695ab..28d36fb 100644 --- a/tests/integration_tests/conftest.py +++ b/tests/integration_tests/conftest.py @@ -9,12 +9,11 @@ Make sure to run the server and the account has enough balance before running the tests """ -BASE_URL = 'http://localhost:3010/v1' - # Load env variables, make sure there's an env file with valid client credentials load_dotenv() CLIENT_SECRET = os.getenv('VERDA_CLIENT_SECRET') CLIENT_ID = os.getenv('VERDA_CLIENT_ID') +BASE_URL = os.getenv('VERDA_BASE_URL', 'http://localhost:3010/v1') @pytest.fixture diff --git a/tests/integration_tests/test_clusters.py b/tests/integration_tests/test_clusters.py new file mode 100644 index 0000000..0ab6d06 --- /dev/null +++ b/tests/integration_tests/test_clusters.py @@ -0,0 +1,69 @@ +import logging +import os + +import pytest + +from verda import VerdaClient +from verda.constants import Locations + +logging.basicConfig(level=logging.DEBUG) +logger = logging.getLogger() + + +IN_GITHUB_ACTIONS = os.getenv('GITHUB_ACTIONS') == 'true' + + +@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Test doesn't work in Github Actions.") +@pytest.mark.withoutresponses +class TestClusters: + def test_create_cluster(self, verda_client: VerdaClient): + # get ssh key + ssh_key = verda_client.ssh_keys.get()[0] + + if not verda_client.clusters.is_available('16B200', Locations.FIN_03): + raise ValueError('Cluster type 16B200 is not available in FIN_03') + logger.debug('[x] Cluster type 16B200 is available in FIN_03') + + availabilities = verda_client.clusters.get_availabilities(Locations.FIN_03) + assert len(availabilities) > 0 + assert '16B200' in availabilities + logger.debug( 
'[x] Cluster type 16B200 is one of the available cluster types in FIN_03: %s', + availabilities, + ) + + images = verda_client.clusters.get_cluster_images('16B200') + assert len(images) > 0 + assert 'ubuntu-22.04-cuda-12.9-cluster' in images + logger.debug('[x] Ubuntu 22.04 CUDA 12.9 cluster image is supported for 16B200') + + # create instance + cluster = verda_client.clusters.create( + hostname='test-instance', + location=Locations.FIN_03, + cluster_type='16B200', + description='test instance', + image='ubuntu-22.04-cuda-12.9-cluster', + ssh_key_ids=[ssh_key.id], + # Set to None to not wait for provisioning but return immediately + wait_for_status=verda_client.constants.cluster_status.PROVISIONING, + ) + + # assert instance is created + assert cluster.id is not None + assert ( + cluster.status == verda_client.constants.cluster_status.PROVISIONING + or cluster.status == verda_client.constants.cluster_status.RUNNING + ) + + # If still provisioning, we don't have worker nodes yet and ip is not available + if cluster.status != verda_client.constants.cluster_status.PROVISIONING: + assert cluster.worker_nodes is not None + assert len(cluster.worker_nodes) == 2 + assert cluster.ip is not None + + # Now we need to wait for RUNNING status to connect to the jumphost (public IP is available) + # After that, we can connect to the jumphost and run commands on the cluster nodes: + # + # ssh -i ssh_key.pem root@ + # diff --git a/tests/unit_tests/clusters/__init__.py b/tests/unit_tests/clusters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/unit_tests/clusters/test_clusters.py b/tests/unit_tests/clusters/test_clusters.py new file mode 100644 index 0000000..a69ae03 --- /dev/null +++ b/tests/unit_tests/clusters/test_clusters.py @@ -0,0 +1,181 @@ +import pytest +import responses # https://github.com/getsentry/responses + +from verda.clusters import Cluster, ClustersService, ClusterWorkerNode +from verda.constants import ErrorCodes, Locations +from 
verda.exceptions import APIException + +INVALID_REQUEST = ErrorCodes.INVALID_REQUEST +INVALID_REQUEST_MESSAGE = 'Invalid request' + +CLUSTER_ID = 'deadc0de-a5d2-4972-ae4e-d429115d055b' +SSH_KEY_ID = '12345dc1-a5d2-4972-ae4e-d429115d055b' + +CLUSTER_HOSTNAME = 'test-cluster' +CLUSTER_DESCRIPTION = 'Test compute cluster' +CLUSTER_STATUS = 'running' +CLUSTER_CLUSTER_TYPE = '16H200' +CLUSTER_NODE_COUNT = 2 +CLUSTER_LOCATION = Locations.FIN_03 +CLUSTER_IMAGE = 'ubuntu-22.04-cuda-12.4-cluster' +CLUSTER_CREATED_AT = '2024-01-01T00:00:00Z' +CLUSTER_IP = '10.0.0.1' + +NODE_1_ID = 'node1-c0de-a5d2-4972-ae4e-d429115d055b' +NODE_2_ID = 'node2-c0de-a5d2-4972-ae4e-d429115d055b' + +NODES_PAYLOAD = [ + { + 'id': NODE_1_ID, + 'status': 'running', + 'hostname': 'test-cluster-node-1', + 'private_ip': '10.0.0.1', + }, + { + 'id': NODE_2_ID, + 'status': 'running', + 'hostname': 'test-cluster-node-2', + 'private_ip': '10.0.0.2', + }, +] + +CLUSTER_PAYLOAD = [ + { + 'id': CLUSTER_ID, + 'hostname': CLUSTER_HOSTNAME, + 'description': CLUSTER_DESCRIPTION, + 'status': CLUSTER_STATUS, + 'created_at': CLUSTER_CREATED_AT, + 'location': CLUSTER_LOCATION, + 'cluster_type': CLUSTER_CLUSTER_TYPE, + 'worker_nodes': NODES_PAYLOAD, + 'ssh_key_ids': [SSH_KEY_ID], + 'image': CLUSTER_IMAGE, + 'ip': CLUSTER_IP, + } +] + + +class TestClustersService: + @pytest.fixture + def clusters_service(self, http_client): + return ClustersService(http_client) + + @pytest.fixture + def endpoint(self, http_client): + return http_client._base_url + '/clusters' + + def test_get_clusters(self, clusters_service, endpoint): + # arrange - add response mock + responses.add(responses.GET, endpoint, json=CLUSTER_PAYLOAD, status=200) + + # act + clusters = clusters_service.get() + cluster = clusters[0] + + # assert + assert isinstance(clusters, list) + assert len(clusters) == 1 + assert isinstance(cluster, Cluster) + assert cluster.id == CLUSTER_ID + assert cluster.hostname == CLUSTER_HOSTNAME + assert cluster.description == 
CLUSTER_DESCRIPTION + assert cluster.status == CLUSTER_STATUS + assert cluster.created_at == CLUSTER_CREATED_AT + assert cluster.location == CLUSTER_LOCATION + assert cluster.cluster_type == CLUSTER_CLUSTER_TYPE + assert isinstance(cluster.worker_nodes, list) + assert len(cluster.worker_nodes) == CLUSTER_NODE_COUNT + assert isinstance(cluster.worker_nodes[0], ClusterWorkerNode) + assert cluster.ssh_key_ids == [SSH_KEY_ID] + assert cluster.image == CLUSTER_IMAGE + assert cluster.ip == CLUSTER_IP + assert responses.assert_call_count(endpoint, 1) is True + + def test_create_cluster_successful(self, clusters_service, endpoint): + # arrange - add response mock + # create cluster + responses.add(responses.POST, endpoint, json={'id': CLUSTER_ID}, status=200) + # get cluster by id + url = endpoint + '/' + CLUSTER_ID + responses.add(responses.GET, url, json=CLUSTER_PAYLOAD[0], status=200) + + # act + cluster = clusters_service.create( + hostname=CLUSTER_HOSTNAME, + cluster_type=CLUSTER_CLUSTER_TYPE, + image=CLUSTER_IMAGE, + description=CLUSTER_DESCRIPTION, + ssh_key_ids=[SSH_KEY_ID], + location=CLUSTER_LOCATION, + wait_for_status=CLUSTER_STATUS, + ) + + # assert + assert isinstance(cluster, Cluster) + assert cluster.id == CLUSTER_ID + assert cluster.hostname == CLUSTER_HOSTNAME + assert cluster.description == CLUSTER_DESCRIPTION + assert cluster.status == CLUSTER_STATUS + assert cluster.cluster_type == CLUSTER_CLUSTER_TYPE + assert len(cluster.worker_nodes) == CLUSTER_NODE_COUNT + assert cluster.ssh_key_ids == [SSH_KEY_ID] + assert cluster.location == CLUSTER_LOCATION + assert cluster.image == CLUSTER_IMAGE + assert responses.assert_call_count(endpoint, 1) is True + assert responses.assert_call_count(url, 1) is True + + def test_create_cluster_failed(self, clusters_service, endpoint): + # arrange - add response mock + responses.add( + responses.POST, + endpoint, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, + ) + + # act + with 
pytest.raises(APIException) as excinfo: + clusters_service.create( + hostname=CLUSTER_HOSTNAME, + cluster_type=CLUSTER_CLUSTER_TYPE, + image=CLUSTER_IMAGE, + description=CLUSTER_DESCRIPTION, + ssh_key_ids=[SSH_KEY_ID], + location=CLUSTER_LOCATION, + ) + + # assert + assert excinfo.value.code == INVALID_REQUEST + assert excinfo.value.message == INVALID_REQUEST_MESSAGE + assert responses.assert_call_count(endpoint, 1) is True + + def test_delete_cluster_successful(self, clusters_service, endpoint): + # arrange - add response mock + url = endpoint + responses.add(responses.PUT, url, status=202) + + # act + result = clusters_service.delete(CLUSTER_ID) + + # assert + assert result is None + assert responses.assert_call_count(url, 1) is True + + def test_delete_cluster_failed(self, clusters_service, endpoint): + # arrange - add response mock + responses.add( + responses.PUT, + endpoint, + json={'code': INVALID_REQUEST, 'message': INVALID_REQUEST_MESSAGE}, + status=400, + ) + + # act + with pytest.raises(APIException) as excinfo: + clusters_service.delete('invalid_id') + + # assert + assert excinfo.value.code == INVALID_REQUEST + assert excinfo.value.message == INVALID_REQUEST_MESSAGE + assert responses.assert_call_count(endpoint, 1) is True diff --git a/verda/_verda.py b/verda/_verda.py index b3d9922..a4b4cfc 100644 --- a/verda/_verda.py +++ b/verda/_verda.py @@ -1,6 +1,7 @@ from verda._version import __version__ from verda.authentication import AuthenticationService from verda.balance import BalanceService +from verda.clusters import ClustersService from verda.constants import Constants from verda.containers import ContainersService from verda.http_client import HTTPClient @@ -79,5 +80,8 @@ def __init__( self.containers: ContainersService = ContainersService(self._http_client, inference_key) """Containers service. Deploy, manage, and monitor container deployments""" + self.clusters: ClustersService = ClustersService(self._http_client) + """Clusters service. 
Create, manage, and scale compute clusters""" + __all__ = ['VerdaClient'] diff --git a/verda/clusters/__init__.py b/verda/clusters/__init__.py new file mode 100644 index 0000000..849bf71 --- /dev/null +++ b/verda/clusters/__init__.py @@ -0,0 +1,5 @@ +"""Clusters service for managing compute clusters.""" + +from verda.clusters._clusters import Cluster, ClustersService, ClusterWorkerNode + +__all__ = ['Cluster', 'ClusterWorkerNode', 'ClustersService'] diff --git a/verda/clusters/_clusters.py b/verda/clusters/_clusters.py new file mode 100644 index 0000000..8558db3 --- /dev/null +++ b/verda/clusters/_clusters.py @@ -0,0 +1,285 @@ +import itertools +import time +from dataclasses import dataclass + +from dataclasses_json import dataclass_json + +from verda.constants import Actions, ClusterStatus, ErrorCodes, Locations +from verda.exceptions import APIException + +CLUSTERS_ENDPOINT = '/clusters' + +# Default shared volume size is 30TB +DEFAULT_SHARED_VOLUME_SIZE = 30000 + + +@dataclass_json +@dataclass +class ClusterWorkerNode: + """Represents a worker node in a cluster. + + Attributes: + id: Unique identifier for the node. + status: Current status of the node. + hostname: Network hostname of the node. + private_ip: Private IP address of the node. + """ + + id: str + status: str + hostname: str + private_ip: str + + +@dataclass_json +@dataclass +class Cluster: + """Represents a compute cluster with multiple nodes. + + Attributes: + id: Unique identifier for the cluster. + hostname: Human-readable hostname of the cluster. + description: Description of the cluster. + status: Current operational status of the cluster. + created_at: Timestamp of cluster creation. + location: Datacenter location code (default: Locations.FIN_03). + cluster_type: Type of the cluster. + worker_nodes: List of nodes in the cluster. + ssh_key_ids: List of SSH key IDs associated with the cluster nodes. + image: Image ID or type used for cluster nodes. 
+ startup_script_id: ID of the startup script to run on nodes. + public_ip: IP address of the jumphost. + """ + + id: str + hostname: str + description: str + status: str + created_at: str + location: str + cluster_type: str + worker_nodes: list[ClusterWorkerNode] + ssh_key_ids: list[str] + image: str | None = None + startup_script_id: str | None = None + ip: str | None = None + + +class ClustersService: + """Service for managing compute clusters through the API. + + This service provides methods to create, retrieve, and manage compute clusters. + """ + + def __init__(self, http_client) -> None: + """Initializes the ClustersService with an HTTP client. + + Args: + http_client: HTTP client for making API requests. + """ + self._http_client = http_client + + def get(self, status: str | None = None) -> list[Cluster]: + """Retrieves all clusters or clusters with specific status. + + Args: + status: Optional status filter for clusters. If None, returns all + non-deleted clusters. + + Returns: + List of cluster objects matching the criteria. + """ + clusters_dict = self._http_client.get(CLUSTERS_ENDPOINT, params={'status': status}).json() + return [ + Cluster.from_dict(cluster_dict, infer_missing=True) for cluster_dict in clusters_dict + ] + + def get_by_id(self, id: str) -> Cluster: + """Retrieves a specific cluster by its ID. + + Args: + id: Unique identifier of the cluster to retrieve. + + Returns: + Cluster object with the specified ID. + + Raises: + HTTPError: If the cluster is not found or other API error occurs. 
+ """ + cluster_dict = self._http_client.get(CLUSTERS_ENDPOINT + f'/{id}').json() + return Cluster.from_dict(cluster_dict, infer_missing=True) + + def create( + self, + hostname: str, + cluster_type: str, + image: str, + description: str, + ssh_key_ids: list = [], + location: str = Locations.FIN_03, + startup_script_id: str | None = None, + shared_volume_name: str | None = None, + shared_volume_size: int | None = None, + *, + wait_for_status: str | None = ClusterStatus.PROVISIONING, + max_wait_time: float = 900, + initial_interval: float = 1.0, + max_interval: float = 10, + backoff_coefficient: float = 2.0, + ) -> Cluster: + """Creates and deploys a new compute cluster. + + Args: + hostname: Name for the cluster. + cluster_type: Cluster type. + image: Image type or ID for cluster nodes. + description: Human-readable description of the cluster. + ssh_key_ids: List of SSH key IDs to associate with cluster nodes. + location: Datacenter location code (default: Locations.FIN_03). + startup_script_id: Optional ID of startup script to run on nodes. + shared_volume_name: Optional name for the shared volume. + shared_volume_size: Optional size for the shared volume, in GB, default to 30TB. + wait_for_status: Status to wait for the cluster to reach, default to PROVISIONING. If None, no wait is performed. + max_wait_time: Maximum total wait for the cluster to start creating, in seconds (default: 900) + initial_interval: Initial interval, in seconds (default: 1.0) + max_interval: The longest single delay allowed between retries, in seconds (default: 10) + backoff_coefficient: Coefficient to calculate the next retry interval (default 2.0) + + Returns: + The newly created cluster object. + + Raises: + HTTPError: If cluster creation fails or other API error occurs. + TimeoutError: If cluster does not start creating within max_wait_time. 
+ """ + payload = { + 'hostname': hostname, + 'cluster_type': cluster_type, + 'image': image, + 'description': description, + 'ssh_key_ids': ssh_key_ids, + 'contract': 'PAY_AS_YOU_GO', + 'location_code': location, + 'startup_script_id': startup_script_id, + 'shared_volume': { + 'name': shared_volume_name if shared_volume_name else hostname + '-shared-volume', + 'size': shared_volume_size if shared_volume_size else DEFAULT_SHARED_VOLUME_SIZE, + }, + } + response = self._http_client.post(CLUSTERS_ENDPOINT, json=payload).json() + id = response['id'] + + if not wait_for_status: + return self.get_by_id(id) + + # Wait for cluster to enter creating state with timeout + deadline = time.monotonic() + max_wait_time + for i in itertools.count(): + cluster = self.get_by_id(id) + if cluster.status == wait_for_status: + return cluster + + if cluster.status == ClusterStatus.ERROR: + raise APIException(ErrorCodes.SERVER_ERROR, f'Cluster {id} entered error state') + + if cluster.status == ClusterStatus.DISCONTINUED: + raise APIException(ErrorCodes.SERVER_ERROR, f'Cluster {id} was discontinued') + + now = time.monotonic() + if now >= deadline: + raise TimeoutError( + f'Cluster {id} did not enter creating state within {max_wait_time:.1f} seconds' + ) + + interval = min(initial_interval * backoff_coefficient**i, max_interval, deadline - now) + time.sleep(interval) + + def action(self, id_list: list[str] | str, action: str) -> None: + """Performs an action on one or more clusters. + + Args: + id_list: Single cluster ID or list of cluster IDs to act upon. + action: Action to perform on the clusters. Only `delete` is supported. + + Raises: + HTTPError: If the action fails or other API error occurs. + """ + if action != Actions.DELETE: + raise ValueError(f'Invalid action: {action}. 
Only DELETE is supported.') + + if type(id_list) is str: + payload = {'actions': [{'id': id_list, 'action': 'discontinue'}]} + else: + payload = {'actions': [{'id': id, 'action': action} for id in id_list]} + + self._http_client.put(CLUSTERS_ENDPOINT, json=payload) + return + + def delete(self, cluster_id: str) -> None: + """Deletes a cluster. + + Args: + cluster_id: ID of the cluster to delete. + """ + self.action(cluster_id, 'delete') + return + + def is_available( + self, + cluster_type: str, + location_code: str | None = None, + ) -> bool: + """Checks if a specific cluster type is available for deployment. + + Args: + cluster_type: Type of cluster to check availability for. + location_code: Optional datacenter location code. + + Returns: + True if the cluster type is available, False otherwise. + """ + query_params = {'location_code': location_code} + url = f'/cluster-availability/{cluster_type}' + response = self._http_client.get(url, params=query_params).text + return response == 'true' + + def get_availabilities(self, location_code: str | None = None) -> list[str]: + """Retrieves a list of available cluster types across locations. + + Args: + location_code: Optional datacenter location code to filter by. + + Returns: + List of available cluster types and their details. + """ + query_params = {'location_code': location_code} + response = self._http_client.get('/cluster-availability', params=query_params).json() + availabilities = response[0]['availabilities'] + return availabilities + + def get_availability(self, cluster_type: str, location_code: str | None = None) -> list[dict]: + """Retrieves availability details for a specific cluster type. + + Args: + cluster_type: Type of cluster to check availability for. + location_code: Optional datacenter location code. + + Returns: + Availability details for the specified cluster type. 
+ """ + + def get_cluster_images( + self, + cluster_type: str | None = None, + ) -> list[str]: + """Retrieves a list of available images for a given cluster type (optional). + + Args: + cluster_type: Type of cluster to get images for. + + Returns: + List of available images for the given cluster type. + """ + query_params = {'instance_type': cluster_type} + images = self._http_client.get('/images/cluster', params=query_params).json() + return [image['image_type'] for image in images] diff --git a/verda/constants.py b/verda/constants.py index 70b789f..777e7a7 100644 --- a/verda/constants.py +++ b/verda/constants.py @@ -56,6 +56,19 @@ def __init__(self): return +class ClusterStatus: + """Cluster status.""" + + ORDERED = 'ordered' + PROVISIONING = 'provisioning' + RUNNING = 'running' + DISCONTINUED = 'discontinued' + ERROR = 'error' + + def __init__(self): + return + + class VolumeTypes: """Storage volume types.""" @@ -110,6 +123,9 @@ def __init__(self, base_url, version): self.volume_status: VolumeStatus = VolumeStatus() """Possible volume statuses""" + self.cluster_status: ClusterStatus = ClusterStatus() + """Possible cluster statuses""" + self.volume_types: VolumeTypes = VolumeTypes() """Available volume types"""