from together import Together

# Authenticated Together API client (reads TOGETHER_API_KEY from the environment).
api = Together()

# Reconfigure an existing cluster: switch it to Kubernetes with 24 GPUs.
# Per the endpoint docs, num_gpus must be a multiple of 8 (e.g. 8, 16, 24).
updated = api.beta.clusters.update(
    "cluster_id",
    cluster_type="KUBERNETES",
    num_gpus=24,
)
print(updated)
{
"cluster_id": "<string>",
"cluster_type": "KUBERNETES",
"region": "<string>",
"gpu_type": "H100_SXM",
"cluster_name": "<string>",
"volumes": [
{
"volume_id": "<string>",
"volume_name": "<string>",
"size_tib": 123,
"status": "<string>"
}
],
"status": "WaitingForControlPlaneNodes",
"control_plane_nodes": [
{
"node_id": "<string>",
"node_name": "<string>",
"status": "<string>",
"host_name": "<string>",
"num_cpu_cores": 123,
"memory_gib": 123,
"network": "<string>"
}
],
"gpu_worker_nodes": [
{
"node_id": "<string>",
"node_name": "<string>",
"status": "<string>",
"host_name": "<string>",
"num_cpu_cores": 123,
"num_gpus": 123,
"memory_gib": 123,
"networks": [
"<string>"
],
"instance_id": "<string>"
}
],
"kube_config": "<string>",
"num_gpus": 123,
"cuda_version": "<string>",
"nvidia_driver_version": "<string>",
"duration_hours": 123,
"slurm_shm_size_gib": 123,
"capacity_pool_id": "<string>",
"reservation_start_time": "2023-11-07T05:31:56Z",
"reservation_end_time": "2023-11-07T05:31:56Z",
"install_traefik": true,
"created_at": "2023-11-07T05:31:56Z"
}

Update the configuration of an existing GPU cluster.
from together import Together

# Create the API client; credentials are picked up from the environment.
together_client = Together()

# Update the cluster identified by "cluster_id" to a Kubernetes cluster
# sized at 24 GPUs. The GPU count must be a multiple of 8.
result = together_client.beta.clusters.update(
    "cluster_id", cluster_type="KUBERNETES", num_gpus=24
)
print(result)
{
"cluster_id": "<string>",
"cluster_type": "KUBERNETES",
"region": "<string>",
"gpu_type": "H100_SXM",
"cluster_name": "<string>",
"volumes": [
{
"volume_id": "<string>",
"volume_name": "<string>",
"size_tib": 123,
"status": "<string>"
}
],
"status": "WaitingForControlPlaneNodes",
"control_plane_nodes": [
{
"node_id": "<string>",
"node_name": "<string>",
"status": "<string>",
"host_name": "<string>",
"num_cpu_cores": 123,
"memory_gib": 123,
"network": "<string>"
}
],
"gpu_worker_nodes": [
{
"node_id": "<string>",
"node_name": "<string>",
"status": "<string>",
"host_name": "<string>",
"num_cpu_cores": 123,
"num_gpus": 123,
"memory_gib": 123,
"networks": [
"<string>"
],
"instance_id": "<string>"
}
],
"kube_config": "<string>",
"num_gpus": 123,
"cuda_version": "<string>",
"nvidia_driver_version": "<string>",
"duration_hours": 123,
"slurm_shm_size_gib": 123,
"capacity_pool_id": "<string>",
"reservation_start_time": "2023-11-07T05:31:56Z",
"reservation_end_time": "2023-11-07T05:31:56Z",
"install_traefik": true,
"created_at": "2023-11-07T05:31:56Z"
}

Bearer authentication header of the form `Bearer <token>`, where `<token>` is your auth token.
The ID of the cluster to update.

Type of cluster to update. One of: KUBERNETES, SLURM.

Number of GPUs to allocate in the cluster. This must be a multiple of 8 — for example, 8, 16, or 24.

Timestamp at which the cluster should be decommissioned. Only accepted for prepaid clusters.
OK
Type of cluster. One of: KUBERNETES, SLURM.

GPU type. One of: H100_SXM, H200_SXM, RTX_6000_PCI, L40_PCIE, B200_SXM, H100_SXM_INF.

Show child attributes

Current status of the GPU cluster. One of: WaitingForControlPlaneNodes, WaitingForDataPlaneNodes, WaitingForSubnet, WaitingForSharedVolume, InstallingDrivers, RunningAcceptanceTests, Paused, OnDemandComputePaused, Ready, Degraded, Deleting.

Show child attributes

Show child attributes
Was this page helpful?