# Docs for v2 can be found by changing the above selector ^
# Example: retrieve metadata for a single fine-tuning job with the Together Python SDK.
from together import Together
import os
# The API key is read from the environment; TOGETHER_API_KEY must be set before running.
client = Together(
api_key=os.environ.get("TOGETHER_API_KEY"),
)
# Fetch one fine-tuning job by ID; "ft-id" is a placeholder for a real job ID.
fine_tune = client.fine_tuning.retrieve(id="ft-id")
print(fine_tune)
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"status": "pending",
"training_file": "<string>",
"validation_file": "<string>",
"model": "<string>",
"model_output_name": "<string>",
"model_output_path": "<string>",
"trainingfile_numlines": 123,
"trainingfile_size": 123,
"created_at": "<string>",
"updated_at": "<string>",
"n_epochs": 123,
"n_checkpoints": 123,
"n_evals": 123,
"batch_size": "max",
"learning_rate": 123,
"lr_scheduler": {
"lr_scheduler_type": "linear",
"lr_scheduler_args": {
"min_lr_ratio": 0
}
},
"warmup_ratio": 123,
"max_grad_norm": 123,
"weight_decay": 123,
"eval_steps": 123,
"train_on_inputs": "auto",
"training_method": {
"method": "sft",
"train_on_inputs": "auto"
},
"training_type": {
"type": "Full"
},
"job_id": "<string>",
"events": [
{
"object": "fine-tune-event",
"created_at": "<string>",
"message": "<string>",
"type": "job_pending",
"param_count": 123,
"token_count": 123,
"total_steps": 123,
"wandb_url": "<string>",
"step": 123,
"checkpoint_path": "<string>",
"model_path": "<string>",
"training_offset": 123,
"hash": "<string>",
"level": null
}
],
"token_count": 123,
"param_count": 123,
"total_price": 123,
"epochs_completed": 123,
"queue_depth": 123,
"wandb_project_name": "<string>",
"wandb_url": "<string>",
"from_checkpoint": "<string>",
"from_hf_model": "<string>",
"hf_model_revision": "<string>"
}
List the metadata for a single fine-tuning job.
# Docs for v2 can be found by changing the above selector ^
# Example: retrieve metadata for a single fine-tuning job with the Together Python SDK.
from together import Together
import os
# The API key is read from the environment; TOGETHER_API_KEY must be set before running.
client = Together(
api_key=os.environ.get("TOGETHER_API_KEY"),
)
# Fetch one fine-tuning job by ID; "ft-id" is a placeholder for a real job ID.
fine_tune = client.fine_tuning.retrieve(id="ft-id")
print(fine_tune)
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"status": "pending",
"training_file": "<string>",
"validation_file": "<string>",
"model": "<string>",
"model_output_name": "<string>",
"model_output_path": "<string>",
"trainingfile_numlines": 123,
"trainingfile_size": 123,
"created_at": "<string>",
"updated_at": "<string>",
"n_epochs": 123,
"n_checkpoints": 123,
"n_evals": 123,
"batch_size": "max",
"learning_rate": 123,
"lr_scheduler": {
"lr_scheduler_type": "linear",
"lr_scheduler_args": {
"min_lr_ratio": 0
}
},
"warmup_ratio": 123,
"max_grad_norm": 123,
"weight_decay": 123,
"eval_steps": 123,
"train_on_inputs": "auto",
"training_method": {
"method": "sft",
"train_on_inputs": "auto"
},
"training_type": {
"type": "Full"
},
"job_id": "<string>",
"events": [
{
"object": "fine-tune-event",
"created_at": "<string>",
"message": "<string>",
"type": "job_pending",
"param_count": 123,
"token_count": 123,
"total_steps": 123,
"wandb_url": "<string>",
"step": 123,
"checkpoint_path": "<string>",
"model_path": "<string>",
"training_offset": 123,
"hash": "<string>",
"level": null
}
],
"token_count": 123,
"param_count": 123,
"total_price": 123,
"epochs_completed": 123,
"queue_depth": 123,
"wandb_project_name": "<string>",
"wandb_url": "<string>",
"from_checkpoint": "<string>",
"from_hf_model": "<string>",
"hf_model_revision": "<string>"
}
Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
Fine-tune job details retrieved successfully
pending, queued, running, compressing, uploading, cancel_requested, cancelled, error, completed
Show child attributes
Show child attributes
fine-tune-event job_pending, job_start, job_stopped, model_downloading, model_download_complete, training_data_downloading, training_data_download_complete, validation_data_downloading, validation_data_download_complete, wandb_init, training_start, checkpoint_save, billing_limit, epoch_complete, training_complete, model_compressing, model_compression_complete, model_uploading, model_upload_complete, job_complete, job_error, cancel_requested, job_restarted, refund, warning
info, warning, error, legacy_info, legacy_iwarning, legacy_ierror
Was this page helpful?