```python
from together import Together
import os

client = Together(
    api_key=os.environ.get("TOGETHER_API_KEY"),
)

jobs = client.evaluation.list()

for job in jobs:
    print(job.workflow_id)
```

Example response:

```json
[
  {
    "workflow_id": "eval-1234aedf",
    "type": "classify",
    "owner_id": "<string>",
    "status": "completed",
    "status_updates": [
      {
        "status": "pending",
        "message": "Job is pending evaluation",
        "timestamp": "2025-07-23T17:10:04.837888Z"
      }
    ],
    "parameters": {},
    "created_at": "2025-07-23T17:10:04.837888Z",
    "updated_at": "2025-07-23T17:10:04.837888Z",
    "results": {
      "generation_fail_count": 0,
      "judge_fail_count": 0,
      "invalid_label_count": 0,
      "result_file_id": "file-1234-aefd",
      "pass_percentage": 10,
      "label_counts": "{\"yes\": 10, \"no\": 0}"
    }
  }
]
```
Request:
- Authorization (header): Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
- User ID filter (admin only): Admin users can specify a user ID to filter jobs; pass an empty string to get all jobs.
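If you call the REST endpoint directly instead of using the Python SDK, the token goes in the Authorization header. The sketch below is illustrative only: the endpoint URL and the name of the user-filter query parameter are assumptions, not confirmed by this page, so check the endpoint reference before relying on them.

```python
import os
import requests

# Assumed v1 REST path; verify against the endpoint reference.
resp = requests.get(
    "https://api.together.xyz/v1/evaluations",
    headers={"Authorization": f"Bearer {os.environ['TOGETHER_API_KEY']}"},
    # Admin-only user filter; the query parameter name "user_id" below is a
    # placeholder, not confirmed by this page.
    # params={"user_id": ""},
)
resp.raise_for_status()

# The response body is a JSON array of job records (see the example above).
for job in resp.json():
    print(job["workflow_id"], job["status"])
```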
Response (200, evaluation jobs retrieved successfully). Each returned job has the following fields:
- workflow_id: The evaluation job ID, e.g. "eval-1234aedf".
- type: The type of evaluation; one of classify, score, compare, e.g. "classify".
- owner_id: ID of the job owner (admin only).
- status: Current status of the job; one of pending, queued, running, completed, error, user_error, e.g. "completed".
- status_updates: History of status updates (admin only).
- parameters: The parameters used for this evaluation.
- created_at: When the job was created, e.g. "2025-07-23T17:10:04.837888Z".
- updated_at: When the job was last updated, e.g. "2025-07-23T17:10:04.837888Z".
- results: Results of the evaluation (present when completed), with the following child attributes:
  - generation_fail_count: Number of failed generations, e.g. 0.
  - judge_fail_count: Number of failed judge generations, e.g. 0.
  - invalid_label_count: Number of invalid labels, e.g. 0.
  - result_file_id: Data file ID, e.g. "file-1234-aefd".
  - pass_percentage: Percentage of pass labels, e.g. 10.
  - label_counts: JSON string representing label counts, e.g. "{\"yes\": 10, \"no\": 0}".