# Docs for v1 can be found by changing the above selector ^
from together import Together
import os

# Initialize the Together client; the API key is read from the environment
# rather than hard-coded.
client = Together(
    api_key=os.environ.get("TOGETHER_API_KEY"),
)

# Start a "classify" evaluation: an LLM judge labels each row of the uploaded
# dataset, and rows whose label is in `pass_labels` count as passing.
# NOTE: the original snippet referenced ParametersEvaluationClassifyParameters /
# ...Judge without importing them (NameError); the SDK accepts plain dicts here.
response = client.evals.create(
    type="classify",
    parameters={
        "judge": {
            "model": "meta-llama/Llama-3.1-70B-Instruct-Turbo",
            "model_source": "serverless",
            "system_template": "You are an expert evaluator...",
        },
        "input_data_file_path": "file-abc123",
        "labels": ["good", "bad"],
        "pass_labels": ["good"],
        "model_to_evaluate": "meta-llama/Llama-3.1-8B-Instruct-Turbo",
    },
)

print(response.workflow_id)
# Example response:
# {
#     "workflow_id": "eval-1234-1244513",
#     "status": "pending"
# }
from together import Together
import os

# Authentication: every request carries a header of the form
# "Authorization: Bearer <token>", where <token> is your Together API key.
# The SDK adds this header automatically from the key passed below.
client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))

# Configuration for a "classify" evaluation: an LLM judge assigns one of
# `labels` to each row of the uploaded dataset; rows labeled with a value in
# `pass_labels` count as passing.
# NOTE: the original snippet referenced ParametersEvaluationClassifyParameters /
# ...Judge without importing them (NameError); the SDK accepts plain dicts here.
classify_parameters = {
    "judge": {
        "model": "meta-llama/Llama-3.1-70B-Instruct-Turbo",
        "model_source": "serverless",
        "system_template": "You are an expert evaluator...",
    },
    "input_data_file_path": "file-abc123",
    "labels": ["good", "bad"],
    "pass_labels": ["good"],
    "model_to_evaluate": "meta-llama/Llama-3.1-8B-Instruct-Turbo",
}

# Submit the evaluation job; the call returns immediately with a workflow id.
response = client.evals.create(type="classify", parameters=classify_parameters)

print(response.workflow_id)
# Example response:
# {
#     "workflow_id": "eval-1234-1244513",
#     "status": "pending"
# }
Was this page helpful?