@autonia/providers
Providers for AgentGraph
Section 1: AGX Data Format
The following are the data structures in which responses are stored by the AGX Agent Framework.
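As a rough reference, the response envelope used in the examples below can be described with TypeScript types along the following lines. This is an illustrative sketch derived from the examples in this section; the type names (AgxResponse, AgxMessage, AgxContentPart, AgxUsage) are assumptions, not exports of the package.

// Illustrative types for the stored response envelope shown in the examples below.
// Type names are assumptions for this sketch, not part of the published package API.
type AgxContentPart =
  | { type: "text"; text: string }
  | { type: "json"; text: string }                                          // stringified JSON payload
  | { type: "image"; image_url: string }                                    // data URL or remote URL
  | { type: "tool_call"; name: string; call_id: string; arguments: string };

interface AgxMessage {
  msg_id: string;                      // UUID assigned to the message
  role: "user" | "assistant";
  content: AgxContentPart[];
}

interface AgxUsage {
  input_tokens: number;
  cache_creation_input_tokens: number;
  cache_read_input_tokens: number;
  output_tokens: number;
  reasoning_output_tokens: number;
  service_tier: string;
}

interface AgxResponse {
  messages: AgxMessage[];
  usage?: AgxUsage;
  request_params?: Record<string, unknown>;    // request parameters echoed back with the response
  response_object?: Record<string, unknown>;   // raw provider response metadata (id, status, ...)
}
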
Text Responses
{
"messages": [
{
"msg_id": "550e8400-e29b-41d4-a716-446655440000",
"role": "assistant",
"content": [{ "type": "text", "text": "Hello! How can I help you today?" }]
}
],
"usage": {
"input_tokens": 492,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"output_tokens": 52,
"reasoning_output_tokens": 0,
"service_tier": "standard"
},
"request_params": {
"model": "gpt-4o",
"temperature": 0.7,
"max_output_tokens": 150
},
"response_object": {
"id": "resp_67ccd2bed1ec8190b14f964abc0542670bb6a6b452d3795b",
"object": "response",
"created_at": 1741476542,
"status": "completed"
}
}

JSON Format / Structured Responses
{
"messages": [
{
"msg_id": "550e8400-e29b-41d4-a716-446655440000",
"role": "assistant",
"content": [
{
"type": "json",
"text": "{\"joke\":\"Why don't scientists trust atoms? Because they make up everything!\"}"
}
]
}
],
"usage": {
"input_tokens": 78,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"output_tokens": 23,
"reasoning_output_tokens": 0,
"service_tier": "standard"
},
"request_params": {
"model": "gpt-4o",
"text": {
"format": {
"type": "json_schema",
"name": "schema",
"strict": true,
"schema": {
"type": "object",
"properties": {
"joke": { "type": "string" }
},
"required": ["joke"],
"additionalProperties": false
}
}
}
}
}
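Because a structured response stores its payload as a JSON string inside a "json" content part, consumers generally have to parse it back into an object. A minimal sketch, reusing the illustrative AgxResponse/AgxContentPart types from the introduction above (parseStructuredOutput is a hypothetical helper, not a package export):

// Parse the stringified payload of a "json" content part back into an object.
function parseStructuredOutput<T>(response: AgxResponse): T | undefined {
  const part = response.messages[0]?.content.find(
    (c): c is Extract<AgxContentPart, { type: "json" }> => c.type === "json"
  );
  return part ? (JSON.parse(part.text) as T) : undefined;
}

// Example with the joke payload above:
// const result = parseStructuredOutput<{ joke: string }>(storedResponse);
// result?.joke === "Why don't scientists trust atoms? ..."
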
Image Responses

{
"messages": [
{
"msg_id": "550e8400-e29b-41d4-a716-446655440000",
"role": "assistant",
"content": [
{
"type": "image",
"image_url": "data:image/jpeg;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=="
}
]
}
],
"usage": {
"input_tokens": 120,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"output_tokens": 0,
"reasoning_output_tokens": 0,
"service_tier": "standard"
}
}

Vision Responses (Image Analysis)
{
"messages": [
{
"msg_id": "550e8400-e29b-41d4-a716-446655440000",
"role": "assistant",
"content": [
{
"type": "text",
"text": "This image shows a fast food receipt from a restaurant. The receipt includes items like tea and pizza with a total of $483.00."
}
]
}
],
"usage": {
"input_tokens": 437,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"output_tokens": 150,
"reasoning_output_tokens": 0,
"service_tier": "standard"
}
}

Audio Transcription Responses
{
"role": "user",
"content": [
{
"type": "text",
"text": "Hello, this is the transcribed content from the audio file."
}
]
}

Tool Call Responses
{
"messages": [
{
"msg_id": "550e8400-e29b-41d4-a716-446655440000",
"role": "assistant",
"content": [
{
"type": "tool_call",
"name": "route",
"call_id": "call_Wsc6HYmzz7tBq0UAWojI39cR",
"arguments": "{\"next\":[\"receipt_uploader\"],\"instructions\":\"Process the uploaded receipt\",\"message_to_human\":\"Processing your receipt...\"}"
}
]
}
],
"usage": {
"input_tokens": 492,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"output_tokens": 52,
"reasoning_output_tokens": 0,
"service_tier": "standard"
}
}
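The arguments field of a tool_call part is itself a JSON string and must be parsed before the tool can be dispatched. A sketch, again reusing the illustrative types from the introduction (extractToolCalls and handleToolCall are hypothetical helpers, not package exports):

// Collect tool_call parts from a stored response and parse their JSON-encoded arguments.
type ToolCallPart = Extract<AgxContentPart, { type: "tool_call" }>;

function extractToolCalls(response: AgxResponse): ToolCallPart[] {
  return response.messages.flatMap((m) =>
    m.content.filter((c): c is ToolCallPart => c.type === "tool_call")
  );
}

declare const storedResponse: AgxResponse;      // a response shaped like the example above
declare function handleToolCall(                // hypothetical dispatcher
  name: string,
  callId: string,
  args: Record<string, unknown>
): void;

for (const call of extractToolCalls(storedResponse)) {
  const args = JSON.parse(call.arguments) as Record<string, unknown>;
  // For the "route" tool above: args.next, args.instructions, args.message_to_human
  handleToolCall(call.name, call.call_id, args);
}
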
Streaming Response Format

Streaming responses include additional metadata for real-time processing:
{
"eventName": "stream.text.delta",
"indexes": {
"outputIndex": 0,
"contentIndex": 0
},
"response": {
"messages": [
{
"msg_id": "550e8400-e29b-41d4-a716-446655440000",
"role": "assistant",
"content": [{ "type": "text", "text": "Hello" }]
}
]
}
}
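A sketch of how a consumer might accumulate text from these streaming events. The event shape mirrors the example above; the accumulator itself is illustrative and reuses the AgxMessage type from the introduction:

// Append "stream.text.delta" payloads into buffers keyed by output/content index.
interface StreamEvent {
  eventName: string;                                        // e.g. "stream.text.delta"
  indexes: { outputIndex: number; contentIndex: number };
  response: { messages: AgxMessage[] };
}

const buffers = new Map<string, string>();

function onStreamEvent(event: StreamEvent): void {
  if (event.eventName !== "stream.text.delta") return;
  const key = `${event.indexes.outputIndex}:${event.indexes.contentIndex}`;
  const part = event.response.messages[0]?.content[0];
  if (part && part.type === "text") {
    buffers.set(key, (buffers.get(key) ?? "") + part.text);
  }
}
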
Section 2: AGX Request Parameters to Providers (OpenAI, Claude)

These parameters are passed as input to a provider class (OpenAI, Claude), which converts them into the request format accepted by that provider. A conversion sketch follows the Text Completion Request example below.
Text Completion Request
{
"type": "text",
"messages": [
{
"role": "user",
"content": [{ "type": "text", "text": "Hello, how are you?" }]
},
{
"role": "assistant",
"content": [{ "type": "text", "text": "I'm doing well, thank you!" }]
}
],
"model": "gpt-4o",
"temperature": 0.7,
"maxTokens": 150,
"stream": false
}
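As a rough illustration of the conversion a provider class performs, the sketch below maps the text completion request above onto an OpenAI Chat Completions-style body. The AGX field names come from the example; the mapping itself is a simplified assumption, not the package's actual implementation:

// Assumed, simplified mapping from an AGX text request to a chat-style provider body.
interface AgxTextRequest {
  type: "text";
  messages: { role: string; content: { type: string; text?: string }[] }[];
  model: string;
  temperature?: number;
  maxTokens?: number;
  stream?: boolean;
}

function toOpenAIChatBody(req: AgxTextRequest) {
  return {
    model: req.model,
    temperature: req.temperature,
    max_tokens: req.maxTokens,
    stream: req.stream ?? false,
    // Flatten AGX content parts into the plain strings expected by chat messages.
    messages: req.messages.map((m) => ({
      role: m.role,
      content: m.content.map((c) => c.text ?? "").join(""),
    })),
  };
}
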
Vision Request (Image Analysis)

{
"type": "vision",
"messages": [
{
"role": "user",
"content": [{ "type": "text", "text": "What do you see in this image?" }]
}
],
"files": [
{
"url": "https://example.com/image.jpg",
"base64": "iVBORw0KGgoAAAANSUhEUgAAAAEAAAAB...",
"originalname": "receipt.jpg",
"mimetype": "image/jpeg"
}
],
"model": "gpt-4o"
}
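For vision requests, the files array typically has to be folded into the provider's image content format. A hedged sketch targeting an OpenAI Chat Completions-style image_url part; the actual provider classes may handle files differently:

// Assumed conversion of AGX vision files into chat-style image_url content parts.
interface AgxFile {
  url?: string;
  base64?: string;
  originalname: string;
  mimetype: string;
}

function toImageContentParts(files: AgxFile[]) {
  return files.map((f) => ({
    type: "image_url" as const,
    image_url: {
      // Prefer an inline data URL when base64 data is present, otherwise the remote URL.
      url: f.base64 ? `data:${f.mimetype};base64,${f.base64}` : f.url ?? "",
    },
  }));
}
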
Structured Output Request

{
"type": "json",
"schema": {
"type": "object",
"properties": {
"joke": { "type": "string" },
"category": { "type": "string" }
},
"required": ["joke", "category"],
"additionalProperties": false
},
"messages": [
{
"role": "user",
"content": [{ "type": "text", "text": "Tell me a programming joke" }]
}
],
"model": "gpt-4o"
}
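The schema field corresponds to the text.format block recorded under request_params in the structured response example of Section 1. A sketch of that mapping; the format name ("schema") and strict: true are taken from that example, and the helper itself is illustrative:

// Assumed mapping from an AGX structured-output request to the json_schema
// text.format shape shown in Section 1's request_params.
function toJsonSchemaFormat(schema: Record<string, unknown>) {
  return {
    text: {
      format: {
        type: "json_schema" as const,
        name: "schema",
        strict: true,
        schema,
      },
    },
  };
}
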
Tool/Function Calling Request

{
"type": "text",
"messages": [
{
"role": "user",
"content": [{ "type": "text", "text": "Route this expense receipt to the appropriate processor" }]
}
],
"tools": [
{
"type": "function",
"name": "route",
"description": "Route tasks to appropriate agents",
"parameters": {
"type": "object",
"properties": {
"next": {
"type": "array",
"items": { "type": "string" },
"description": "Next agents to process"
},
"instructions": {
"type": "string",
"description": "Instructions for the agents"
}
},
"required": ["next", "instructions"]
}
}
],
"model": "gpt-4o"
}
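Tool definitions are given in a flat form (type, name, description, parameters at the top level); providers that expect a nested function object need a small reshaping step. A hedged sketch targeting a Chat Completions-style tools array (the transformation is an assumption, not the package's implementation):

// Assumed reshaping of AGX flat tool definitions into the nested
// { type: "function", function: { ... } } form used by chat-style APIs.
interface AgxTool {
  type: "function";
  name: string;
  description: string;
  parameters: Record<string, unknown>;
}

function toChatCompletionTools(tools: AgxTool[]) {
  return tools.map((t) => ({
    type: "function" as const,
    function: {
      name: t.name,
      description: t.description,
      parameters: t.parameters,
    },
  }));
}
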
Embedding Request

{
"input": "Text to generate embeddings for",
"model": "text-embedding-3-small",
"encoding_format": "float"
}

Audio Transcription Request
{
"file": {
"buffer": "<audio_buffer>",
"originalname": "recording.mp3",
"mimetype": "audio/mpeg"
},
"model": "whisper-1",
"language": "en",
"response_format": "json",
"temperature": 0
}

Streaming Request
{
"type": "text",
"messages": [
{
"role": "user",
"content": [{ "type": "text", "text": "Write a short story" }]
}
],
"stream": true,
"streamCallback": "function(chunk) { console.log(chunk); }",
"model": "gpt-4o"
}
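streamCallback appears above as a string only because JSON cannot represent functions; at call time it is presumably a real callback. A usage sketch, where the provider instance and its complete() method are assumed names rather than the documented API:

// Hypothetical wiring; `provider` and `complete()` are assumed names for this sketch.
const streamingRequest = {
  type: "text" as const,
  messages: [
    { role: "user", content: [{ type: "text" as const, text: "Write a short story" }] },
  ],
  model: "gpt-4o",
  stream: true,
  // Passed as an actual function at runtime.
  streamCallback: (chunk: unknown) => console.log(chunk),
};

// await provider.complete(streamingRequest);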