Agentic LLM
Build applications with ASI:One's agentic models that can autonomously call agents from the Agentverse marketplace and handle complex workflows. These models can discover, coordinate, and execute tasks through a network of specialized agents available on Agentverse.
Overview
ASI:One's agentic model (asi1) is designed to automatically discover and coordinate with agents from the Agentverse marketplace to accomplish complex tasks. It handles agent selection, orchestration, and execution planning autonomously by connecting to the vast ecosystem of agents available on Agentverse.
Key Features:
- Autonomous Agent Discovery: Automatically finds relevant agents from the Agentverse marketplace for your tasks
- Session Persistence: Maintains conversation context across multiple interactions
- Asynchronous Processing: Handles long-running agent workflows from Agentverse
- Streaming Support: Real-time response streaming for better UX
Quick Start
- Python
- cURL
- JavaScript
import os
import uuid
import json
import requests
import sys
import time

API_KEY = os.getenv("ASI_ONE_API_KEY") or "sk-REPLACE_ME"
ENDPOINT = "https://api.asi1.ai/v1/chat/completions"
MODEL = "asi1"
TIMEOUT = 90  # seconds

# In-memory session management
SESSION_MAP: dict[str, str] = {}


def get_session_id(conv_id: str) -> str:
    """Return existing session UUID for this conversation or create a new one."""
    sid = SESSION_MAP.get(conv_id)
    if sid is None:
        sid = str(uuid.uuid4())
        SESSION_MAP[conv_id] = sid
    return sid


def ask(conv_id: str, messages: list[dict], *, stream: bool = False) -> str:
    """Send the messages list to the ASI:One agent and return the assistant reply."""
    session_id = get_session_id(conv_id)
    print(f"[session] Using session-id: {session_id}")

    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "x-session-id": session_id,
        "Content-Type": "application/json",
    }
    payload = {
        "model": MODEL,
        "messages": messages,
        "stream": stream,
    }

    if not stream:
        resp = requests.post(ENDPOINT, headers=headers, json=payload, timeout=TIMEOUT)
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]

    # Streaming implementation: read server-sent events line by line
    with requests.post(ENDPOINT, headers=headers, json=payload, timeout=TIMEOUT, stream=True) as resp:
        resp.raise_for_status()
        full_text = ""
        for line in resp.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue
            line = line[len("data: "):]
            if line == "[DONE]":
                break
            try:
                chunk = json.loads(line)
                choices = chunk.get("choices")
                token = choices[0].get("delta", {}).get("content") if choices else None
                if token:
                    sys.stdout.write(token)
                    sys.stdout.flush()
                    full_text += token
            except json.JSONDecodeError:
                continue
        print()
        return full_text


if __name__ == "__main__":  # Simple usage example
    conv_id = str(uuid.uuid4())
    messages = [
        {"role": "user", "content": "use Hi-dream model to generate image of monkey sitting on top of mountain"}
    ]
    reply = ask(conv_id, messages, stream=True)
    print(f"\nAssistant: {reply}")
# Generate a session ID
SESSION_ID=$(uuidgen)
curl -X POST https://api.asi1.ai/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $ASI_ONE_API_KEY" \
  -H "x-session-id: $SESSION_ID" \
  -d '{
    "model": "asi1",
    "messages": [
      {"role": "user", "content": "use Hi-dream model to generate image of monkey sitting on top of mountain"}
    ],
    "stream": false
  }'
import { v4 as uuidv4 } from 'uuid';

const API_KEY = process.env.ASI_ONE_API_KEY;
const ENDPOINT = "https://api.asi1.ai/v1/chat/completions";
const MODEL = "asi1";

// Session management
const sessionMap = new Map();

function getSessionId(convId) {
  let sessionId = sessionMap.get(convId);
  if (!sessionId) {
    sessionId = uuidv4();
    sessionMap.set(convId, sessionId);
  }
  return sessionId;
}

async function ask(convId, messages, stream = false) {
  const sessionId = getSessionId(convId);
  console.log(`[session] Using session-id: ${sessionId}`);

  const response = await fetch(ENDPOINT, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${API_KEY}`,
      'x-session-id': sessionId,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({ model: MODEL, messages, stream }),
  });

  if (!response.ok) {
    throw new Error(`Request failed with status ${response.status}`);
  }

  if (!stream) {
    const result = await response.json();
    return result.choices[0].message.content;
  }

  // Handle streaming response (server-sent events)
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let fullText = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    const chunk = decoder.decode(value);
    const lines = chunk.split('\n').filter(line => line.trim());

    for (const line of lines) {
      if (line.startsWith('data: ')) {
        const data = line.slice(6);
        if (data === '[DONE]') return fullText;
        try {
          const parsed = JSON.parse(data);
          const content = parsed.choices?.[0]?.delta?.content;
          if (content) {
            process.stdout.write(content);
            fullText += content;
          }
        } catch (e) {
          // Ignore malformed events
        }
      }
    }
  }
  console.log();
  return fullText;
}

// Usage example - the agent is called from the Agentverse marketplace
const convId = uuidv4();
const messages = [{ role: 'user', content: 'use Hi-dream model to generate image of monkey sitting on top of mountain' }];
const reply = await ask(convId, messages, true);
console.log(`Assistant: ${reply}`);
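Because the x-session-id header ties requests together, a follow-up message sent with the same conv_id keeps the context of the earlier agent call. Below is a minimal sketch that reuses ask(), conv_id, messages, and reply from the Python example; the follow-up prompt is illustrative.

# Follow-up turn in the same conversation: the same conv_id means the same
# x-session-id header, so the model keeps the context of the earlier agent call.
history = messages + [{"role": "assistant", "content": reply}]
history.append({"role": "user", "content": "Now generate the same scene at sunset"})  # illustrative prompt

follow_up = ask(conv_id, history, stream=False)
print(f"Assistant: {follow_up}")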
Example Output
[session] Using session-id: d92b1ff5-3be0-484d-afe4-04edc5239a1c
I'll generate an image of a monkey sitting on top of a mountain for you using the Hi-dream model.
Image generated. 
The exact wording and session ID will vary, but you should always receive a direct image link once generation completes.
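Since the image link is delivered as part of the assistant's reply text rather than as a separate response field, a small helper can pull it out for display or download. This is a convenience sketch, not part of the API: it simply scans the reply returned by ask() for URLs with a regular expression.

import re

def extract_image_links(reply: str) -> list[str]:
    """Pull any http(s) URLs out of the assistant's reply text (convenience helper)."""
    # A simple pattern is enough here; trim trailing punctuation the model may add
    return [url.rstrip(".,)") for url in re.findall(r"https?://\S+", reply)]

links = extract_image_links(reply)
if links:
    print(f"Image URL: {links[0]}")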
Session Management
Agentic models require session persistence to maintain context across agent interactions with the Agentverse marketplace. Always include the x-session-id header:
import uuid

# In production, store this mapping in Redis or a database
SESSION_MAP: dict[str, str] = {}


def get_session_id(conversation_id: str) -> str:
    """Create or retrieve the session ID for a conversation."""
    session_id = SESSION_MAP.get(conversation_id)
    if not session_id:
        session_id = str(uuid.uuid4())
        SESSION_MAP[conversation_id] = session_id
    return session_id


# Include in every request
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "x-session-id": get_session_id("user_123_chat"),
    "Content-Type": "application/json",
}
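For production use, the in-memory SESSION_MAP can be swapped for a shared store such as Redis so that sessions survive restarts and work across multiple workers. The sketch below uses redis-py; the connection settings, key prefix, and 24-hour expiry are illustrative assumptions, not requirements of the API.

import uuid
import redis  # pip install redis

# Assumes a Redis instance reachable at localhost:6379 (adjust as needed)
r = redis.Redis(host="localhost", port=6379, decode_responses=True)

SESSION_TTL_SECONDS = 24 * 60 * 60  # illustrative expiry, tune to your needs


def get_session_id(conversation_id: str) -> str:
    """Return a stable session ID for this conversation, backed by Redis."""
    key = f"asi1:session:{conversation_id}"  # hypothetical key prefix
    session_id = r.get(key)
    if session_id is None:
        session_id = str(uuid.uuid4())
        # nx=True keeps this safe if two workers race to create the session
        r.set(key, session_id, nx=True, ex=SESSION_TTL_SECONDS)
        session_id = r.get(key) or session_id
    return session_id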
Asynchronous Agent Processing
When agents from the Agentverse marketplace need time to complete tasks, the model may send a deferred response. Poll for updates:
import time

# Reuses ask(), get_session_id(), and SESSION_MAP from the Quick Start example above.
def poll_for_async_reply(
    conv_id: str,
    history: list[dict],
    *,
    wait_sec: int = 5,       # poll every 5 seconds
    max_attempts: int = 24,  # ~2 minutes total
) -> str | None:
    """Ask ASI:One 'Any update?' until reply text actually changes."""
    for attempt in range(max_attempts):
        time.sleep(wait_sec)
        print(f"🔄 polling (attempt {attempt + 1}) …", flush=True)
        update_prompt = {"role": "user", "content": "Any update?"}
        latest = ask(conv_id, history + [update_prompt], stream=False)
        if latest and latest.strip() != history[-1]["content"].strip():
            return latest
    return None
# Usage example after receiving the initial deferred reply
assistant_reply = ask(conv_id, messages, stream=False)
history = messages + [{"role": "assistant", "content": assistant_reply}]

# The deferred acknowledgement wording can vary, so match it loosely
if "I've sent the message" in assistant_reply:
    follow_up = poll_for_async_reply(conv_id, history)
    if follow_up:
        print(f"Agentverse agent completed task: {follow_up}")
        history.append({"role": "assistant", "content": follow_up})
Best Practices
Session Management
- Use UUIDs for session IDs to avoid collisions
- Store session mappings in Redis or database for production
- Include x-session-id header in every request to maintain context
Error Handling
- Implement timeouts for long-running agent tasks
- Handle network failures with exponential backoff (see the retry sketch after this list)
- Validate responses before processing agent results
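One way to apply the backoff advice is to wrap calls to the ask() helper from the Quick Start in a retry loop. This is a minimal sketch; the exception types, retry count, and delays are illustrative and should be tuned to your workload.

import time
import requests

def ask_with_retry(conv_id: str, messages: list[dict], *, max_retries: int = 4) -> str:
    """Call ask() from the Quick Start, retrying on transient network errors (sketch)."""
    delay = 1.0  # initial backoff in seconds (illustrative)
    for attempt in range(max_retries):
        try:
            return ask(conv_id, messages, stream=False)
        except (requests.ConnectionError, requests.Timeout) as exc:
            if attempt == max_retries - 1:
                raise  # give up after the final attempt
            print(f"[retry] {exc}; retrying in {delay:.0f}s")
            time.sleep(delay)
            delay *= 2  # exponential backoff: 1s, 2s, 4s, ...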
Performance Optimization
- Use streaming for better user experience
- Implement async polling for deferred agent responses
Agent Coordination
- Be specific in requests to help agent discovery from the Agentverse marketplace (see the prompt sketch after this list)
- Allow time for complex multi-agent workflows involving Agentverse agents
- Monitor session state to understand Agentverse agent progress
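To illustrate the specificity point, the snippet below contrasts a vague prompt with the specific one used in the Quick Start. The vague wording is hypothetical; both calls reuse ask() and conv_id from the Python example.

# Vague request: gives agent discovery little to go on
vague = [{"role": "user", "content": "make me a picture"}]

# Specific request: names the target model and describes the scene,
# which helps the model select the right Agentverse agent
specific = [{
    "role": "user",
    "content": "use Hi-dream model to generate image of monkey sitting on top of mountain",
}]

reply = ask(conv_id, specific, stream=False)  # reuses ask() and conv_id from the Quick Start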