Python SDK
Use the official OpenAI Python SDK with AltLLM for seamless integration.
Installation
pip install openai

Quick Start
from openai import OpenAI

# Route the official OpenAI client to the AltLLM-compatible endpoint;
# everything else about the SDK works unchanged.
client = OpenAI(
    base_url="https://altllm-api.viber.autonome.fun/v1",
    api_key="YOUR_API_KEY"
)

# Standard Chat Completions request; only the model name is AltLLM-specific.
response = client.chat.completions.create(
    model="altllm-standard",
    messages=[
        {"role": "user", "content": "What's the price of Bitcoin?"}
    ]
)
print(response.choices[0].message.content)

Environment Variables
Use environment variables for secure configuration:
# .env file
OPENAI_API_KEY=your_altllm_api_key
OPENAI_BASE_URL=https://altllm-api.viber.autonome.fun/v1

import os
from openai import OpenAI

# No explicit arguments: the SDK reads OPENAI_API_KEY and OPENAI_BASE_URL
# from the environment, so credentials stay out of source code.
client = OpenAI()

response = client.chat.completions.create(
    model="altllm-standard",
    messages=[{"role": "user", "content": "Hello!"}]
)

Streaming Responses
from openai import OpenAI

client = OpenAI(
    base_url="https://altllm-api.viber.autonome.fun/v1",
    api_key="YOUR_API_KEY"
)

# stream=True makes create() return an iterator of incremental chunks
# instead of one complete response.
stream = client.chat.completions.create(
    model="altllm-standard",
    messages=[{"role": "user", "content": "Tell me about Ethereum"}],
    stream=True
)

# Each chunk carries a delta; content can be empty on role/finish chunks,
# so guard before printing.
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)

Async Client
import asyncio
from openai import AsyncOpenAI

# AsyncOpenAI mirrors the sync client's API but returns awaitables.
client = AsyncOpenAI(
    base_url="https://altllm-api.viber.autonome.fun/v1",
    api_key="YOUR_API_KEY"
)

async def main():
    # Plain (non-streaming) async request.
    response = await client.chat.completions.create(
        model="altllm-standard",
        messages=[{"role": "user", "content": "Hello async!"}]
    )
    print(response.choices[0].message.content)

    # Async streaming: awaiting create() with stream=True yields an
    # async iterator of chunks.
    stream = await client.chat.completions.create(
        model="altllm-standard",
        messages=[{"role": "user", "content": "Stream this"}],
        stream=True
    )
    async for chunk in stream:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
asyncio.run(main())

System Messages
# Prime the model with a system persona before the user question.
convo = [
    {"role": "system", "content": "You are a crypto analyst. Be concise and data-driven."},
    {"role": "user", "content": "Analyze Bitcoin's current market position"},
]

response = client.chat.completions.create(
    model="altllm-pro",
    messages=convo,
    temperature=0.7,   # moderate randomness
    max_tokens=1000,   # cap the reply length
)
print(response.choices[0].message.content)

Custom Tool Calling
Define custom tools alongside the built-in crypto tools:
import json

# Tool (function) schema in the standard OpenAI format. Custom tools are
# passed alongside AltLLM's built-in crypto tools.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_portfolio",
            "description": "Get user's crypto portfolio balance",
            "parameters": {
                "type": "object",
                "properties": {
                    "wallet_address": {
                        "type": "string",
                        "description": "Ethereum wallet address"
                    }
                },
                "required": ["wallet_address"]
            }
        }
    }
]

# NOTE: a valid Ethereum address is "0x" + 40 hex characters (42 chars
# total); the example below uses a full-length address.
response = client.chat.completions.create(
    model="altllm-pro",
    messages=[
        {"role": "user", "content": "Show my portfolio for 0x742d35Cc6634C0532925a3b844Bc454e4438f44e"}
    ],
    tools=tools,
    tool_choice="auto"
)

# When the model decides to call a tool it returns tool_calls on the
# assistant message instead of (or alongside) plain content.
message = response.choices[0].message
if message.tool_calls:
    for tool_call in message.tool_calls:
        print(f"Function: {tool_call.function.name}")
        print(f"Arguments: {tool_call.function.arguments}")

        # Arguments arrive as a JSON string; parse before use.
        args = json.loads(tool_call.function.arguments)
        result = get_portfolio(args["wallet_address"])  # Your implementation

        # Send the result back: the assistant message containing the
        # tool_call must precede the matching "tool" message, linked by
        # tool_call_id.
        follow_up = client.chat.completions.create(
            model="altllm-pro",
            messages=[
                {"role": "user", "content": "Show my portfolio..."},
                message,
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps(result)
                }
            ]
        )
        print(follow_up.choices[0].message.content)

Multi-turn Conversation
# Shared, mutable conversation history; the system message persists
# across every turn.
messages = [
    {"role": "system", "content": "You are a helpful crypto assistant."}
]

def chat(user_message: str) -> str:
    """Send one user turn, return the assistant's reply, and record both in history."""
    messages.append({"role": "user", "content": user_message})
    # The full history is sent each time so the model has prior context.
    response = client.chat.completions.create(
        model="altllm-standard",
        messages=messages
    )
    assistant_message = response.choices[0].message.content
    messages.append({"role": "assistant", "content": assistant_message})
    return assistant_message

# Multi-turn conversation
print(chat("What's the price of ETH?"))
print(chat("How about BTC?"))
print(chat("Compare their 24h performance"))

Usage Tracking
response = client.chat.completions.create(
    model="altllm-standard",
    messages=[{"role": "user", "content": "Hello!"}]
)

# Every response includes token accounting in .usage.
usage = response.usage
print(f"Prompt tokens: {usage.prompt_tokens}")
print(f"Completion tokens: {usage.completion_tokens}")
print(f"Total tokens: {usage.total_tokens}")

# Estimate cost (altllm-standard: $0.60/$2.40 per 1M)
input_cost = usage.prompt_tokens * 0.0000006       # $0.60 per 1M input tokens
output_cost = usage.completion_tokens * 0.0000024  # $2.40 per 1M output tokens
total_cost = input_cost + output_cost
print(f"Estimated cost: $" + f"{total_cost:.6f}")

Timeout &amp; Retries
from openai import OpenAI

client = OpenAI(
    base_url="https://altllm-api.viber.autonome.fun/v1",
    api_key="YOUR_API_KEY",
    timeout=60.0, # 60 second timeout
    max_retries=3 # Retry up to 3 times
)

# Per-request timeout: with_options() applies the override to this call
# without changing the client's defaults.
response = client.with_options(timeout=120.0).chat.completions.create(
    model="altllm-max",
    messages=[{"role": "user", "content": "Complex analysis..."}]
)