TypeScript SDK
Use the official OpenAI TypeScript SDK with AltLLM for type-safe integration.
Installation
npm install openai
yarn add openai
pnpm add openai
Quick Start
import OpenAI from 'openai';

// Point the official OpenAI SDK at the AltLLM-compatible endpoint.
const client = new OpenAI({
  apiKey: 'YOUR_API_KEY',
  baseURL: 'https://altllm-api.viber.autonome.fun/v1'
});

// Send a single-turn chat request and print the assistant's reply.
async function main() {
  const completion = await client.chat.completions.create({
    model: 'altllm-standard',
    messages: [{ role: 'user', content: "What's the price of Bitcoin?" }]
  });
  console.log(completion.choices[0].message.content);
}
main();
Environment Variables
Use environment variables for secure configuration:
# .env file
OPENAI_API_KEY=your_altllm_api_key
OPENAI_BASE_URL=https://altllm-api.viber.autonome.fun/v1
import OpenAI from 'openai';
// Client automatically reads from environment
// (the SDK constructor picks up OPENAI_API_KEY and OPENAI_BASE_URL)
const client = new OpenAI();
// Top-level await: requires an ESM module context or a modern runtime.
const response = await client.chat.completions.create({
model: 'altllm-standard',
messages: [{ role: 'user', content: 'Hello!' }]
});
Streaming Responses
import OpenAI from 'openai';

const client = new OpenAI({
  baseURL: 'https://altllm-api.viber.autonome.fun/v1',
  apiKey: 'YOUR_API_KEY'
});

// Stream a chat completion, writing each token to stdout as it arrives.
async function streamChat() {
  const stream = await client.chat.completions.create({
    model: 'altllm-standard',
    messages: [{ role: 'user', content: 'Tell me about Ethereum' }],
    stream: true
  });

  // Each chunk carries an incremental delta; skip empty keep-alive chunks.
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta?.content;
    if (delta) process.stdout.write(delta);
  }
}
streamChat();
React Hook Example
import { useState, useCallback } from 'react';
import OpenAI from 'openai';

// NOTE(review): dangerouslyAllowBrowser ships the API key to every visitor.
// Acceptable for demos only — in production, proxy through a server route
// so the key never reaches the browser.
const client = new OpenAI({
  baseURL: 'https://altllm-api.viber.autonome.fun/v1',
  apiKey: process.env.NEXT_PUBLIC_ALTLLM_API_KEY,
  dangerouslyAllowBrowser: true // Only for demos
});

/**
 * React hook wrapping the AltLLM client.
 *
 * Returns:
 * - chat(message): resolves with the full reply text (always a string).
 * - chatStream(message): streams tokens into `streamContent`.
 * - isLoading / error / streamContent: shared request state.
 */
function useAltLLM() {
  const [isLoading, setIsLoading] = useState(false);
  const [error, setError] = useState<Error | null>(null);
  const [streamContent, setStreamContent] = useState('');

  // One-shot request; rethrows after recording the error so callers can react.
  const chat = useCallback(async (message: string) => {
    setIsLoading(true);
    setError(null);
    try {
      const response = await client.chat.completions.create({
        model: 'altllm-standard',
        messages: [{ role: 'user', content: message }]
      });
      // `content` is nullable in the SDK types; coalesce to '' so callers
      // always receive a string (consistent with the error-handling example).
      return response.choices[0].message.content ?? '';
    } catch (err) {
      setError(err as Error);
      throw err;
    } finally {
      setIsLoading(false);
    }
  }, []);

  // Streaming request; tokens accumulate into `streamContent` as they arrive.
  const chatStream = useCallback(async (message: string) => {
    setIsLoading(true);
    setError(null);
    setStreamContent('');
    try {
      const stream = await client.chat.completions.create({
        model: 'altllm-standard',
        messages: [{ role: 'user', content: message }],
        stream: true
      });
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content;
        if (content) {
          // Functional update avoids stale-closure state while streaming.
          setStreamContent(prev => prev + content);
        }
      }
    } catch (err) {
      setError(err as Error);
      throw err;
    } finally {
      setIsLoading(false);
    }
  }, []);

  return { chat, chatStream, isLoading, error, streamContent };
}
// Usage in component
// Streams the answer into `streamContent`, which re-renders as tokens arrive.
// NOTE(review): `chat` is destructured but unused here; it shows the
// non-streaming variant is also available from the hook.
function ChatComponent() {
const { chat, chatStream, isLoading, streamContent } = useAltLLM();
return (
<div>
<button onClick={() => chatStream('What is DeFi?')}>
Ask about DeFi
</button>
{isLoading && <p>Loading...</p>}
<p>{streamContent}</p>
</div>
);
}
System Messages
// A system message sets persistent behavior for the conversation;
// sampling is tuned with temperature and capped with max_tokens.
const systemPrompt = 'You are a crypto analyst. Be concise and data-driven.';

const response = await client.chat.completions.create({
  model: 'altllm-pro',
  temperature: 0.7,
  max_tokens: 1000,
  messages: [
    { role: 'system', content: systemPrompt },
    {
      role: 'user',
      content: "Analyze Bitcoin's current market position"
    }
  ]
});
console.log(response.choices[0].message.content);
Custom Tool Calling
import OpenAI from 'openai';

const client = new OpenAI({
  baseURL: 'https://altllm-api.viber.autonome.fun/v1',
  apiKey: 'YOUR_API_KEY'
});

// JSON-schema description of the function exposed to the model.
const tools: OpenAI.Chat.ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_portfolio',
      description: "Get user's crypto portfolio balance",
      parameters: {
        type: 'object',
        properties: {
          wallet_address: {
            type: 'string',
            description: 'Ethereum wallet address'
          }
        },
        required: ['wallet_address']
      }
    }
  }
];

async function main() {
  // Maintain one running conversation so the follow-up replays the exact
  // history. (Fixes two bugs in the earlier version: the follow-up re-sent a
  // *different* user message, and it issued one request per tool call with
  // only a single tool result — invalid when the assistant message
  // references multiple tool_call_ids.)
  const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
    { role: 'user', content: 'Show my portfolio for 0x742d35Cc...' }
  ];

  const response = await client.chat.completions.create({
    model: 'altllm-pro',
    messages,
    tools,
    tool_choice: 'auto'
  });

  const message = response.choices[0].message;
  if (message.tool_calls) {
    // The assistant turn (carrying tool_calls) must precede the tool results.
    messages.push(message);
    for (const toolCall of message.tool_calls) {
      console.log(`Function: ${toolCall.function.name}`);
      console.log(`Arguments: ${toolCall.function.arguments}`);
      // Execute your function
      const args = JSON.parse(toolCall.function.arguments);
      const result = await getPortfolio(args.wallet_address);
      // Append a tool message answering this specific tool_call_id.
      messages.push({
        role: 'tool',
        tool_call_id: toolCall.id,
        content: JSON.stringify(result)
      });
    }
    // Send one follow-up containing the full history and every tool result.
    const followUp = await client.chat.completions.create({
      model: 'altllm-pro',
      messages
    });
    console.log(followUp.choices[0].message.content);
  }
}
main();
Edge Runtime (Next.js)
// app/api/chat/route.ts
// Server-side proxy: keeps ALTLLM_API_KEY out of browser bundles.
import OpenAI from 'openai';
import { NextResponse } from 'next/server';
// Opt this route into the Edge runtime.
export const runtime = 'edge';
const client = new OpenAI({
baseURL: 'https://altllm-api.viber.autonome.fun/v1',
apiKey: process.env.ALTLLM_API_KEY
});
// POST body: { message: string } — responds with { content }.
// NOTE(review): the request body is not validated; add a schema check
// (and error handling) before production use.
export async function POST(request: Request) {
const { message } = await request.json();
const response = await client.chat.completions.create({
model: 'altllm-standard',
messages: [{ role: 'user', content: message }]
});
return NextResponse.json({
content: response.choices[0].message.content
});
}
Usage Tracking
const response = await client.chat.completions.create({
  model: 'altllm-standard',
  messages: [{ role: 'user', content: 'Hello!' }]
});

// Track usage
const usage = response.usage;
console.log(`Prompt tokens: ${usage?.prompt_tokens}`);
console.log(`Completion tokens: ${usage?.completion_tokens}`);
console.log(`Total tokens: ${usage?.total_tokens}`);

// Estimate cost (altllm-standard: $0.60/$2.40 per 1M)
const USD_PER_INPUT_TOKEN = 0.0000006;  // $0.60 per 1M tokens
const USD_PER_OUTPUT_TOKEN = 0.0000024; // $2.40 per 1M tokens
const inputCost = (usage?.prompt_tokens ?? 0) * USD_PER_INPUT_TOKEN;
const outputCost = (usage?.completion_tokens ?? 0) * USD_PER_OUTPUT_TOKEN;
const totalCost = inputCost + outputCost;
console.log(`Estimated cost: $${totalCost.toFixed(6)}`);
Error Handling
import OpenAI from 'openai';
// The SDK retries transient failures itself, governed by these options.
const client = new OpenAI({
baseURL: 'https://altllm-api.viber.autonome.fun/v1',
apiKey: 'YOUR_API_KEY',
timeout: 60000, // 60 second timeout
maxRetries: 3 // Retry up to 3 times
});
// Sends one message and returns the reply text ('' when content is null).
// Narrows SDK errors from most to least specific before rethrowing.
async function chatWithRetry(message: string): Promise<string> {
try {
const response = await client.chat.completions.create({
model: 'altllm-standard',
messages: [{ role: 'user', content: message }]
});
return response.choices[0].message.content || '';
} catch (error) {
// 401: key missing or invalid — not retryable; fix configuration.
if (error instanceof OpenAI.AuthenticationError) {
console.error('Invalid API key');
throw error;
}
// 429: the SDK's maxRetries backoff was already exhausted.
if (error instanceof OpenAI.RateLimitError) {
console.error('Rate limited - implement backoff');
throw error;
}
// Any other HTTP-level error returned by the API.
if (error instanceof OpenAI.APIError) {
console.error(`API error: ${error.status} - ${error.message}`);
throw error;
}
// Network or unknown errors propagate unchanged.
throw error;
}
}
TypeScript Types
import OpenAI from 'openai';

// Convenience aliases over the SDK's chat-completion types.
type ChatMessage = OpenAI.Chat.ChatCompletionMessageParam;
type ChatResponse = OpenAI.Chat.ChatCompletion;
type ChatChoice = OpenAI.Chat.ChatCompletion.Choice;

// Type-safe model selection
type AltLLMModel =
  | 'altllm-light'
  | 'altllm-standard'
  | 'altllm-pro'
  | 'altllm-pro-max'
  | 'altllm-max'
  | 'altllm-native-light'
  | 'altllm-native-standard'
  | 'altllm-native-pro'
  | 'altllm-native-promax'
  | 'altllm-native-max';

// Thin wrapper restricting `model` to the AltLLM catalogue at compile time.
async function chat(
  model: AltLLMModel,
  messages: ChatMessage[]
): Promise<ChatResponse> {
  const request = { model, messages };
  return client.chat.completions.create(request);
}