JavaScript SDK
The official OpenAI JavaScript/TypeScript SDK works seamlessly with the Assisters API in both Node.js and browser environments.

Installation
npm install openai
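If you use yarn or pnpm instead of npm, the equivalent installs are:

yarn add openai
pnpm add openai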
Quick Start
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: 'ask_your_api_key',
  baseURL: 'https://api.assisters.dev/v1'
});

const response = await client.chat.completions.create({
  model: 'llama-3.1-8b',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Hello!' }
  ]
});

console.log(response.choices[0].message.content);
Configuration
Environment Variables
export ASSISTERS_API_KEY="ask_your_api_key"
Then read it when constructing the client:
const client = new OpenAI({
  apiKey: process.env.ASSISTERS_API_KEY,
  baseURL: 'https://api.assisters.dev/v1'
});
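For local development, a .env file is a common alternative to exported shell variables. A minimal sketch using the dotenv package (the package choice is an assumption; any loader works):

// .env contains: ASSISTERS_API_KEY=ask_your_api_key
import 'dotenv/config'; // populates process.env before the client is built
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.ASSISTERS_API_KEY,
  baseURL: 'https://api.assisters.dev/v1'
});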
Client Options
const client = new OpenAI({
  apiKey: 'ask_...',
  baseURL: 'https://api.assisters.dev/v1',
  timeout: 30000,        // Request timeout in ms
  maxRetries: 3,         // Automatic retries
  defaultHeaders: {...}, // Custom headers
});
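The OpenAI SDK also accepts per-request options as a second argument, which override the client defaults for that call only:

// Tighter timeout and no retries, for this request only
const response = await client.chat.completions.create(
  {
    model: 'llama-3.1-8b',
    messages: [{ role: 'user', content: 'Hello!' }]
  },
  { timeout: 10000, maxRetries: 0 }
);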
Chat Completions
Basic Request
const response = await client.chat.completions.create({
  model: 'llama-3.1-8b',
  messages: [
    { role: 'user', content: 'What is machine learning?' }
  ],
  temperature: 0.7,
  max_tokens: 500
});

console.log(response.choices[0].message.content);
console.log(`Tokens used: ${response.usage.total_tokens}`);
Streaming
const stream = await client.chat.completions.create({
  model: 'llama-3.1-8b',
  messages: [{ role: 'user', content: 'Write a poem about JavaScript' }],
  stream: true
});

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content || '';
  process.stdout.write(content);
}
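A stream can only be iterated once, so if you also need the complete reply afterwards (to append to conversation history, for example), accumulate the deltas inside the same loop; a variant of the loop above:

let fullReply = '';
for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content || '';
  fullReply += content;          // collect the complete message
  process.stdout.write(content); // while still streaming to the terminal
}
// fullReply now holds the whole completion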
Multi-turn Conversation
const messages = [
  { role: 'system', content: 'You are a math tutor.' }
];

async function chat(userMessage) {
  messages.push({ role: 'user', content: userMessage });

  const response = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages
  });

  const assistantMessage = response.choices[0].message.content;
  messages.push({ role: 'assistant', content: assistantMessage });
  return assistantMessage;
}

console.log(await chat('What is 2 + 2?'));
console.log(await chat('And if I multiply that by 3?'));
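Since the full history is resent on every call, long conversations will eventually exceed the model's context window. A common mitigation is to keep the system prompt plus only the most recent turns; a minimal sketch (the cutoff of 20 messages is an arbitrary assumption):

function trimHistory(history, maxRecent = 20) {
  // Keep the system prompt and the last maxRecent messages
  const [system, ...rest] = history;
  return [system, ...rest.slice(-maxRecent)];
}

// Send the trimmed copy, keep the full log locally
const response = await client.chat.completions.create({
  model: 'llama-3.1-8b',
  messages: trimHistory(messages)
});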
Embeddings
const response = await client.embeddings.create({
  model: 'e5-large-v2',
  input: 'The quick brown fox jumps over the lazy dog'
});

const embedding = response.data[0].embedding;
console.log(`Dimensions: ${embedding.length}`); // 1024

// Batch embeddings
const batchResponse = await client.embeddings.create({
  model: 'e5-large-v2',
  input: ['First text', 'Second text', 'Third text']
});

batchResponse.data.forEach((data, i) => {
  console.log(`Text ${i}: ${data.embedding.length} dimensions`);
});
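Embeddings are usually compared with cosine similarity, where values closer to 1 mean more semantically similar texts. A minimal sketch, reusing batchResponse from above:

// Cosine similarity between two equal-length vectors
function cosineSimilarity(a, b) {
  let dot = 0, normA = 0, normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

const [first, second] = batchResponse.data.map(d => d.embedding);
console.log(`Similarity: ${cosineSimilarity(first, second).toFixed(3)}`);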
Moderation
const response = await client.moderations.create({
  model: 'llama-guard-3',
  input: 'Hello, how are you today?'
});

const result = response.results[0];
console.log(`Flagged: ${result.flagged}`);
console.log(`Categories:`, result.categories);
console.log(`Scores:`, result.category_scores);
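A common pattern is to screen user input before it ever reaches the chat model; a minimal sketch built from the calls above:

async function safeChat(userInput) {
  const moderation = await client.moderations.create({
    model: 'llama-guard-3',
    input: userInput
  });

  if (moderation.results[0].flagged) {
    return 'Sorry, I can\'t help with that request.';
  }

  const response = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages: [{ role: 'user', content: userInput }]
  });
  return response.choices[0].message.content;
}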
TypeScript Support
The SDK ships with full TypeScript type definitions:
import OpenAI from 'openai';
import type {
  ChatCompletion,
  ChatCompletionMessageParam
} from 'openai/resources/chat/completions';

const client = new OpenAI({
  apiKey: process.env.ASSISTERS_API_KEY!,
  baseURL: 'https://api.assisters.dev/v1'
});

async function getResponse(prompt: string): Promise<string> {
  const messages: ChatCompletionMessageParam[] = [
    { role: 'user', content: prompt }
  ];

  const response: ChatCompletion = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages
  });

  return response.choices[0].message.content ?? '';
}
Error Handling
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: 'ask_...',
  baseURL: 'https://api.assisters.dev/v1'
});

try {
  const response = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages: [{ role: 'user', content: 'Hello' }]
  });
} catch (error) {
  if (error instanceof OpenAI.AuthenticationError) {
    console.log('Invalid API key');
  } else if (error instanceof OpenAI.RateLimitError) {
    console.log('Rate limited. Retry after:', error.headers?.['retry-after']);
  } else if (error instanceof OpenAI.BadRequestError) {
    console.log('Bad request:', error.message);
  } else if (error instanceof OpenAI.APIError) {
    console.log('API error:', error.status, error.message);
  }
}
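The client already retries transient failures automatically (see maxRetries above), but when you want explicit control a manual backoff wrapper is straightforward. A minimal sketch reusing the client and error classes above (the attempt count and base delay are arbitrary assumptions):

async function withBackoff(fn, attempts = 3, baseDelayMs = 1000) {
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn();
    } catch (error) {
      const retryable = error instanceof OpenAI.RateLimitError ||
        (error instanceof OpenAI.APIError && (error.status ?? 0) >= 500);
      if (!retryable || attempt >= attempts - 1) throw error;
      // Exponential backoff: 1s, 2s, 4s, ...
      await new Promise(r => setTimeout(r, baseDelayMs * 2 ** attempt));
    }
  }
}

const reply = await withBackoff(() =>
  client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages: [{ role: 'user', content: 'Hello' }]
  })
);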
Browser Usage
The SDK works in browsers, but never expose your API key client-side. Use a backend proxy instead.

Backend Proxy (Recommended)
// Server-side (Next.js API route)
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.ASSISTERS_API_KEY,
  baseURL: 'https://api.assisters.dev/v1'
});

export async function POST(req) {
  const { message } = await req.json();

  const response = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages: [{ role: 'user', content: message }]
  });

  return Response.json({
    content: response.choices[0].message.content
  });
}
The browser then calls your proxy route instead of the API directly:
// Client-side
async function sendMessage(message) {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message })
  });

  const data = await res.json();
  return data.content;
}
Framework Integration
Next.js
// app/api/chat/route.ts
import OpenAI from 'openai';
import { NextResponse } from 'next/server';

const client = new OpenAI({
  apiKey: process.env.ASSISTERS_API_KEY!,
  baseURL: 'https://api.assisters.dev/v1'
});

export async function POST(req: Request) {
  const { messages } = await req.json();

  const response = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages
  });

  return NextResponse.json(response);
}
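To stream tokens to the browser instead of waiting for the full completion, the route can forward the deltas through a standard web ReadableStream. A minimal sketch of a streaming variant of the same route, assuming the client defined above (plain-text framing is an assumption; you may prefer SSE):

export async function POST(req: Request) {
  const { messages } = await req.json();

  const stream = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages,
    stream: true
  });

  // Forward only the text deltas as a plain-text response body
  const encoder = new TextEncoder();
  const body = new ReadableStream({
    async start(controller) {
      for await (const chunk of stream) {
        const content = chunk.choices[0]?.delta?.content || '';
        if (content) controller.enqueue(encoder.encode(content));
      }
      controller.close();
    }
  });

  return new Response(body, {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' }
  });
}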
Express
import express from 'express';
import OpenAI from 'openai';

const app = express();
const client = new OpenAI({
  apiKey: process.env.ASSISTERS_API_KEY,
  baseURL: 'https://api.assisters.dev/v1'
});

app.use(express.json());

app.post('/chat', async (req, res) => {
  const { message } = req.body;

  const response = await client.chat.completions.create({
    model: 'llama-3.1-8b',
    messages: [{ role: 'user', content: message }]
  });

  res.json({ content: response.choices[0].message.content });
});

app.listen(3000);
Vercel AI SDK
import { createOpenAI } from '@ai-sdk/openai';
import { generateText, streamText } from 'ai';

const assisters = createOpenAI({
  apiKey: process.env.ASSISTERS_API_KEY,
  baseURL: 'https://api.assisters.dev/v1'
});

// Non-streaming
const { text } = await generateText({
  model: assisters('llama-3.1-8b'),
  prompt: 'Hello!'
});

// Streaming
const result = await streamText({
  model: assisters('llama-3.1-8b'),
  prompt: 'Write a poem'
});

for await (const textPart of result.textStream) {
  process.stdout.write(textPart);
}
React Hook Example
import { useState, useCallback } from 'react';

interface Message {
  role: 'user' | 'assistant';
  content: string;
}

export function useChat() {
  const [messages, setMessages] = useState<Message[]>([]);
  const [isLoading, setIsLoading] = useState(false);

  const sendMessage = useCallback(async (content: string) => {
    const userMessage: Message = { role: 'user', content };
    setMessages(prev => [...prev, userMessage]);
    setIsLoading(true);

    try {
      const res = await fetch('/api/chat', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          messages: [...messages, userMessage]
        })
      });

      const data = await res.json();
      const assistantMessage: Message = {
        role: 'assistant',
        content: data.choices[0].message.content
      };
      setMessages(prev => [...prev, assistantMessage]);
    } finally {
      setIsLoading(false);
    }
  }, [messages]);

  return { messages, sendMessage, isLoading };
}
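A component can consume the hook like this; a minimal sketch in which the markup and the './useChat' import path are illustrative assumptions:

import { useState } from 'react';
import { useChat } from './useChat'; // hypothetical path to the hook above

export function ChatBox() {
  const { messages, sendMessage, isLoading } = useChat();
  const [input, setInput] = useState('');

  return (
    <div>
      {messages.map((m, i) => (
        <p key={i}><strong>{m.role}:</strong> {m.content}</p>
      ))}
      <form
        onSubmit={e => {
          e.preventDefault();
          sendMessage(input);
          setInput('');
        }}
      >
        <input value={input} onChange={e => setInput(e.target.value)} />
        <button disabled={isLoading}>Send</button>
      </form>
    </div>
  );
}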
Best Practices
Use environment variables: never expose API keys in client-side code.
Use a backend proxy: always route browser requests through your server.
Enable streaming: use streaming for a better chat UX.
Handle errors: catch and handle API errors gracefully.