Quick Start
Visit the Arcee Model Engine platform at https://models.arcee.ai/
In this video, you will learn how to use the Arcee Model Engine. Model Engine introduces 6 new models, with more to come.
Account Setup
Sign up for an Arcee Model Engine account
Log in using your credentials and authentication code
Configure your payment settings
Generate your API key for your chosen model and begin development
Getting Started with Arcee Model Engine API
Explore integrating Arcee Model Engine API using your preferred programming language with and without streaming. Below are code examples demonstrating API usage in Python, cURL, TypeScript, Node.js, and Go.
What is Streaming?
Streaming is a real-time data transmission method where the model's response is sent back piece by piece, rather than waiting for the complete response to be generated. This allows you to:
Receive and display partial responses immediately
Create more responsive applications
Handle long-form content efficiently
Provide immediate feedback to users
Without Streaming:
# First, install the openai and httpx packages
# pip install openai httpx[http2]
# Be sure to set the following environment variables
# OPENAI_BASE_URL=https://models.arcee.ai/v1
# OPENAI_API_KEY=$ARCEE_TOKEN
import httpx
from openai import OpenAI

# The OpenAI client reads its base URL and API key from the
# environment variables above; HTTP/2 is enabled through httpx.
client = OpenAI(
    http_client=httpx.Client(http2=True),
)

# Request one complete (non-streaming) chat completion and print it.
completion = client.chat.completions.create(
    model='virtuoso-small',
    messages=[{'role': 'user', 'content': 'Your prompt here'}],
    temperature=0.4,
)
print(completion)
# Send one non-streaming chat completion request over HTTP/2.
curl --http2 --request POST https://models.arcee.ai/v1/chat/completions \
  --header "Authorization: Bearer $ARCEE_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "model": "virtuoso-small",
    "messages": [
      {
        "role": "user",
        "content": "Your prompt here"
      }
    ]
  }'
import OpenAI from 'openai';
import dotenv from 'dotenv';

// Load ARCEE_TOKEN (and any other settings) from a local .env file.
dotenv.config();

// Point the OpenAI SDK at the Arcee Model Engine endpoint.
const client = new OpenAI({
  baseURL: 'https://models.arcee.ai/v1',
  apiKey: process.env.ARCEE_TOKEN,
});

/**
 * Send a single prompt to the virtuoso-small model and log the
 * complete (non-streaming) completion response.
 *
 * Errors from the API call are caught and logged rather than thrown.
 */
async function generateResponse(prompt: string): Promise<void> {
  try {
    const response = await client.chat.completions.create({
      model: 'virtuoso-small',
      messages: [{ role: 'user', content: prompt }],
      temperature: 0.4,
    });
    console.log(response);
  } catch (error) {
    console.error('Error:', error);
  }
}

generateResponse('Your prompt here');
// Load ARCEE_TOKEN (and any other settings) from a local .env file.
require('dotenv').config();
const OpenAI = require('openai');

// Point the OpenAI SDK at the Arcee Model Engine endpoint.
const client = new OpenAI({
  baseURL: 'https://models.arcee.ai/v1',
  apiKey: process.env.ARCEE_TOKEN,
});

// Request one complete (non-streaming) chat completion and log it.
async function main() {
  try {
    const response = await client.chat.completions.create({
      model: 'virtuoso-small',
      messages: [{ role: 'user', content: 'Your prompt here' }],
      temperature: 0.4,
    });
    console.log(response);
  } catch (error) {
    // Without this, a failed request becomes an unhandled promise
    // rejection (the streaming example below already guards this way).
    console.error('Error:', error);
  }
}

main();
package main
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"os"
openai "github.com/sashabaranov/go-openai"
"golang.org/x/net/http2"
)
// main sends one non-streaming chat completion request to the Arcee
// Model Engine over HTTP/2 and prints the first choice's content.
func main() {
	apiKey := os.Getenv("ARCEE_TOKEN")
	if apiKey == "" {
		fmt.Println("Please set ARCEE_TOKEN environment variable")
		return
	}

	// Use an HTTP/2 transport, matching the Model Engine endpoint.
	http2Client := &http.Client{
		Transport: &http2.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
		},
	}

	// Point the go-openai client at the Arcee Model Engine.
	config := openai.DefaultConfig(apiKey)
	config.BaseURL = "https://models.arcee.ai/v1"
	config.HTTPClient = http2Client

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: "virtuoso-small",
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    "user",
					Content: "Your prompt here",
				},
			},
			Temperature: 0.4,
		},
	)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	// Guard against an empty Choices slice before indexing, mirroring
	// the len check in the streaming example; indexing blindly would
	// panic if the API returned no choices.
	if len(resp.Choices) == 0 {
		fmt.Println("No completion choices returned")
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}
With Streaming:
# First, install the openai and httpx packages
# pip install openai httpx[http2]
# Be sure to set the following environment variables
# OPENAI_BASE_URL="https://models.arcee.ai/v1"
# OPENAI_API_KEY="$ARCEE_TOKEN"
import httpx
from openai import OpenAI

# HTTP/2-enabled client; base URL and API key come from the environment.
client = OpenAI(
    http_client=httpx.Client(http2=True),
)

# With stream=True the response arrives as a sequence of chunks,
# each carrying a small piece (delta) of the generated text.
stream = client.chat.completions.create(
    model='virtuoso-small',
    messages=[{'role': 'user', 'content': 'Your prompt here'}],
    temperature=0.4,
    stream=True,
)

# Print each text delta as soon as it arrives.
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content is not None:
        print(chunk.choices[0].delta.content, end="")
# Send one streaming chat completion request over HTTP/2; the
# response arrives as server-sent event chunks.
curl --http2 --request POST https://models.arcee.ai/v1/chat/completions \
  --header "Authorization: Bearer $ARCEE_TOKEN" \
  --header "Content-Type: application/json" \
  --data '{
    "stream": true,
    "model": "virtuoso-small",
    "messages": [
      {
        "role": "user",
        "content": "Your prompt here"
      }
    ]
  }'
import OpenAI from 'openai';
import dotenv from 'dotenv';

// Pull ARCEE_TOKEN from a local .env file.
dotenv.config();

// OpenAI SDK client pointed at the Arcee Model Engine endpoint.
const client = new OpenAI({
  baseURL: 'https://models.arcee.ai/v1',
  apiKey: process.env.ARCEE_TOKEN,
});

/**
 * Stream a chat completion for the given prompt, writing each text
 * delta to stdout as soon as it arrives. API errors are logged.
 */
async function generateStreamingResponse(prompt: string): Promise<void> {
  try {
    const stream = await client.chat.completions.create({
      model: 'virtuoso-small',
      messages: [{ role: 'user', content: prompt }],
      temperature: 0.4,
      stream: true,
    });

    // Each chunk carries an incremental delta of the reply.
    for await (const chunk of stream) {
      process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
    console.log('\n');
  } catch (error) {
    console.error('Error:', error);
  }
}

generateStreamingResponse('Tell me a story about a space adventure');
// Pull ARCEE_TOKEN from a local .env file.
require('dotenv').config();
const OpenAI = require('openai');

// OpenAI SDK client pointed at the Arcee Model Engine endpoint.
const client = new OpenAI({
  baseURL: 'https://models.arcee.ai/v1',
  apiKey: process.env.ARCEE_TOKEN,
});

// Stream a chat completion, writing each text delta to stdout as it
// arrives. API errors are caught and logged.
async function main() {
  try {
    const stream = await client.chat.completions.create({
      model: 'virtuoso-small',
      messages: [{ role: 'user', content: 'Tell me a story about space' }],
      temperature: 0.4,
      stream: true
    });

    // Each chunk carries an incremental delta of the reply.
    for await (const chunk of stream) {
      process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
    console.log('\n');
  } catch (error) {
    console.error('Error:', error);
  }
}

main();
package main
import (
"context"
"crypto/tls"
"fmt"
"io"
"net/http"
"os"
openai "github.com/sashabaranov/go-openai"
"golang.org/x/net/http2"
)
// main streams a chat completion from the Arcee Model Engine over
// HTTP/2, printing each text delta until the stream ends.
func main() {
	apiKey := os.Getenv("ARCEE_TOKEN")
	if apiKey == "" {
		fmt.Println("Please set ARCEE_TOKEN environment variable")
		return
	}

	// HTTP/2 transport for the Model Engine endpoint.
	httpClient := &http.Client{
		Transport: &http2.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
		},
	}

	// Configure the go-openai client for the Arcee Model Engine.
	cfg := openai.DefaultConfig(apiKey)
	cfg.BaseURL = "https://models.arcee.ai/v1"
	cfg.HTTPClient = httpClient
	engine := openai.NewClientWithConfig(cfg)

	stream, err := engine.CreateChatCompletionStream(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: "virtuoso-small",
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    "user",
					Content: "Tell me a story about space",
				},
			},
			Temperature: 0.4,
			Stream:      true,
		},
	)
	if err != nil {
		fmt.Printf("Stream creation error: %v\n", err)
		return
	}
	defer stream.Close()

	// Drain the stream, printing each text delta until EOF.
	for {
		chunk, recvErr := stream.Recv()
		if recvErr == io.EOF {
			fmt.Println("\nStream finished")
			return
		}
		if recvErr != nil {
			fmt.Printf("\nStream error: %v\n", recvErr)
			return
		}
		if len(chunk.Choices) > 0 {
			fmt.Print(chunk.Choices[0].Delta.Content)
		}
	}
}
Last updated