SDK Overview
Official and community SDKs for integrating HiveOps into your applications.
Supported Languages
- ✅ Python - Official OpenAI SDK
- ✅ JavaScript/TypeScript - Official OpenAI SDK
- ✅ Go - Community SDK (go-openai)
- ✅ .NET - Azure OpenAI SDK
- ✅ Java - Community SDK
- ✅ Ruby - Community SDK
- ✅ PHP - Community SDK
Python SDK
Installation
pip install openai
Requires Python 3.7+. Latest version recommended: openai>=1.0.0
Basic Usage
from openai import OpenAI

client = OpenAI(
    api_key="sk-YOUR-HIVEOPS-KEY",
    base_url="https://ai.hiveops.io"
)

response = client.chat.completions.create(
    model="llama3:8b-instruct-q8_0",
    messages=[
        {"role": "user", "content": "Hello!"}
    ]
)

print(response.choices[0].message.content)
Environment Variables
export OPENAI_API_KEY="sk-YOUR-HIVEOPS-KEY"
export OPENAI_BASE_URL="https://ai.hiveops.io"
from openai import OpenAI

# Automatically reads from environment
client = OpenAI()

response = client.chat.completions.create(
    model="llama3:8b-instruct-q8_0",
    messages=[{"role": "user", "content": "Hello!"}]
)

print(response.choices[0].message.content)
Async Support
import asyncio

from openai import AsyncOpenAI

async_client = AsyncOpenAI(
    api_key="sk-YOUR-HIVEOPS-KEY",
    base_url="https://ai.hiveops.io"
)

async def main():
    response = await async_client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)

asyncio.run(main())
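Because the async client returns coroutines, you can fan several requests out concurrently with asyncio.gather instead of awaiting them one at a time. A minimal sketch (the ask helper and the prompts are illustrative, not part of the SDK):

import asyncio

from openai import AsyncOpenAI

async_client = AsyncOpenAI(
    api_key="sk-YOUR-HIVEOPS-KEY",
    base_url="https://ai.hiveops.io"
)

async def ask(prompt: str) -> str:
    response = await async_client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

async def main():
    # Both requests are in flight at the same time
    answers = await asyncio.gather(
        ask("Name one use of embeddings."),
        ask("Name one use of streaming."),
    )
    for answer in answers:
        print(answer)

asyncio.run(main())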
Streaming
from openai import OpenAI

client = OpenAI(
    api_key="sk-YOUR-HIVEOPS-KEY",
    base_url="https://ai.hiveops.io"
)

stream = client.chat.completions.create(
    model="llama3:8b-instruct-q8_0",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
Full Documentation
JavaScript/TypeScript SDK
Installation
npm install openai
# or
yarn add openai
# or
pnpm add openai
Requires Node.js 18+. Latest version recommended: openai>=4.0.0
Basic Usage (Node.js)
import OpenAI from "openai";

const client = new OpenAI({
  apiKey: "sk-YOUR-HIVEOPS-KEY",
  baseURL: "https://ai.hiveops.io",
});

const response = await client.chat.completions.create({
  model: "llama3:8b-instruct-q8_0",
  messages: [{ role: "user", content: "Hello!" }],
});

console.log(response.choices[0].message.content);
TypeScript
import OpenAI from "openai";
import type {
  ChatCompletionMessageParam,
  ChatCompletion,
} from "openai/resources/chat";

const client = new OpenAI({
  apiKey: process.env.HIVEOPS_API_KEY!,
  baseURL: "https://ai.hiveops.io",
});

const messages: ChatCompletionMessageParam[] = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Hello!" },
];

const response: ChatCompletion = await client.chat.completions.create({
  model: "llama3:8b-instruct-q8_0",
  messages,
});

console.log(response.choices[0].message.content);
Browser Usage
import OpenAI from "openai";

const client = new OpenAI({
  apiKey: "sk-YOUR-HIVEOPS-KEY",
  baseURL: "https://ai.hiveops.io",
  dangerouslyAllowBrowser: true, // Required for browser
});

// Same usage as Node.js
const response = await client.chat.completions.create({
  model: "llama3:8b-instruct-q8_0",
  messages: [{ role: "user", content: "Hello!" }],
});
⚠️ Warning: Exposing API keys in browser is insecure. Use a backend proxy in production.
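One way to keep the key out of the browser is a thin backend proxy that holds the HiveOps key server-side and forwards chat requests. A minimal sketch in Python using FastAPI (the framework choice, route name, and request shape are illustrative, not part of HiveOps):

import os

from fastapi import FastAPI
from openai import OpenAI
from pydantic import BaseModel

app = FastAPI()
client = OpenAI(
    api_key=os.environ["HIVEOPS_API_KEY"],  # the key never leaves the server
    base_url="https://ai.hiveops.io",
)

class ChatRequest(BaseModel):
    messages: list[dict]

@app.post("/api/chat")
def chat(req: ChatRequest):
    # Browser code calls this route instead of calling HiveOps directly
    response = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=req.messages,
    )
    return {"content": response.choices[0].message.content}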
Streaming
const stream = await client.chat.completions.create({
  model: "llama3:8b-instruct-q8_0",
  messages: [{ role: "user", content: "Tell me a story" }],
  stream: true,
});

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content || "";
  process.stdout.write(content);
}
Full Documentation
Go SDK
Installation
go get github.com/sashabaranov/go-openai
Basic Usage
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultConfig("sk-YOUR-HIVEOPS-KEY")
    config.BaseURL = "https://ai.hiveops.io"
    client := openai.NewClientWithConfig(config)

    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: "llama3:8b-instruct-q8_0",
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }

    fmt.Println(resp.Choices[0].Message.Content)
}
Environment Variables
package main

import (
    "os"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    apiKey := os.Getenv("HIVEOPS_API_KEY")
    if apiKey == "" {
        panic("HIVEOPS_API_KEY not set")
    }

    config := openai.DefaultConfig(apiKey)
    config.BaseURL = "https://ai.hiveops.io"
    client := openai.NewClientWithConfig(config)
    _ = client // use the client as shown in Basic Usage
}
Streaming
package main

import (
    "context"
    "fmt"
    "io"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultConfig("sk-YOUR-HIVEOPS-KEY")
    config.BaseURL = "https://ai.hiveops.io"
    client := openai.NewClientWithConfig(config)

    ctx := context.Background()
    req := openai.ChatCompletionRequest{
        Model: "llama3:8b-instruct-q8_0",
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleUser,
                Content: "Tell me a story",
            },
        },
        Stream: true,
    }

    stream, err := client.CreateChatCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("Stream error: %v\n", err)
        return
    }
    defer stream.Close()

    for {
        response, err := stream.Recv()
        if err == io.EOF {
            fmt.Println("\nStream finished")
            return
        }
        if err != nil {
            fmt.Printf("Stream error: %v\n", err)
            return
        }
        fmt.Print(response.Choices[0].Delta.Content)
    }
}
Full Documentation
.NET SDK
Installation
dotnet add package Azure.AI.OpenAI
Basic Usage
using Azure.AI.OpenAI;
using Azure;

var client = new OpenAIClient(
    new Uri("https://ai.hiveops.io"),
    new AzureKeyCredential("sk-YOUR-HIVEOPS-KEY")
);

var chatCompletionsOptions = new ChatCompletionsOptions()
{
    DeploymentName = "llama3:8b-instruct-q8_0",
    Messages =
    {
        new ChatRequestUserMessage("Hello!")
    }
};

Response<ChatCompletions> response = await client.GetChatCompletionsAsync(chatCompletionsOptions);
ChatResponseMessage message = response.Value.Choices[0].Message;
Console.WriteLine(message.Content);
Full Documentation
Command Line Interface
Using cURL
curl https://ai.hiveops.io/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer sk-YOUR-HIVEOPS-KEY" \
  -d '{
    "model": "llama3:8b-instruct-q8_0",
    "messages": [
      {
        "role": "user",
        "content": "Hello!"
      }
    ]
  }'
Using OpenAI CLI
# Install
pip install openai
# Set environment
export OPENAI_API_KEY="sk-YOUR-HIVEOPS-KEY"
export OPENAI_BASE_URL="https://ai.hiveops.io"
# Run
openai api chat.completions.create \
  -m llama3:8b-instruct-q8_0 \
  -g user "Hello!"
Framework Integration
LangChain
Python:
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="llama3:8b-instruct-q8_0",
    openai_api_base="https://ai.hiveops.io",
    openai_api_key="sk-YOUR-HIVEOPS-KEY"
)

response = llm.invoke("What is AI?")
print(response.content)
TypeScript:
import { ChatOpenAI } from "@langchain/openai";

const llm = new ChatOpenAI({
  modelName: "llama3:8b-instruct-q8_0",
  openAIApiKey: "sk-YOUR-HIVEOPS-KEY",
  configuration: {
    baseURL: "https://ai.hiveops.io",
  },
});

const response = await llm.invoke("What is AI?");
console.log(response.content);
See: LangChain Integration Guide
LlamaIndex
from llama_index.llms.openai import OpenAI

llm = OpenAI(
    model="llama3:8b-instruct-q8_0",
    api_key="sk-YOUR-HIVEOPS-KEY",
    api_base="https://ai.hiveops.io"
)

response = llm.complete("What is AI?")
print(response.text)
CrewAI
from crewai import Agent
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="llama-3-70b-instruct",
    openai_api_base="https://ai.hiveops.io",
    openai_api_key="sk-YOUR-HIVEOPS-KEY"
)

agent = Agent(
    role="Research Analyst",
    goal="Provide insights",
    llm=llm
)
Community SDKs
Java
<!-- Maven -->
<dependency>
    <groupId>com.theokanning.openai-gpt3-java</groupId>
    <artifactId>service</artifactId>
    <version>0.18.0</version>
</dependency>
// OpenAiService has no setBaseUrl(); build its Retrofit client against the
// HiveOps endpoint instead (the library's documented customization pattern).
ObjectMapper mapper = OpenAiService.defaultObjectMapper();
OkHttpClient httpClient = OpenAiService.defaultClient("sk-YOUR-HIVEOPS-KEY", Duration.ofSeconds(30));
Retrofit retrofit = new Retrofit.Builder()
        .baseUrl("https://ai.hiveops.io/")
        .client(httpClient)
        .addConverterFactory(JacksonConverterFactory.create(mapper))
        .addCallAdapterFactory(RxJava2CallAdapterFactory.create())
        .build();
OpenAiService service = new OpenAiService(retrofit.create(OpenAiApi.class));

ChatCompletionRequest request = ChatCompletionRequest.builder()
        .model("llama3:8b-instruct-q8_0")
        .messages(List.of(new ChatMessage("user", "Hello!")))
        .build();

ChatCompletionResult result = service.createChatCompletion(request);
System.out.println(result.getChoices().get(0).getMessage().getContent());
Ruby
# Gemfile
gem 'ruby-openai'
require 'openai'

client = OpenAI::Client.new(
  access_token: "sk-YOUR-HIVEOPS-KEY",
  uri_base: "https://ai.hiveops.io"
)

response = client.chat(
  parameters: {
    model: "llama3:8b-instruct-q8_0",
    messages: [{ role: "user", content: "Hello!" }]
  }
)

puts response.dig("choices", 0, "message", "content")
PHP
composer require openai-php/client
<?php

$client = OpenAI::factory()
    ->withApiKey('sk-YOUR-HIVEOPS-KEY')
    ->withBaseUri('https://ai.hiveops.io')
    ->make();

$response = $client->chat()->create([
    'model' => 'llama3:8b-instruct-q8_0',
    'messages' => [
        ['role' => 'user', 'content' => 'Hello!'],
    ],
]);

echo $response->choices[0]->message->content;
Best Practices
1. Store API Keys Securely
❌ Never do this:
# Hardcoded key (INSECURE!)
client = OpenAI(api_key="sk-1234567890abcdef")
✅ Do this:
import os

from openai import OpenAI

client = OpenAI(
    api_key=os.getenv("HIVEOPS_API_KEY"),
    base_url="https://ai.hiveops.io"
)
2. Set Timeouts
# Python
client.chat.completions.create(
    ...,
    timeout=30  # 30 second timeout
)

// JavaScript: request options such as timeout go in a second argument,
// not in the request body
const response = await client.chat.completions.create(
  { ... },  // request body
  { timeout: 30000 }  // 30 seconds (in ms)
);
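You can also set a default timeout once when the client is constructed instead of repeating it per request; the Python SDK accepts timeout and max_retries at construction. A short sketch (the 5-second connect timeout is an illustrative choice):

import httpx
from openai import OpenAI

client = OpenAI(
    api_key="sk-YOUR-HIVEOPS-KEY",
    base_url="https://ai.hiveops.io",
    # Applied to every request unless overridden per call
    timeout=httpx.Timeout(30.0, connect=5.0),
    max_retries=2,  # SDK retries connection errors and certain 429/5xx responses
)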
3. Implement Error Handling
See: Error Handling Guide
from openai import OpenAI, APIError, RateLimitError

try:
    response = client.chat.completions.create(...)
except RateLimitError:
    # Handle rate limit
    pass
except APIError as e:
    # Handle API errors
    pass
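For rate limits specifically, a simple exponential backoff wrapper on top of the SDK's built-in retries is often enough. A minimal sketch (the attempt count and sleep schedule are illustrative):

import time

from openai import OpenAI, RateLimitError

client = OpenAI()

def chat_with_backoff(messages, max_attempts=5):
    for attempt in range(max_attempts):
        try:
            return client.chat.completions.create(
                model="llama3:8b-instruct-q8_0",
                messages=messages,
            )
        except RateLimitError:
            if attempt == max_attempts - 1:
                raise  # give up after the last attempt
            time.sleep(2 ** attempt)  # wait 1s, 2s, 4s, ... between attempts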
4. Use Streaming for UX
# Better UX - show the response as it generates
stream = client.chat.completions.create(..., stream=True)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
Migration Checklist
Switching from OpenAI to HiveOps (a before/after sketch follows this checklist):
- Install/update SDK to latest version
- Change API key to HiveOps key
- Set base_url to https://ai.hiveops.io
- Update model names (see model mapping)
- Test in development environment
- Implement error handling
- Deploy to production
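In code, the entire switch usually amounts to the client constructor plus the model name; a minimal before/after sketch in Python (the OpenAI key and model in the comments are illustrative):

from openai import OpenAI

# Before (OpenAI):
# client = OpenAI(api_key="sk-OPENAI-KEY")
# model = "gpt-4o-mini"

# After (HiveOps): new key, new base_url, mapped model name
client = OpenAI(
    api_key="sk-YOUR-HIVEOPS-KEY",
    base_url="https://ai.hiveops.io",
)

response = client.chat.completions.create(
    model="llama3:8b-instruct-q8_0",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)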
Support
Need help with SDK integration?
- 📚 Quickstart Guide
- 📖 API Reference
- 🚀 Migration Guide
- 💬 Discord Community
- 📧 Email: [email protected]
Last Updated: March 20, 2024