Coding Assistants — AI-powered development tools

# AI Coding Assistants with HiveOps

Build intelligent coding assistants for code completion, debugging, documentation, and code review.

## Overview
Use HiveOps to build:
- Code completion - Autocomplete functions, classes, modules
- Bug detection & fixing - Identify and fix errors
- Code explanation - Understand complex codebases
- Test generation - Auto-generate unit tests
- Documentation - Generate docstrings and README files
- Code review - Automated feedback and suggestions
- Refactoring - Improve code quality and structure
## Quick Start: Code Completion
from openai import OpenAI
import os

# Shared HiveOps client used by every example below.
# Read the key from the environment when available so real credentials
# are never hardcoded in source (the doc's own security scanner flags
# hardcoded secrets); the placeholder remains as a visible fallback.
client = OpenAI(
    api_key=os.environ.get("HIVEOPS_API_KEY", "sk-YOUR-API-KEY"),
    base_url="https://ai.hiveops.io"
)
def complete_code(partial_code, language="python"):
    """Ask the model to complete a partial snippet of code.

    Args:
        partial_code: The incomplete source code to finish.
        language: Language name used in the prompt (default "python").

    Returns:
        The model's completion as a string.
    """
    # Note: the closing ``` fence was missing from the original prompt,
    # which can confuse the model about where the snippet ends.
    prompt = f"""Complete this {language} code:
```{language}
{partial_code}
```
Provide idiomatic, production-ready code. Include comments."""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",  # Best for code generation
        messages=[
            {"role": "system", "content": f"You are an expert {language} programmer."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.2  # Low temp for more deterministic code
    )
    return response.choices[0].message.content
# Usage
partial = """
def binary_search(arr, target):
    left, right = 0, len(arr) - 1
    # Complete the algorithm
"""
completion = complete_code(partial)
print(completion)
---
## Bug Detection & Fixing
### Find and Fix Bugs
```python
def debug_code(buggy_code, language="python", error_message=None):
    """Ask the model to find and fix bugs in a piece of code.

    Args:
        buggy_code: Source code suspected to contain a defect.
        language: Language name used in the prompt (default "python").
        error_message: Optional runtime error text to give the model context.

    Returns:
        The model's bug explanation and fixed code as a string.
    """
    prompt = f"""Find and fix bugs in this {language} code:
```{language}
{buggy_code}
```
"""
    # Only mention the error when the caller actually observed one.
    if error_message:
        prompt += f"\nError message: {error_message}"
    prompt += """
Provide:
- Explanation of the bug
- Fixed code
- Why the fix works"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "system", "content": "You are an expert debugger."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.3
    )
    return response.choices[0].message.content
# Usage
buggy_code = """
def calculate_average(numbers):
    total = sum(numbers)
    return total / len(numbers)

print(calculate_average([]))
"""
fix = debug_code(buggy_code, error_message="ZeroDivisionError: division by zero")
print(fix)
### Security Vulnerability Scanner
```python
def scan_security_issues(code, language="python"):
    """Ask the model to audit code for common security vulnerabilities.

    Args:
        code: Source code to analyze.
        language: Language name used in the prompt (default "python").

    Returns:
        The model's vulnerability report as a string.
    """
    prompt = f"""Analyze this {language} code for security vulnerabilities:
```{language}
{code}
```
Check for:
- SQL injection
- XSS vulnerabilities
- Authentication issues
- Insecure data handling
- Hardcoded secrets
For each issue found, provide:
- Severity (High/Medium/Low)
- Description
- Recommended fix"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "system", "content": "You are a security expert specializing in code audits."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.2  # low temperature for a focused, deterministic audit
    )
    return response.choices[0].message.content
# Usage — the example deliberately contains a SQL-injection flaw for the
# scanner to find; do not "fix" the string.
code_to_scan = """
def login(username, password):
    query = f"SELECT * FROM users WHERE username='{username}' AND password='{password}'"
    return db.execute(query)
"""
vulnerabilities = scan_security_issues(code_to_scan)
print(vulnerabilities)
---
## Code Explanation
### Explain Complex Code
```python
def explain_code(code, language="python", detail_level="medium"):
    """Ask the model to explain code at a configurable level of detail.

    Args:
        code: Source code to explain.
        language: Language name used in the prompt (default "python").
        detail_level: "simple", "medium", or "detailed"; unknown values
            fall back to "medium".

    Returns:
        The model's explanation as a string.
    """
    levels = {
        "simple": "Explain like I'm a beginner",
        "medium": "Explain for an intermediate developer",
        "detailed": "Provide an in-depth technical explanation"
    }
    prompt = f"""Explain this {language} code:
```{language}
{code}
```
{levels.get(detail_level, levels['medium'])}
Include:
- What the code does
- How it works (step-by-step)
- Time/space complexity (if applicable)
- Potential improvements"""
    response = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",  # good cost/quality balance for explanations
        messages=[{"role": "user", "content": prompt}],
        temperature=0.5  # medium temperature reads more naturally
    )
    return response.choices[0].message.content
# Usage
complex_code = """
def quick_sort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)
"""
explanation = explain_code(complex_code, detail_level="detailed")
print(explanation)
---
## Test Generation
### Auto-Generate Unit Tests
```python
def generate_tests(function_code, language="python", framework="pytest"):
    """Ask the model to generate unit tests for a function.

    Args:
        function_code: Source of the function under test.
        language: Language name used in the prompt (default "python").
        framework: Test framework to target (default "pytest").

    Returns:
        Generated test code as a string.
    """
    prompt = f"""Generate comprehensive unit tests for this {language} function:
```{language}
{function_code}
```
Testing framework: {framework}
Include tests for:
- Normal cases
- Edge cases
- Error cases
- Boundary conditions"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "system", "content": f"You are an expert in {language} testing with {framework}."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.3
    )
    return response.choices[0].message.content
# Usage
# NOTE(review): the extracted source showed "validateemail" and a regex with
# missing backslash/underscores — reconstructed here as the conventional
# email-validation example; confirm against the original document.
function = """
def validate_email(email):
    import re
    pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$'
    return re.match(pattern, email) is not None
"""
tests = generate_tests(function, framework="pytest")
print(tests)
### Test-Driven Development (TDD)
```python
def generate_code_from_tests(test_code, language="python"):
    """TDD helper: ask the model to implement code that satisfies given tests.

    Args:
        test_code: Unit-test source the implementation must pass.
        language: Language name used in the prompt (default "python").

    Returns:
        The generated implementation as a string.
    """
    prompt = f"""Given these unit tests, implement the function that passes all tests:
```{language}
{test_code}
```
Requirements:
- Implement clean, efficient code
- Pass all test cases
- Include type hints (if applicable)
- Add docstring"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2  # low temperature for deterministic code generation
    )
    return response.choices[0].message.content
# Usage
tests = """
def test_fibonacci():
    assert fibonacci(0) == 0
    assert fibonacci(1) == 1
    assert fibonacci(5) == 5
    assert fibonacci(10) == 55
"""
implementation = generate_code_from_tests(tests)
print(implementation)
---
## Code Documentation
### Generate Docstrings
```python
def generate_docstring(function_code, style="google"):
    """Ask the model to add a docstring to a Python function.

    Args:
        function_code: Source of the function to document.
        style: Docstring convention to follow (default "google").

    Returns:
        The documented function source as a string.
    """
    prompt = f"""Add a comprehensive docstring to this function using {style} style:
```python
{function_code}
```
Include:
- Brief description
- Args with types
- Returns with type
- Raises (if applicable)
- Example usage"""
    response = client.chat.completions.create(
        model="gemma-2-9b-it",  # Cost-effective for documentation
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3
    )
    return response.choices[0].message.content
# Usage
function = """
def merge_sorted_lists(list1, list2):
    result = []
    i, j = 0, 0
    while i < len(list1) and j < len(list2):
        if list1[i] <= list2[j]:
            result.append(list1[i])
            i += 1
        else:
            result.append(list2[j])
            j += 1
    result.extend(list1[i:])
    result.extend(list2[j:])
    return result
"""
documented = generate_docstring(function, style="google")
print(documented)
### Generate README
```python
def generate_readme(project_structure, description):
    """Generate a README.md draft for a project via the chat API.

    Args:
        project_structure: Text rendering of the project's file tree.
        description: One-line description of the project.

    Returns:
        Markdown README content produced by the model.
    """
    prompt = f"""Generate a professional README.md for this project:
Project: {description}
File structure:
{project_structure}
Include:
- Project title and description
- Features
- Installation instructions
- Usage examples
- API documentation (if applicable)
- Contributing guidelines
- License
"""
    response = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",  # smaller model is sufficient for prose
        messages=[{"role": "user", "content": prompt}],
        temperature=0.5
    )
    return response.choices[0].message.content
# Usage
# Example: generate a README from a sketch of the project's file tree.
structure = """
my-api/
├── src/
│ ├── app.py
│ ├── routes/
│ └── models/
├── tests/
├── requirements.txt
└── README.md
"""
readme = generate_readme(structure, "REST API for user management")
print(readme)
## Code Review Assistant

### Automated Code Review
def review_code(code, language="python", focus=None):
    """Ask the model to perform a code review.

    Args:
        code: Source code to review.
        language: Language name used in the prompt (default "python").
        focus: Optional list of review focus areas; defaults to
            correctness, performance, style, and security.

    Returns:
        The model's review as a string.
    """
    # `focus or [...]` also covers an explicitly-passed empty list.
    focus_areas = focus or ["correctness", "performance", "style", "security"]
    prompt = f"""Perform a code review on this {language} code:
```{language}
{code}
```
Focus areas: {', '.join(focus_areas)}
Provide:
- Issues found (with severity)
- Specific suggestions
- Improved version of problematic sections
- Overall rating (1-10)"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "system", "content": "You are a senior software engineer conducting code reviews."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.4
    )
    return response.choices[0].message.content
# Usage — the example intentionally uses unidiomatic patterns
# (range(len(...)), `!= None`) for the reviewer to flag; leave them as-is.
code_for_review = """
def process_data(data):
    result = []
    for i in range(len(data)):
        if data[i] != None:
            result.append(data[i] * 2)
    return result
"""
review = review_code(code_for_review)
print(review)
---
## Code Refactoring
### Refactor for Efficiency
```python
def refactor_code(code, language="python", goal="efficiency"):
    """Ask the model to refactor code toward a named goal.

    Args:
        code: Source code to refactor.
        language: Language name used in the prompt (default "python").
        goal: One of "efficiency", "readability", "modularity", "modern";
            unknown values fall back to "efficiency".

    Returns:
        The refactored code and explanation as a string.
    """
    goals = {
        "efficiency": "Optimize for performance and time complexity",
        "readability": "Improve code clarity and maintainability",
        "modularity": "Break into smaller, reusable functions",
        "modern": "Use modern language features and best practices"
    }
    prompt = f"""Refactor this {language} code:
```{language}
{code}
```
Goal: {goals.get(goal, goals['efficiency'])}
Provide:
- Refactored code
- Explanation of changes
- Performance comparison (if applicable)"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3
    )
    return response.choices[0].message.content
# Usage — intentionally O(n^2) example for the model to optimize.
inefficient_code = """
def find_duplicates(arr):
    duplicates = []
    for i in range(len(arr)):
        for j in range(i + 1, len(arr)):
            if arr[i] == arr[j] and arr[i] not in duplicates:
                duplicates.append(arr[i])
    return duplicates
"""
refactored = refactor_code(inefficient_code, goal="efficiency")
print(refactored)
---
## Language Translation
### Convert Between Languages
```python
def translate_code(code, from_lang, to_lang):
    """Ask the model to translate code between programming languages.

    Args:
        code: Source code in *from_lang*.
        from_lang: Name of the source language (e.g. "JavaScript").
        to_lang: Name of the target language (e.g. "Python").

    Returns:
        The translated code as a string.
    """
    prompt = f"""Translate this code from {from_lang} to {to_lang}:
```{from_lang}
{code}
```
Requirements:
- Use idiomatic {to_lang} patterns
- Maintain functionality
- Add comments explaining {to_lang}-specific features"""
    response = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "system", "content": f"You are an expert in both {from_lang} and {to_lang}."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.2  # low temperature: faithful, deterministic translation
    )
    return response.choices[0].message.content
# Usage
js_code = """
const users = await fetch('/api/users')
    .then(res => res.json())
    .catch(err => console.error(err));
"""
python_version = translate_code(js_code, from_lang="JavaScript", to_lang="Python")
print(python_version)
---
## VS Code Extension Example
### Simple Code Assistant Extension
```typescript
// extension.ts
import * as vscode from 'vscode';
import OpenAI from 'openai';
// Shared OpenAI-compatible client pointed at the HiveOps gateway.
// The key is read from the environment; `!` asserts it is set at runtime.
const client = new OpenAI({
  apiKey: process.env.HIVEOPS_API_KEY!,
  baseURL: 'https://ai.hiveops.io'
});

// Extension entry point: registers two editor commands and adds them to
// the context's subscriptions so they are disposed on deactivation.
export function activate(context: vscode.ExtensionContext) {
  // Command: Explain selected code
  let explainCommand = vscode.commands.registerCommand(
    'codeAssistant.explain',
    async () => {
      const editor = vscode.window.activeTextEditor;
      if (!editor) return; // no active editor — nothing to explain
      const selection = editor.document.getText(editor.selection);
      const language = editor.document.languageId;
      const response = await client.chat.completions.create({
        model: 'llama3:8b-instruct-q8_0',
        messages: [
          {
            role: 'user',
            content: `Explain this ${language} code:\n\n${selection}`
          }
        ]
      });
      const explanation = response.choices[0].message.content;
      // Modal info box; `!` asserts the model returned non-null content.
      vscode.window.showInformationMessage(explanation!, { modal: true });
    }
  );
  // Command: Fix selected code
  let fixCommand = vscode.commands.registerCommand(
    'codeAssistant.fix',
    async () => {
      const editor = vscode.window.activeTextEditor;
      if (!editor) return;
      const selection = editor.document.getText(editor.selection);
      const language = editor.document.languageId;
      const response = await client.chat.completions.create({
        model: 'llama-3-70b-instruct',
        messages: [
          {
            role: 'user',
            content: `Find and fix bugs in this ${language} code:\n\n${selection}\n\nProvide only the fixed code, no explanation.`
          }
        ],
        temperature: 0.2 // low temperature for deterministic code edits
      });
      const fixedCode = response.choices[0].message.content!;
      // Replace the user's selection in-place with the model's fixed code.
      await editor.edit(editBuilder => {
        editBuilder.replace(editor.selection, fixedCode);
      });
    }
  );
  context.subscriptions.push(explainCommand, fixCommand);
}
## CLI Tool Example

### Code Assistant CLI
#!/usr/bin/env python3
import sys
import click
from openai import OpenAI
# Module-level HiveOps client shared by all CLI commands.
# NOTE(review): replace the placeholder key before use — prefer reading it
# from an environment variable rather than hardcoding it in the script.
client = OpenAI(
    api_key="sk-YOUR-API-KEY",
    base_url="https://ai.hiveops.io"
)
@click.group()
def cli():
    """AI Code Assistant powered by HiveOps"""
    # The docstring doubles as click's group help text; the body is
    # intentionally empty (the redundant `pass` after it was removed).
@cli.command()
@click.argument('file', type=click.File('r'))
@click.option('--language', default='python')
def explain(file, language):
    """Explain code in a file"""
    # Read the whole file and ask the explanation-tier model about it.
    source = file.read()
    request_messages = [
        {"role": "user", "content": f"Explain this {language} code:\n\n{source}"}
    ]
    completion = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=request_messages
    )
    click.echo(completion.choices[0].message.content)
@cli.command()
@click.argument('file', type=click.File('r'))
@click.option('--language', default='python')
@click.option('--output', '-o', type=click.File('w'))
def fix(file, language, output):
    """Find and fix bugs"""
    # Low temperature keeps the repaired code deterministic.
    source = file.read()
    completion = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[
            {"role": "user", "content": f"Fix bugs in this {language} code:\n\n{source}"}
        ],
        temperature=0.2
    )
    repaired = completion.choices[0].message.content
    # Echo to stdout unless an output file was requested with -o/--output.
    if not output:
        click.echo(repaired)
    else:
        output.write(repaired)
        click.echo(f"Fixed code written to {output.name}")
@cli.command()
@click.argument('file', type=click.File('r'))
@click.option('--framework', default='pytest')
def test(file, framework):
    """Generate unit tests"""
    # Build the request up-front, then hand it to the code-quality model.
    source = file.read()
    request = f"Generate {framework} tests for:\n\n{source}"
    completion = client.chat.completions.create(
        model="llama-3-70b-instruct",
        messages=[{"role": "user", "content": request}],
        temperature=0.3
    )
    click.echo(completion.choices[0].message.content)
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
Usage:

```bash
# Explain code
code-assistant explain app.py

# Fix bugs and save to new file
code-assistant fix buggy.py -o fixed.py

# Generate tests
code-assistant test mymodule.py --framework pytest
```
Best Practices
1. Use Lower Temperature for Code
# Illustrative snippets: pick the sampling temperature to match the task.

# Code generation/fixing: Low temperature (more deterministic)
response = client.chat.completions.create(
    model="llama-3-70b-instruct",
    messages=[{"role": "user", "content": "Fix this Python code..."}],
    temperature=0.2  # ← Low for code
)

# Code explanation: Medium temperature (more natural)
response = client.chat.completions.create(
    model="llama3:8b-instruct-q8_0",
    messages=[{"role": "user", "content": "Explain this algorithm..."}],
    temperature=0.5  # ← Medium for explanations
)
2. Choose Right Model
| Task | Model | Why |
|---|---|---|
| Code completion | llama-3-70b-instruct | Best code quality |
| Documentation | gemma-2-9b-it | Cost-effective |
| Bug fixing | llama-3-70b-instruct | Complex reasoning |
| Explanation | llama3:8b-instruct-q8_0 | Good balance |
3. Provide Context
# Bad: No context
prompt = "Fix this code: return x / y"

# Good: Full context — include the whole function, the observed error,
# and the expected behavior so the model can reason about the fix.
# (The function body's indentation was lost in extraction and is
# restored here so the embedded example is valid Python.)
prompt = """Fix this Python function:
def calculate_ratio(x, y):
    return x / y
Error: ZeroDivisionError when y=0
Expected: Return None when division impossible
"""
## Next Steps

## Support

- 💬 Discord
- 📧 Email: support@hiveops.io <!-- address was obfuscated in the source ("[email protected]"); confirm the real address -->