HiveOps Logo
HiveOps
/Content Generation

Content Generation

SEO, marketing, and creative writing

AI Content Generation with HiveOps

Generate high-quality content at scale: blog posts, marketing copy, product descriptions, social media, and more.

Overview

Use HiveOps for:

  • Blog posts & articles - Long-form content generation
  • Marketing copy - Ads, landing pages, email campaigns
  • Product descriptions - E-commerce at scale
  • Social media - Posts, captions, thread generation
  • SEO content - Keyword-optimized articles
  • Creative writing - Stories, scripts, poetry

Quick Start: Blog Post Generator

from openai import OpenAI

# Module-level OpenAI-compatible client pointed at the HiveOps gateway;
# shared by all generation helpers below.
# NOTE(review): replace the placeholder key with a real credential before running.
client = OpenAI(
    api_key="sk-YOUR-API-KEY",
    base_url="https://ai.hiveops.io"
)

def generate_blog_post(topic, keywords=None):
    """Produce an ~800-word blog post on *topic*, optionally weaving in SEO keywords.

    keywords: optional iterable of SEO terms; skipped when falsy.
    Returns the generated post text.
    """
    # Build the user prompt; the keyword instruction is appended only when given.
    request_lines = [f"Write a comprehensive 800-word blog post about: {topic}"]
    if keywords:
        request_lines.append(f"Include these SEO keywords naturally: {', '.join(keywords)}")

    completion = client.chat.completions.create(
        model="llama-3-70b-instruct",  # Use 70B for high-quality content
        messages=[
            {"role": "system", "content": "You are an expert content writer."},
            {"role": "user", "content": "\n".join(request_lines)},
        ],
        temperature=0.8,  # Higher for creativity
        max_tokens=2000,
    )

    return completion.choices[0].message.content

# Usage: generate a keyword-targeted post and print it.
post = generate_blog_post(
    topic="Benefits of Remote Work",
    keywords=["productivity", "work-life balance", "collaboration"]
)
print(post)

Marketing Copy Generation

Ad Copy Generator

def generate_ad_copy(product, target_audience, platform="Google Ads"):
    """Return 5 short ad headlines for *product*, tuned to audience and platform."""
    brief = f"""Create 5 attention-grabbing ad headlines for:

Product: {product}
Target audience: {target_audience}
Platform: {platform}

Requirements:
- Each headline under 30 characters
- Include emotional trigger
- Clear call-to-action
- Benefit-focused
"""

    chat_messages = [
        {"role": "system", "content": "You are a direct response copywriter."},
        {"role": "user", "content": brief},
    ]
    result = client.chat.completions.create(
        model="gemma-2-9b-it",  # Cost-effective for short copy
        messages=chat_messages,
        temperature=0.9,  # Maximum creativity
    )

    return result.choices[0].message.content

# Usage: headlines for a consumer product aimed at a specific audience.
ads = generate_ad_copy(
    product="Fitness tracker watch",
    target_audience="Health-conscious millennials"
)
print(ads)

Email Campaign Generator

def generate_email_campaign(product, goal, tone="professional"):
    """Draft a full email campaign (subject, preview, body, CTA) for *product*."""
    campaign_brief = f"""Write a complete email campaign for:

Product: {product}
Goal: {goal}
Tone: {tone}

Include:
1. Subject line (compelling, under 50 chars)
2. Preview text (35 chars)
3. Email body (300 words)
4. Clear CTA button text
"""

    reply = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=[{"role": "user", "content": campaign_brief}],
        temperature=0.7,
    )
    return reply.choices[0].message.content

# Usage: a conversion-focused campaign for a course launch.
email = generate_email_campaign(
    product="New online course on Python",
    goal="Drive course enrollments",
    tone="friendly but professional"
)
print(email)

E-Commerce Product Descriptions

Bulk Product Description Generator

import json

def generate_product_descriptions(products):
    """Generate a ~100-word description for each product.

    products: list of dicts with 'name', 'features', 'category'.
    Returns a list of dicts with the product name, generated copy,
    and total token usage per request.
    """
    results = []

    for item in products:
        feature_list = ', '.join(item['features'])
        request = f"""Write a compelling 100-word product description for:

Name: {item['name']}
Category: {item['category']}
Features: {feature_list}

Style: Persuasive, benefit-focused, SEO-friendly
Include: Key benefits, use cases, why customers should buy
"""

        reply = client.chat.completions.create(
            model="mistral-7b-instruct-v0.3",  # Cheapest for high-volume
            messages=[{"role": "user", "content": request}],
            temperature=0.7,
            max_tokens=200,
        )

        results.append({
            "product": item['name'],
            "description": reply.choices[0].message.content,
            "tokens": reply.usage.total_tokens,
        })

    return results

# Usage: describe a two-item catalog and report per-item token spend.
products = [
    {
        "name": "Wireless Bluetooth Headphones",
        "category": "Audio",
        "features": ["40-hour battery", "Active noise cancellation", "Foldable design"]
    },
    {
        "name": "Organic Cotton T-Shirt",
        "category": "Apparel",
        "features": ["100% organic cotton", "Fair trade", "Unisex fit"]
    }
]

results = generate_product_descriptions(products)
for result in results:
    print(f"\n{result['product']}:")
    print(result['description'])
    print(f"Tokens: {result['tokens']}")

Social Media Content

Multi-Platform Post Generator

def generate_social_media_posts(topic, platforms=("Twitter", "LinkedIn", "Instagram")):
    """Generate platform-specific posts for the same topic.

    topic: subject to post about.
    platforms: iterable of platform names ("Twitter", "LinkedIn", "Instagram").
    Returns a dict mapping platform name -> generated post text.
    Raises ValueError for an unsupported platform name.

    Fixes vs. the original: the default was a mutable list (shared across
    calls), and an unknown platform silently reused the previous platform's
    limits (or raised NameError on the first iteration).
    """
    # Per-platform (length limit, style guidance) pairs.
    specs = {
        "Twitter": ("280 characters (with room for link/image)",
                    "Punchy, conversational, use hashtags"),
        "LinkedIn": ("200 words",
                     "Professional, thought-leadership, no hashtags"),
        "Instagram": ("150 words",
                      "Visual-focused, use emojis, 3-5 hashtags"),
    }

    posts = {}

    for platform in platforms:
        if platform not in specs:
            raise ValueError(f"Unsupported platform: {platform}")
        char_limit, style = specs[platform]

        prompt = f"""Create a {platform} post about: {topic}

Requirements:
- Max length: {char_limit}
- Style: {style}
- Include call-to-action
"""

        response = client.chat.completions.create(
            model="gemma-2-9b-it",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.8
        )

        posts[platform] = response.choices[0].message.content

    return posts

# Usage: one announcement adapted to each default platform.
posts = generate_social_media_posts("Launching our new AI-powered app")
for platform, content in posts.items():
    print(f"\n=== {platform} ===")
    print(content)

Twitter Thread Generator

def generate_twitter_thread(topic, num_tweets=5):
    """Draft a numbered Twitter thread of *num_tweets* tweets explaining *topic*."""
    last_body_tweet = num_tweets - 1  # tweets between the hook and the closing CTA
    thread_brief = f"""Create a {num_tweets}-tweet thread explaining: {topic}

Requirements:
- Each tweet under 280 characters
- Tweet 1: Hook (make people want to read)
- Tweets 2-{last_body_tweet}: Educational/valuable content
- Tweet {num_tweets}: CTA or conclusion
- Use emojis strategically
- Number each tweet (1/{num_tweets}, 2/{num_tweets}, etc.)
"""

    reply = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=[{"role": "user", "content": thread_brief}],
        temperature=0.8,
    )
    return reply.choices[0].message.content

# Usage: a 7-tweet explainer thread.
thread = generate_twitter_thread("How AI is transforming healthcare", num_tweets=7)
print(thread)

SEO Content Generation

SEO-Optimized Article

def generate_seo_article(keyword, secondary_keywords, word_count=1500):
    """Write an SEO-optimized article of roughly *word_count* words around *keyword*."""
    article_brief = f"""Write a comprehensive, SEO-optimized article:

Primary keyword: {keyword}
Secondary keywords: {', '.join(secondary_keywords)}
Word count: {word_count} words

Structure:
1. Compelling H1 title (include primary keyword)
2. Introduction (150 words) - hook + preview
3. 3-5 H2 sections (include secondary keywords)
4. Conclusion with CTA
5. Naturally incorporate all keywords (no keyword stuffing)

Tone: Informative, authoritative, easy to read
"""

    chat_messages = [
        {"role": "system", "content": "You are an expert SEO content writer."},
        {"role": "user", "content": article_brief},
    ]
    reply = client.chat.completions.create(
        model="llama-3-70b-instruct",  # Best quality for long-form
        messages=chat_messages,
        temperature=0.7,
        max_tokens=3000,
    )
    return reply.choices[0].message.content

# Usage: a 2000-word article targeting a primary plus three secondary keywords.
article = generate_seo_article(
    keyword="best project management software",
    secondary_keywords=["team collaboration", "task tracking", "productivity tools"],
    word_count=2000
)
print(article)

Meta Description Generator

def generate_meta_descriptions(pages):
    """Generate SEO meta descriptions for multiple pages.

    pages: list of dicts with 'title', 'keyword', and 'summary' keys.
    Returns a list of {'page': title, 'meta_description': text} dicts.
    """
    meta_results = []

    for page in pages:
        request = f"""Write a compelling meta description for:

Page title: {page['title']}
Primary keyword: {page['keyword']}
Page content summary: {page['summary']}

Requirements:
- 150-160 characters exactly
- Include primary keyword naturally
- Include call-to-action verb
- Make it click-worthy
"""

        reply = client.chat.completions.create(
            model="mistral-7b-instruct-v0.3",
            messages=[{"role": "user", "content": request}],
            temperature=0.7,
            max_tokens=100,
        )

        meta_results.append({
            "page": page['title'],
            "meta_description": reply.choices[0].message.content.strip(),
        })

    return meta_results

Creative Writing

Story Generator

def generate_story(genre, theme, length="short"):
    """Write a *genre* story about *theme*; length is 'short', 'medium', or 'long'."""
    # Map the length label to a word target; unknown labels fall back to short (500).
    target_words = {"short": 500, "medium": 1500, "long": 3000}.get(length, 500)

    story_brief = f"""Write a {length} {genre} story about: {theme}

Requirements:
- Approximately {target_words} words
- Clear beginning, middle, end
- Compelling characters
- Vivid descriptions
- Emotional impact
"""

    reply = client.chat.completions.create(
        model="llama-3-70b-instruct",  # Best for creative writing
        messages=[
            {"role": "system", "content": "You are a creative fiction writer."},
            {"role": "user", "content": story_brief},
        ],
        temperature=0.95,  # Maximum creativity
        max_tokens=target_words * 2,  # token headroom above the word target
    )
    return reply.choices[0].message.content

# Usage: a ~1500-word sci-fi story.
story = generate_story(
    genre="science fiction",
    theme="an AI that learns to feel emotions",
    length="medium"
)
print(story)

Batch Content Generation

Efficient Parallelization

import asyncio
from openai import AsyncOpenAI

# Async variant of the client for concurrent batch generation.
# NOTE(review): replace the placeholder key with a real credential.
async_client = AsyncOpenAI(
    api_key="sk-YOUR-API-KEY",
    base_url="https://ai.hiveops.io"
)

async def generate_single_content(prompt, model="llama3:8b-instruct-q8_0"):
    """Asynchronously generate one completion for *prompt* and return its text."""
    reply = await async_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return reply.choices[0].message.content

async def generate_batch_content(prompts):
    """Generate multiple pieces of content in parallel.

    Fans out one task per prompt and awaits them all; results come back
    in the same order as *prompts*.
    """
    return await asyncio.gather(
        *(generate_single_content(p) for p in prompts)
    )

# Usage: three descriptions generated concurrently, printed in input order.
prompts = [
    "Write a product description for wireless earbuds",
    "Write a product description for smart watch",
    "Write a product description for fitness tracker"
]

results = asyncio.run(generate_batch_content(prompts))
for i, content in enumerate(results):
    print(f"\n=== Product {i+1} ===")
    print(content)

Content Refinement

Multiple Variations

def generate_variations(base_content, num_variations=3):
    """Produce *num_variations* stylistic rewrites of *base_content*."""
    rewrites = []

    for variant_no in range(1, num_variations + 1):
        rewrite_brief = f"""Rewrite the following content with a different style/angle:

Original:
{base_content}

Requirements:
- Keep the same key information
- Change the tone/approach (variation {variant_no})
- Same approximate length
- Make it fresh and unique
"""

        reply = client.chat.completions.create(
            model="gemma-2-9b-it",
            messages=[{"role": "user", "content": rewrite_brief}],
            temperature=0.9,  # High creativity for variations
        )
        rewrites.append(reply.choices[0].message.content)

    return rewrites

# Usage
original = "Introducing our new eco-friendly water bottle. Made from recycled materials."
versions = generate_variations(original, num_variations=3)

for i, version in enumerate(versions, 1):
    print(f"\n=== Variation {i} ===")
    print(version)

Content Polishing

def polish_content(draft, improvements="grammar, clarity, engagement"):
    """Return an edited version of *draft*, focusing on the listed improvements."""
    editing_brief = f"""Improve this content by focusing on: {improvements}

Original content:
{draft}

Tasks:
- Fix grammar/spelling errors
- Improve sentence structure
- Enhance readability
- Make it more engaging
- Keep the same message and tone
"""

    reply = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=[{"role": "user", "content": editing_brief}],
        temperature=0.5,  # Lower temp for editing
    )
    return reply.choices[0].message.content

Cost Optimization for Content Generation

Model Selection Strategy

| Content Type | Recommended Model | Cost/1M Tokens | Why |
|---|---|---|---|
| Product descriptions (bulk) | mistral-7b-instruct-v0.3 | $0.001/$0.002 | Highest volume, lowest cost |
| Social media posts | gemma-2-9b-it | $0.005/$0.010 | Good quality, fast |
| Blog posts | llama3:8b-instruct-q8_0 | $0.01/$0.02 | Great quality/cost balance |
| Long-form articles | llama-3-70b-instruct | $0.10/$0.20 | Best quality for premium content |

Calculate Content Generation Costs

def estimate_content_cost(content_type, pieces_per_day):
    """Estimate daily/monthly generation cost for a content type across models.

    content_type: one of "product_description", "social_post", "blog_post",
        "email"; unknown types fall back to 500 tokens per piece.
    pieces_per_day: pieces generated per day (may be fractional).

    Prints the per-model breakdown (as before) and also returns it as
    {model_name: {"daily": cost, "monthly": cost}} so callers can use the
    figures programmatically instead of parsing stdout. (Previously the
    function only printed and returned None.)
    """
    # Estimated tokens per piece (input + output)
    token_estimates = {
        "product_description": 300,
        "social_post": 200,
        "blog_post": 2500,
        "email": 600
    }

    # Model pricing (input + output average), dollars per 1M tokens
    model_costs = {
        "mistral-7b-instruct-v0.3": 0.0015,
        "gemma-2-9b-it": 0.0075,
        "llama3:8b-instruct-q8_0": 0.015,
        "llama-3-70b-instruct": 0.15
    }

    tokens_per_piece = token_estimates.get(content_type, 500)
    total_tokens_per_day = tokens_per_piece * pieces_per_day

    print(f"Content type: {content_type}")
    print(f"Pieces/day: {pieces_per_day}")
    print(f"Tokens/day: {total_tokens_per_day:,}\n")

    breakdown = {}
    for model, cost_per_million in model_costs.items():
        daily_cost = (total_tokens_per_day / 1_000_000) * cost_per_million
        monthly_cost = daily_cost * 30
        breakdown[model] = {"daily": daily_cost, "monthly": monthly_cost}

        print(f"{model}:")
        print(f"  Daily: ${daily_cost:.4f}")
        print(f"  Monthly: ${monthly_cost:.2f}\n")

    return breakdown

# Example: Generate 100 product descriptions per day
estimate_content_cost("product_description", pieces_per_day=100)

# Example: Generate 50 blog posts per month (expressed as a fractional daily rate)
estimate_content_cost("blog_post", pieces_per_day=50/30)

Production Best Practices

1. Content Quality Checks

def generate_with_quality_check(prompt, min_words=500):
    """Generate content, retrying up to 3 times until it reaches *min_words*.

    Raises ValueError when no attempt meets the minimum length.
    """
    max_attempts = 3

    for attempt in range(1, max_attempts + 1):
        reply = client.chat.completions.create(
            model="llama3:8b-instruct-q8_0",
            messages=[{"role": "user", "content": prompt}],
        )

        draft = reply.choices[0].message.content
        words = len(draft.split())
        if words >= min_words:
            return draft

        print(f"Attempt {attempt}: Only {words} words, retrying...")

    raise ValueError(f"Failed to generate content with {min_words}+ words")

2. Brand Voice Consistency

# Brand voice guidelines injected into the system prompt for all branded content.
BRAND_VOICE = """Brand: TechCorp
Tone: Professional yet approachable
Style: Clear, concise, benefit-focused
Avoid: Jargon, hype, exclamation marks
Voice: Authoritative but friendly"""

def generate_branded_content(content_type, topic):
    """Generate *content_type* copy about *topic* in TechCorp's brand voice."""
    # Prepend the brand guidelines so every response follows the same voice.
    voice_primer = f"{BRAND_VOICE}\n\nYou write {content_type} for TechCorp."

    reply = client.chat.completions.create(
        model="llama3:8b-instruct-q8_0",
        messages=[
            {"role": "system", "content": voice_primer},
            {"role": "user", "content": f"Write about: {topic}"},
        ],
    )
    return reply.choices[0].message.content

3. Content Caching

import hashlib
import json
from pathlib import Path

def cached_generate(prompt, model="llama3:8b-instruct-q8_0", cache_dir="./content_cache"):
    """Generate content for *prompt*, caching results on disk keyed by (prompt, model).

    Returns the cached text when an identical request was seen before;
    otherwise calls the API and stores the result under *cache_dir*.

    Fixes vs. the original: mkdir(parents=True) so a nested cache_dir does not
    raise FileNotFoundError, and explicit UTF-8 encoding so cached text is not
    subject to the platform default encoding.
    """
    # Create cache key from prompt + model.
    # md5 is fine here: the hash is a cache key, not security-sensitive.
    cache_key = hashlib.md5(f"{prompt}{model}".encode()).hexdigest()
    cache_file = Path(cache_dir) / f"{cache_key}.json"

    # Cache hit: return stored content without calling the API
    if cache_file.exists():
        with open(cache_file, encoding="utf-8") as f:
            cached = json.load(f)
            print("Using cached content")
            return cached['content']

    # Cache miss: generate new content
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}]
    )

    content = response.choices[0].message.content

    # Save to cache (parents=True creates any missing intermediate dirs)
    cache_file.parent.mkdir(parents=True, exist_ok=True)
    with open(cache_file, 'w', encoding="utf-8") as f:
        json.dump({'prompt': prompt, 'content': content}, f)

    return content

Real-World Examples

Complete Blog Post Pipeline

class BlogPostGenerator:
    """Two-stage blog pipeline: a cheap model drafts the outline, the 70B model writes the post."""

    def __init__(self):
        # Dedicated client instance so the class is self-contained.
        # NOTE(review): placeholder credentials, as in the module-level client.
        self.client = OpenAI(
            api_key="sk-YOUR-API-KEY",
            base_url="https://ai.hiveops.io"
        )

    def generate_outline(self, topic, keywords):
        """Return an SEO-aware outline for *topic*, drafted by the fast model."""
        outline_brief = f"""Create a blog post outline for: {topic}

SEO keywords: {', '.join(keywords)}

Include:
- Compelling H1 title
- Introduction hook
- 5-7 H2 section headings
- Conclusion with CTA
"""
        reply = self.client.chat.completions.create(
            model="gemma-2-9b-it",
            messages=[{"role": "user", "content": outline_brief}],
            temperature=0.7,
        )
        return reply.choices[0].message.content

    def expand_outline(self, outline):
        """Expand *outline* into a full ~1500-word post with the 70B model."""
        expansion_brief = f"""Expand this outline into a full 1500-word blog post:

{outline}

Requirements:
- Detailed paragraphs for each section
- Include examples and data
- SEO-friendly but natural
- Engaging and informative
"""
        reply = self.client.chat.completions.create(
            model="llama-3-70b-instruct",
            messages=[{"role": "user", "content": expansion_brief}],
            temperature=0.7,
            max_tokens=3000,
        )
        return reply.choices[0].message.content

    def generate_complete_post(self, topic, keywords):
        """Run the outline -> expansion pipeline and return the finished post."""
        print("Generating outline...")
        draft_outline = self.generate_outline(topic, keywords)

        print("Expanding to full post...")
        return self.expand_outline(draft_outline)

# Usage: outline + expansion in one call.
generator = BlogPostGenerator()
post = generator.generate_complete_post(
    topic="10 Ways to Boost Team Productivity",
    keywords=["remote work", "productivity tools", "team collaboration"]
)
print(post)

Next Steps

Support