Prompt Templates

Build reusable, maintainable prompt templates with dynamic variable substitution for consistent LLM interactions.

Why Use Prompt Templates?

Prompt templates help you create consistent, reusable prompts by separating the structure from the data. Benefits include:

  • Consistency: Ensure prompts follow the same format across your application
  • Maintainability: Update prompt structure in one place
  • Reusability: Use the same template with different data
  • Clarity: Separate prompt logic from business logic
  • Testing: Easily test different prompt variations

Template Types

SimplePrompt

Single variable substitution

Best for: Simple prompts with one dynamic value

MultiValuePrompt

Multiple variable substitution

Best for: Complex prompts with multiple dynamic values

SimplePrompt

Use SimplePrompt when you have a single dynamic value to insert:

from SimplerLLM.prompts.prompt_builder import SimplePrompt
from SimplerLLM.language.llm import LLM, LLMProvider

# Create a simple template with a single {topic} placeholder
template = SimplePrompt(
    template_text="Explain the concept of {topic} in simple terms."
)

# Generate prompt with a value (placeholder name is passed as a keyword)
prompt = template.build_prompt(topic="machine learning")
print(prompt)
# Output: "Explain the concept of machine learning in simple terms."

# Use with LLM
# NOTE(review): assumes provider credentials (e.g. an OpenAI API key) are
# already configured in the environment — confirm before running.
llm = LLM.create(provider=LLMProvider.OPENAI, model_name="gpt-4o")
response = llm.generate_response(prompt=prompt)
print(response)

Multiple Uses of the Same Template

from SimplerLLM.prompts.prompt_builder import SimplePrompt

# Create template once
explainer = SimplePrompt(
    template_text="Explain {topic} to a 10-year-old child."
)

# Use with different topics
topics = ["quantum computing", "photosynthesis", "blockchain"]

# Reuses the `llm` instance created in the previous example.
for topic in topics:
    prompt = explainer.build_prompt(topic=topic)
    response = llm.generate_response(prompt=prompt)
    print(f"\n{topic.upper()}:")
    print(response)

MultiValuePrompt

Use MultiValuePrompt for templates with multiple dynamic values:

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

# Create template with multiple variables ({role}, {task}, {context}, {tone})
template = MultiValuePrompt(
    template_text="""You are a {role} assistant.

Task: {task}

Context:
{context}

Please provide a {tone} response."""
)

# Build prompt with multiple values, passed as one dict keyed by placeholder name
prompt = template.build_prompt({
    'role': 'customer support',
    'task': 'Help the customer troubleshoot their login issue',
    'context': 'Customer cannot access their account after password reset',
    'tone': 'friendly and helpful'
})

print(prompt)

# Use with LLM (requires an `llm` instance created as in the earlier examples)
response = llm.generate_response(prompt=prompt)

Dictionary vs Keyword Arguments

You can pass values as a dictionary or keyword arguments:

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

template = MultiValuePrompt(
    template_text="Write a {length} {content_type} about {subject}."
)

# Using dictionary
# NOTE(review): passing the dict positionally assumes build_prompt accepts a
# mapping as its first argument — confirm against the SimplerLLM version in use.
values = {
    'length': 'short',
    'content_type': 'blog post',
    'subject': 'artificial intelligence'
}
prompt1 = template.build_prompt(values)

# Using keyword arguments (one keyword per placeholder)
prompt2 = template.build_prompt(
    length='long',
    content_type='article',
    subject='machine learning'
)

Real-World Examples

Content Generation System

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt
from SimplerLLM.language.llm import LLM, LLMProvider

class ContentGenerator:
    """Generates blog posts and social media posts from two reusable templates."""

    def __init__(self):
        self.llm = LLM.create(provider=LLMProvider.OPENAI, model_name="gpt-4o")

        # Blog post template
        self.blog_template = MultiValuePrompt(
            template_text="""Write a {tone} blog post about {topic}.

Target Audience: {audience}
Word Count: Approximately {word_count} words
Key Points to Cover:
{key_points}

Include:
- Engaging introduction
- Clear explanations
- Practical examples
- Strong conclusion"""
        )

        # Social media template
        self.social_template = MultiValuePrompt(
            template_text="""Create a {platform} post about {topic}.

Tone: {tone}
Include: {elements}
Character limit: {limit}"""
        )

    def generate_blog(self, topic, audience, tone="professional", word_count=500):
        """Fill the blog template and return the LLM's generated post.

        Note: {key_points} is filled with a fixed list below, not caller-supplied.
        """
        prompt = self.blog_template.build_prompt({
            'topic': topic,
            'audience': audience,
            'tone': tone,
            'word_count': word_count,
            'key_points': '- Main concept\n- Benefits\n- Implementation tips'
        })
        return self.llm.generate_response(prompt=prompt)

    def generate_social_post(self, topic, platform="Twitter", tone="casual"):
        """Fill the social template and return the LLM's generated post."""
        prompt = self.social_template.build_prompt({
            'topic': topic,
            'platform': platform,
            'tone': tone,
            'elements': 'hashtags and emoji',
            # Only the exact string 'Twitter' gets an explicit character cap.
            'limit': '280 characters' if platform == 'Twitter' else 'no limit'
        })
        return self.llm.generate_response(prompt=prompt)

# Usage
generator = ContentGenerator()

# Generate blog post
blog = generator.generate_blog(
    topic="SimplerLLM library",
    audience="Python developers",
    tone="friendly and informative"
)
print("BLOG POST:")
print(blog)

# Generate social post
tweet = generator.generate_social_post(
    topic="New SimplerLLM release",
    platform="Twitter"
)
print("\nTWEET:")
print(tweet)

Customer Support System

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

class SupportResponseGenerator:
    """Builds category-aware customer support replies from a single template."""

    def __init__(self, llm_instance):
        # llm_instance: any object exposing generate_response(prompt=...).
        self.llm = llm_instance

        # Template for different support scenarios
        self.support_template = MultiValuePrompt(
            template_text="""You are a {company_name} customer support agent.

Customer Issue Category: {category}
Customer Message: "{customer_message}"

Previous Context:
{context}

Instructions:
- Be {tone}
- {special_instructions}
- Include {includes}

Generate a response:"""
        )

    def generate_response(self, customer_message, category, context="None"):
        """Generate a support reply whose tone/instructions depend on *category*.

        Unknown categories fall back to 'professional' / 'Be helpful' via the
        dict.get defaults below.
        """
        # Customize based on category
        tone_map = {
            'billing': 'apologetic and solution-focused',
            'technical': 'patient and detailed',
            'general': 'friendly and helpful'
        }

        special_instructions_map = {
            'billing': 'Offer to escalate to billing team if needed',
            'technical': 'Provide step-by-step troubleshooting',
            'general': 'Answer thoroughly and offer additional help'
        }

        prompt = self.support_template.build_prompt({
            'company_name': 'SimplerLLM',
            'category': category,
            'customer_message': customer_message,
            'context': context,
            'tone': tone_map.get(category, 'professional'),
            'special_instructions': special_instructions_map.get(category, 'Be helpful'),
            'includes': 'next steps and contact information'
        })

        return self.llm.generate_response(prompt=prompt)

# Usage (requires an `llm` instance created as in the earlier examples)
support = SupportResponseGenerator(llm)

response = support.generate_response(
    customer_message="I can't log into my account after changing my password",
    category="technical",
    context="Customer successfully reset password 10 minutes ago"
)
print(response)

Data Analysis Assistant

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

class DataAnalysisAssistant:
    """Runs a templated data-analysis prompt against an injected LLM."""

    def __init__(self, llm_instance):
        # llm_instance: any object exposing generate_response(prompt=...).
        self.llm = llm_instance

        self.analysis_template = MultiValuePrompt(
            template_text="""Analyze the following {data_type} data:

Data:
{data}

Analysis Type: {analysis_type}
Focus Areas: {focus_areas}

Provide:
1. Key insights
2. Trends and patterns
3. {additional_request}
4. Recommendations

Format the response as {format}."""
        )

    # NOTE(review): the `format` parameter shadows the builtin of the same name
    # inside this method; harmless here, but worth renaming if the method grows.
    def analyze(self, data, data_type, analysis_type, focus_areas, format="markdown"):
        """Fill the analysis template and return the LLM's analysis text."""
        prompt = self.analysis_template.build_prompt({
            'data': data,
            'data_type': data_type,
            'analysis_type': analysis_type,
            'focus_areas': focus_areas,
            # {additional_request} is fixed here, not caller-configurable.
            'additional_request': 'Statistical summary',
            'format': format
        })

        return self.llm.generate_response(prompt=prompt)

# Usage (requires an `llm` instance created as in the earlier examples)
analyst = DataAnalysisAssistant(llm)

sales_data = """
Q1 2024: $120,000
Q2 2024: $145,000
Q3 2024: $138,000
Q4 2024: $162,000
"""

analysis = analyst.analyze(
    data=sales_data,
    data_type="quarterly sales",
    analysis_type="trend analysis",
    focus_areas="growth rate, seasonality, forecast",
    format="bullet points"
)
print(analysis)

Template Management

Storing Templates

Store templates in a centralized location for easy management:

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

class PromptLibrary:
    """Central registry of reusable prompt templates, keyed by name."""

    def __init__(self):
        # Keep the raw template texts together, then wrap each one in a
        # MultiValuePrompt — easier to scan and extend than inline constructors.
        template_texts = {
            'summarize': "Summarize the following {content_type} in {length}: {content}",
            'translate': "Translate the following text from {source_lang} to {target_lang}: {text}",
            'explain': "Explain {concept} to someone with {expertise_level} expertise in {field}",
            'review': "Review this {item_type} and provide {review_type} feedback: {content}",
        }
        self.templates = {
            name: MultiValuePrompt(template_text=text)
            for name, text in template_texts.items()
        }

    def get_template(self, name):
        """Return the template registered under *name*, or None if absent."""
        return self.templates.get(name)

    def add_template(self, name, template):
        """Register (or replace) *template* under *name*."""
        self.templates[name] = template

# Usage
library = PromptLibrary()

# Use existing template (get_template returns None for unknown names)
summarize_template = library.get_template('summarize')
prompt = summarize_template.build_prompt({
    'content_type': 'article',
    'length': '3 sentences',
    'content': 'Long article text here...'
})

# Add new template at runtime
library.add_template(
    'code_review',
    MultiValuePrompt(
        template_text="Review this {language} code for {focus}: {code}"
    )
)

Loading Templates from Files

import json
from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

def load_templates_from_json(file_path):
    """Load prompt templates from a JSON file.

    The file must map template names to template text, e.g.
    {"summary": "Summarize {content} in {length}..."}.

    Args:
        file_path: Path to the JSON file.

    Returns:
        dict mapping each template name to a MultiValuePrompt.

    Raises:
        OSError: If the file cannot be opened.
        json.JSONDecodeError: If the file is not valid JSON.
    """
    # Explicit encoding so loading does not depend on the platform default
    # (JSON files are conventionally UTF-8).
    with open(file_path, 'r', encoding='utf-8') as f:
        template_data = json.load(f)

    return {
        name: MultiValuePrompt(template_text=text)
        for name, text in template_data.items()
    }

# templates.json:
# {
#   "email_draft": "Write a {tone} email to {recipient} about {subject}...",
#   "summary": "Summarize {content} in {length}...",
#   ...
# }

templates = load_templates_from_json('templates.json')
email_template = templates['email_draft']

Best Practices

1. Use Descriptive Variable Names

Choose clear variable names that indicate their purpose: {target_audience} instead of {var1}

2. Include Instructions in Templates

Add clear instructions and constraints within the template for consistent results.

3. Version Your Templates

Keep track of template versions to maintain consistency and enable rollbacks if needed.

4. Test with Different Inputs

Validate templates with various input combinations to ensure they work correctly.

5. Separate Structure from Content

Keep template structure (instructions, format) separate from dynamic content (data, values).

Advanced Patterns

Conditional Template Building

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

class AdaptivePromptBuilder:
    """Builds analysis prompts whose structure adapts to caller options."""

    def build_analysis_prompt(self, data, include_charts=False, detailed=True):
        """Assemble a template from optional sections, then fill in *data*."""
        # Collect the template pieces, then join once at the end.
        sections = ["Analyze this data: {data}\n\n"]

        if detailed:
            sections.append("Provide detailed insights including:\n")
            sections.append("- Statistical analysis\n")
            sections.append("- Trends and patterns\n")
            sections.append("- Recommendations\n")
        else:
            sections.append("Provide a brief summary.\n")

        if include_charts:
            sections.append("\nSuggest visualizations for the data.")

        assembled = MultiValuePrompt(template_text="".join(sections))
        return assembled.build_prompt({'data': data})

# Usage
builder = AdaptivePromptBuilder()

# Detailed analysis with charts
prompt1 = builder.build_analysis_prompt(
    data="Sales data...",
    include_charts=True,
    detailed=True
)

# Brief summary without charts
# Both calls fill the same {data} slot; only the surrounding instructions change.
prompt2 = builder.build_analysis_prompt(
    data="Sales data...",
    include_charts=False,
    detailed=False
)

Template Chaining

# Fixed: the original example imported only SimplePrompt but also uses
# MultiValuePrompt below, which raised NameError at runtime.
from SimplerLLM.prompts.prompt_builder import SimplePrompt, MultiValuePrompt

class PromptChain:
    """Chains two prompts: research a topic, then write from the findings."""

    def __init__(self, llm_instance):
        # llm_instance: any object exposing generate_response(prompt=...).
        self.llm = llm_instance

    def research_and_write(self, topic):
        """Return a short article about *topic* grounded in researched facts.

        Step 1 asks the LLM for key facts; step 2 feeds those facts into a
        writing prompt so the article stays anchored to them.
        """
        # Step 1: Research
        research_template = SimplePrompt(
            template_text="List 5 key facts about {topic}. Format as bullet points."
        )
        research_prompt = research_template.build_prompt(topic=topic)
        facts = self.llm.generate_response(prompt=research_prompt)

        # Step 2: Write using research
        write_template = MultiValuePrompt(
            template_text="""Using these facts about {topic}:

{facts}

Write a {length} {content_type} that is {tone}."""
        )
        write_prompt = write_template.build_prompt({
            'topic': topic,
            'facts': facts,
            'length': 'short',
            'content_type': 'article',
            'tone': 'engaging and informative'
        })
        return self.llm.generate_response(prompt=write_prompt)

# Usage (requires an `llm` instance created as in the earlier examples)
chain = PromptChain(llm)
article = chain.research_and_write("renewable energy")

Error Handling

from SimplerLLM.prompts.prompt_builder import MultiValuePrompt

template = MultiValuePrompt(
    template_text="Explain {concept} in {detail_level} detail."
)

# Requires an `llm` instance created as in the earlier examples.
try:
    # Build prompt
    prompt = template.build_prompt({
        'concept': 'neural networks',
        'detail_level': 'simple'
    })
    print(prompt)

    # Use with LLM
    response = llm.generate_response(prompt=prompt)
    print(response)

# Presumably raised when the supplied values are missing a placeholder the
# template uses — confirm against the SimplerLLM version in use.
except KeyError as e:
    print(f"Missing required template variable: {e}")

# NOTE(review): assumes build_prompt raises ValueError for invalid values;
# verify this contract before relying on it.
except ValueError as e:
    print(f"Invalid template value: {e}")

# Catch-all boundary so a template error never crashes the caller.
except Exception as e:
    print(f"Error building prompt: {e}")

Next Steps

💡 Pro Tip

Combine prompt templates with other SimplerLLM features like ReliableLLM for automatic failover and structured output for validated responses. This creates robust, production-ready AI applications.