| # Integration Examples |
|
|
| This document provides concrete examples of integrating LLMPromptKit into various applications and workflows. |
|
|
| ## Customer Support Chatbot |
|
|
| ### Setup |
|
|
| ```python |
| from llmpromptkit import PromptManager, VersionControl |
| import openai |
| |
| # Initialize components |
| prompt_manager = PromptManager() |
| version_control = VersionControl(prompt_manager) |
| |
| # Create prompt templates for different scenarios |
| greeting_prompt = prompt_manager.create( |
| content="You are a helpful customer service agent for {company_name}. Greet the customer politely.", |
| name="Customer Greeting", |
| tags=["customer-service", "greeting"] |
| ) |
| |
| inquiry_prompt = prompt_manager.create( |
| content=""" |
| You are a helpful customer service agent for {company_name}. |
| Customer inquiry: {customer_message} |
| |
| Based on this inquiry: |
| 1. Identify the main issue |
| 2. Provide a helpful response |
| 3. Offer additional assistance |
| |
| Keep your tone professional but friendly. |
| """, |
| name="Customer Inquiry Response", |
| tags=["customer-service", "inquiry"] |
| ) |
| |
| # Version them |
| version_control.commit(greeting_prompt.id, "Initial version") |
| version_control.commit(inquiry_prompt.id, "Initial version") |
| |
# OpenAI callback (uses the legacy, pre-1.0 openai SDK interface)
| def generate_response(prompt_text): |
| response = openai.ChatCompletion.create( |
| model="gpt-3.5-turbo", |
| messages=[{"role": "user", "content": prompt_text}] |
| ) |
| return response.choices[0].message.content |
| |
| # Main handler function |
| def handle_customer_message(customer_name, message, is_new_conversation): |
| if is_new_conversation: |
| # Use greeting prompt for new conversations |
| prompt = prompt_manager.get(greeting_prompt.id) |
| prompt_text = prompt.render(company_name="Acme Inc.") |
| return generate_response(prompt_text) |
| else: |
| # Use inquiry prompt for ongoing conversations |
| prompt = prompt_manager.get(inquiry_prompt.id) |
| prompt_text = prompt.render( |
| company_name="Acme Inc.", |
| customer_message=message |
| ) |
| return generate_response(prompt_text) |
| ``` |
|
|
| ## Content Generation System |
|
|
| ### Setup |
|
|
| ```python |
| from llmpromptkit import PromptManager, PromptTesting, Evaluator |
| import asyncio |
| |
| # Initialize components |
| prompt_manager = PromptManager("content_system_prompts") |
| testing = PromptTesting(prompt_manager) |
| evaluator = Evaluator(prompt_manager) |
| |
| # Create content generation prompt |
| blog_prompt = prompt_manager.create( |
| content=""" |
| Write a blog post about {topic}. |
| |
| Title: {title} |
| |
| The post should: |
| - Be approximately {word_count} words |
| - Be written in a {tone} tone |
| - Include {num_sections} main sections |
| - Target audience: {audience} |
| - Include a compelling call-to-action at the end |
| |
| Keywords to include: {keywords} |
| """, |
| name="Blog Post Generator", |
| tags=["content", "blog"] |
| ) |
| |
# Register a test case for this prompt
| test_case = testing.create_test_case( |
| prompt_id=blog_prompt.id, |
| input_vars={ |
| "topic": "Sustainable Living", |
| "title": "10 Simple Ways to Reduce Your Carbon Footprint", |
| "word_count": "800", |
| "tone": "informative yet casual", |
| "num_sections": "5", |
| "audience": "environmentally-conscious millennials", |
| "keywords": "sustainability, eco-friendly, carbon footprint, climate change, lifestyle changes" |
| } |
| ) |
| |
| # LLM callback |
| async def content_llm_callback(prompt, vars): |
| # Call your preferred LLM API here |
| # This is a placeholder |
| return f"Generated content about {vars.get('topic', 'unknown topic')}" |
| |
| # Content generation function |
| async def generate_content(content_type, parameters): |
| if content_type == "blog": |
| prompt = prompt_manager.get(blog_prompt.id) |
| rendered_prompt = prompt.render(**parameters) |
| |
| # Generate content |
| content = await content_llm_callback(rendered_prompt, parameters) |
| |
        # Evaluate quality (the built-in "length" metric serves as a simple proxy here)
| evaluation = await evaluator.evaluate_prompt( |
| prompt_id=blog_prompt.id, |
| inputs=[parameters], |
| llm_callback=content_llm_callback |
| ) |
| |
| quality_score = evaluation["aggregated_metrics"].get("length", 0) |
| |
| return { |
| "content": content, |
| "quality_score": quality_score, |
| "metadata": { |
| "prompt_id": blog_prompt.id, |
| "prompt_version": prompt.version, |
| "parameters": parameters |
| } |
| } |
| else: |
| raise ValueError(f"Unsupported content type: {content_type}") |
| ``` |
|
|
| ## AI-Assisted Research Tool |
|
|
| ### Setup |
|
|
| ```python |
| from llmpromptkit import PromptManager, VersionControl |
import datetime
import json
| import openai |
| |
| # Initialize components |
| prompt_manager = PromptManager("research_prompts") |
| version_control = VersionControl(prompt_manager) |
| |
| # Create research prompts |
| article_summary_prompt = prompt_manager.create( |
| content=""" |
| Summarize the following research article: |
| |
| Title: {article_title} |
| Abstract: {article_abstract} |
| |
| Provide a summary that: |
| 1. Identifies the main research question |
| 2. Outlines the methodology |
| 3. Summarizes key findings |
| 4. Highlights limitations |
| 5. Explains the significance of the results |
| |
| Keep the summary concise, approximately 250 words. |
| """, |
| name="Article Summarizer", |
| tags=["research", "summary"] |
| ) |
| |
| research_question_prompt = prompt_manager.create( |
| content=""" |
| Based on the following information: |
| |
| Research Area: {research_area} |
| Existing Knowledge: {existing_knowledge} |
| Observed Gap: {knowledge_gap} |
| |
| Generate 5 potential research questions that: |
| 1. Address the identified knowledge gap |
| 2. Are specific and answerable |
| 3. Have theoretical or practical significance |
| 4. Can be investigated with available research methods |
| """, |
| name="Research Question Generator", |
| tags=["research", "question-generation"] |
| ) |
| |
| # Version control |
| version_control.commit(article_summary_prompt.id, "Initial version") |
| version_control.commit(research_question_prompt.id, "Initial version") |
| |
# OpenAI callback (legacy, pre-1.0 openai SDK interface)
| def research_assistant(prompt_text): |
| response = openai.ChatCompletion.create( |
| model="gpt-4", |
| messages=[{"role": "user", "content": prompt_text}] |
| ) |
| return response.choices[0].message.content |
| |
| # Research functions |
| def summarize_article(article_title, article_abstract): |
| prompt = prompt_manager.get(article_summary_prompt.id) |
| prompt_text = prompt.render( |
| article_title=article_title, |
| article_abstract=article_abstract |
| ) |
| return research_assistant(prompt_text) |
| |
| def generate_research_questions(research_area, existing_knowledge, knowledge_gap): |
| prompt = prompt_manager.get(research_question_prompt.id) |
| prompt_text = prompt.render( |
| research_area=research_area, |
| existing_knowledge=existing_knowledge, |
| knowledge_gap=knowledge_gap |
| ) |
| return research_assistant(prompt_text) |
| |
| # Save results |
| def save_research_data(research_project, data_type, content): |
| # Save the data along with prompt metadata for reproducibility |
| if data_type == "summary": |
| prompt_id = article_summary_prompt.id |
| prompt = prompt_manager.get(prompt_id) |
    elif data_type == "questions":
        prompt_id = research_question_prompt.id
        prompt = prompt_manager.get(prompt_id)
    else:
        raise ValueError(f"Unsupported data type: {data_type}")
| |
| research_data = { |
| "content": content, |
| "metadata": { |
| "prompt_id": prompt_id, |
| "prompt_version": prompt.version, |
| "timestamp": datetime.datetime.now().isoformat() |
| } |
| } |
| |
    # Save to file (in a real application, this might be saved to a database)
| with open(f"{research_project}_{data_type}.json", "w") as f: |
| json.dump(research_data, f, indent=2) |
| ``` |
|
|
| ## Educational Quiz Generator |
|
|
| ### Setup |
|
|
| ```python |
from llmpromptkit import PromptManager, PromptTemplate
import asyncio
import json
| |
| # Initialize components |
| prompt_manager = PromptManager("education_prompts") |
| |
| # Quiz generation prompt |
| quiz_prompt = prompt_manager.create( |
| content=""" |
| Generate a quiz on the topic of {topic} at a {difficulty_level} difficulty level. |
| |
| The quiz should: |
| - Have {num_questions} multiple-choice questions |
| - Cover the following subtopics: {subtopics} |
{if include_explanation == "yes"}
- Include explanations for the correct answers
{endif}
| - Be appropriate for {grade_level} students |
| |
| For each question, provide: |
| 1. The question text |
| 2. Four possible answers (A, B, C, D) |
| 3. The correct answer |
| {if include_explanation == "yes"} |
| 4. An explanation of why the answer is correct |
| {endif} |
| |
| Format the output as valid JSON. |
| """, |
| name="Quiz Generator", |
| tags=["education", "quiz"] |
| ) |
| |
| # Quiz rendering template using advanced templating |
| render_template = PromptTemplate(""" |
| <h1>{quiz_title}</h1> |
| |
| <form id="quiz-form"> |
| {for question in questions} |
| <div class="question"> |
| <p><strong>Question {question.number}:</strong> {question.text}</p> |
| <ul style="list-style-type: none;"> |
| {for option in question.options} |
| <li> |
| <input type="radio" name="q{question.number}" id="q{question.number}_{option.letter}" value="{option.letter}"> |
| <label for="q{question.number}_{option.letter}">{option.letter}. {option.text}</label> |
| </li> |
| {endfor} |
| </ul> |
| |
| {if show_answers} |
| <div class="answer"> |
| <p><strong>Correct Answer:</strong> {question.correct_answer}</p> |
| {if question.has_explanation} |
| <p><strong>Explanation:</strong> {question.explanation}</p> |
| {endif} |
| </div> |
| {endif} |
| </div> |
| {endfor} |
| |
| {if !show_answers} |
| <button type="submit">Submit Quiz</button> |
| {endif} |
| </form> |
| """) |
| |
| # LLM callback |
| async def education_llm_callback(prompt, vars): |
| # This would call your LLM API |
| # Simulated response for this example |
| await asyncio.sleep(1) # Simulate API call |
| if "quiz" in prompt: |
| return """ |
| { |
| "questions": [ |
| { |
| "text": "What is the capital of France?", |
| "options": [ |
| {"letter": "A", "text": "London"}, |
| {"letter": "B", "text": "Berlin"}, |
| {"letter": "C", "text": "Paris"}, |
| {"letter": "D", "text": "Madrid"} |
| ], |
| "correct_answer": "C", |
| "explanation": "Paris is the capital and most populous city of France." |
| }, |
| { |
| "text": "Who wrote 'Romeo and Juliet'?", |
| "options": [ |
| {"letter": "A", "text": "Charles Dickens"}, |
| {"letter": "B", "text": "William Shakespeare"}, |
| {"letter": "C", "text": "Jane Austen"}, |
| {"letter": "D", "text": "Mark Twain"} |
| ], |
| "correct_answer": "B", |
| "explanation": "William Shakespeare wrote 'Romeo and Juliet' around 1594-1596." |
| } |
| ] |
| } |
| """ |
| return "Default response" |
| |
| # Quiz generation function |
| async def generate_quiz(topic, difficulty, num_questions, grade_level, include_explanations=True): |
| prompt = prompt_manager.get(quiz_prompt.id) |
| rendered_prompt = prompt.render( |
| topic=topic, |
| difficulty_level=difficulty, |
| num_questions=num_questions, |
| subtopics=", ".join(["key concepts", "historical context", "practical applications"]), |
| include_explanation="yes" if include_explanations else "no", |
| grade_level=grade_level |
| ) |
| |
| # Get quiz content from LLM |
| quiz_json = await education_llm_callback(rendered_prompt, {}) |
| |
| # Parse JSON |
| quiz_data = json.loads(quiz_json) |
| |
| # Prepare data for HTML template |
| template_data = { |
| "quiz_title": f"{topic} Quiz ({difficulty} Level)", |
| "questions": [], |
| "show_answers": False |
| } |
| |
| # Format questions |
| for i, q in enumerate(quiz_data["questions"]): |
| question = { |
| "number": i + 1, |
| "text": q["text"], |
| "options": q["options"], |
| "correct_answer": q["correct_answer"], |
| "has_explanation": "explanation" in q, |
| "explanation": q.get("explanation", "") |
| } |
| template_data["questions"].append(question) |
| |
| # Render HTML |
| return render_template.render(**template_data) |
| ``` |
|
|
| ## Automated Coding Assistant |
|
|
| ### Setup |
|
|
````python
from llmpromptkit import PromptManager, PromptTesting
import asyncio
import re
import subprocess
import sys
import tempfile
| |
| # Initialize components |
| prompt_manager = PromptManager("coding_prompts") |
| testing = PromptTesting(prompt_manager) |
| |
| # Create code generation prompts |
| function_prompt = prompt_manager.create( |
| content=""" |
| Write a {language} function that solves the following problem: |
| |
| {problem_description} |
| |
| Function signature: {function_signature} |
| |
| Requirements: |
| - The function should handle edge cases |
| - Include appropriate comments |
| - Follow {language} best practices |
| - Be optimized for {optimization_goal} |
| |
| {if include_tests == "yes"} |
| Also include unit tests for the function. |
| {endif} |
| """, |
| name="Function Generator", |
| tags=["coding", "function"] |
| ) |
| |
| bug_fix_prompt = prompt_manager.create( |
| content=""" |
| Debug the following {language} code which has an issue: |
| |
| ```{language} |
| {buggy_code} |
| ``` |
| |
| Error message or problem description: |
| {error_description} |
|
|
| Please: |
| 1. Identify the issue |
| 2. Explain the root cause |
| 3. Provide a fixed version of the code |
| 4. Suggest how to prevent similar issues |
| """, |
| name="Bug Fix Assistant", |
| tags=["coding", "debugging"] |
| ) |
| |
| # LLM callback |
| async def coding_llm_callback(prompt, vars): |
| # This would call your LLM API |
| # Simplified example response |
| await asyncio.sleep(1) |
| |
| if "function" in prompt: |
| return """ |
| ```python |
| def find_max_subarray_sum(arr): |
| """ |
| Finds the maximum sum of any contiguous subarray. |
| Uses Kadane's algorithm with O(n) time complexity. |
| |
| Args: |
| arr: List of integers |
| Returns: |
| Maximum subarray sum |
| """ |
| if not arr: |
| return 0 |
| |
| current_max = global_max = arr[0] |
| |
| for num in arr[1:]: |
| current_max = max(num, current_max + num) |
| global_max = max(global_max, current_max) |
| |
| return global_max |
| |
| # Unit tests |
| def test_find_max_subarray_sum(): |
| assert find_max_subarray_sum([]) == 0 |
| assert find_max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6 |
| assert find_max_subarray_sum([-1, -2, -3]) == -1 |
| print("All tests passed!") |
| ``` |
| """ |
| elif "debug" in prompt: |
| return """ |
| The issue is a classic off-by-one error in the loop boundary. |
| |
| Root cause: |
| The loop is using `i <= len(arr)` which accesses an index that's out of bounds. |
| |
| Fixed code: |
| ```python |
| def process_array(arr): |
| result = [] |
| for i in range(len(arr)): # Changed from i <= len(arr) |
| result.append(arr[i] * 2) |
| return result |
| ``` |
| |
| Prevention: |
| - Remember that array indices are 0-based and go up to len(arr)-1 |
| - Use range() or enumerate() when iterating through arrays by index |
| - Add bounds checking for critical operations |
| """ |
| |
| return "Default response" |
| |
| # Function to test generated code |
| def test_generated_code(code, language): |
| """Test the generated code by running it in a safe environment.""" |
| if language.lower() == "python": |
| with tempfile.NamedTemporaryFile(suffix=".py") as temp: |
| temp.write(code.encode()) |
| temp.flush() |
| |
| try: |
                result = subprocess.run([sys.executable, temp.name],
                                        capture_output=True,
                                        text=True,
                                        timeout=5)
| if result.returncode == 0: |
| return {"success": True, "output": result.stdout} |
| else: |
| return {"success": False, "error": result.stderr} |
| except subprocess.TimeoutExpired: |
| return {"success": False, "error": "Code execution timed out"} |
| |
| return {"success": False, "error": f"Testing not implemented for {language}"} |
| |
| # Main coding assistant function |
| async def generate_function(problem, language="python", optimization_goal="readability", include_tests=True): |
    # Derive a rough function name from the problem statement (illustrative only)
    function_name = problem.lower().replace(" ", "_").replace("-", "_")
| signature = f"def {function_name}(parameters):" |
| |
| prompt = prompt_manager.get(function_prompt.id) |
| rendered_prompt = prompt.render( |
| language=language, |
| problem_description=problem, |
| function_signature=signature, |
| optimization_goal=optimization_goal, |
| include_tests="yes" if include_tests else "no" |
| ) |
| |
| # Get code from LLM |
| generated_code = await coding_llm_callback(rendered_prompt, {}) |
| |
| # Extract code from markdown if present |
| if "```" in generated_code: |
| code_blocks = re.findall(r"```(?:\w+)?\n(.+?)```", generated_code, re.DOTALL) |
| if code_blocks: |
| clean_code = code_blocks[0] |
| else: |
| clean_code = generated_code |
| else: |
| clean_code = generated_code |
| |
| # Test the code |
| test_result = test_generated_code(clean_code, language) |
| |
| return { |
| "code": clean_code, |
| "test_result": test_result, |
| "prompt_id": function_prompt.id |
| } |
| ``` |
| |