# Dify-Learning / content / tutorials.py
# Uploaded by AtZa ("Upload 16 files", commit 7cb4836, verified)
"""
Tutorial content for the Dify AI Learning Platform
Based on official Dify documentation and best practices
"""
def get_all_tutorials():
    """Return metadata for every tutorial, ordered by learning sequence.

    Each entry is a dict with the keys: id, title, slug, category,
    difficulty, difficulty_color, order_index, estimated_time (minutes),
    description, and total_steps. The list is already sorted by
    order_index (which equals id).
    """
    # Bootstrap badge color used by the templates for each difficulty tier.
    color_for = {
        'beginner': 'success',
        'intermediate': 'warning',
        'advanced': 'danger',
    }

    # Compact catalog: (title, slug, category, difficulty, minutes, steps, description).
    catalog = [
        ('Getting Started with Dify', 'getting-started-with-dify',
         'Fundamentals', 'beginner', 15, 4,
         'Learn the basics of Dify AI platform, how to access it, and understand core concepts like LLMs, embeddings, and context windows.'),
        ('Building Your First Chatbot', 'building-your-first-chatbot',
         'Chatbots', 'beginner', 25, 6,
         "Create a simple AI chatbot using Dify's visual interface. Learn about prompts, models, and basic conversation flow."),
        ('Creating Knowledge Bases with RAG', 'creating-knowledge-bases-rag',
         'Knowledge Management', 'intermediate', 35, 8,
         'Build intelligent chatbots that can answer questions from your documents using Retrieval-Augmented Generation (RAG).'),
        ('Mastering Prompt Engineering', 'mastering-prompt-engineering',
         'Prompt Engineering', 'intermediate', 30, 7,
         'Learn advanced prompt engineering techniques to get better results from your AI applications.'),
        ('Building AI Agents', 'building-ai-agents',
         'AI Agents', 'intermediate', 40, 9,
         'Create intelligent AI agents that can use tools, make decisions, and handle complex multi-step tasks.'),
        ('Workflow Orchestration', 'workflow-orchestration',
         'Workflows', 'advanced', 45, 10,
         "Master Dify's workflow system to create complex AI applications with conditional logic and parallel processing."),
        ('Tool Integration and APIs', 'tool-integration-apis',
         'Integration', 'advanced', 35, 8,
         'Connect your AI applications to external services and APIs to extend their capabilities.'),
        ('Deployment and Monitoring', 'deployment-monitoring',
         'Operations', 'advanced', 30, 6,
         'Learn how to deploy your Dify applications to production and monitor their performance.'),
    ]

    tutorials = []
    for index, (title, slug, category, difficulty, minutes, steps, description) in enumerate(catalog, start=1):
        tutorials.append({
            'id': index,
            'title': title,
            'slug': slug,
            'category': category,
            'difficulty': difficulty,
            'difficulty_color': color_for[difficulty],
            'order_index': index,          # learning order mirrors the id
            'estimated_time': minutes,
            'description': description,
            'total_steps': steps,
        })
    return tutorials
def get_tutorial_content(slug):
    """Return detailed content for a specific tutorial.

    Args:
        slug: The tutorial's URL slug, as listed by ``get_all_tutorials()``.

    Returns:
        A dict with the tutorial's metadata plus a ``steps`` list, or
        ``None`` if the slug is unknown. Each step dict has ``title``,
        ``estimated_time`` (minutes), ``content`` (an HTML fragment), and
        ``tips``; some steps also carry ``interactive_demo`` (HTML
        placeholder) or ``code_example`` (plain-text example).

    NOTE(review): only tutorials 1-3 have content here, while
    ``get_all_tutorials()`` advertises 8 — slugs 4-8 return ``None``.
    Presumably the remaining content is still to be written; confirm.
    """
    tutorials = {
        # --- Tutorial 1: Fundamentals (beginner, 4 steps) ---
        'getting-started-with-dify': {
            'id': 1,
            'title': 'Getting Started with Dify',
            'category': 'Fundamentals',
            'difficulty': 'beginner',
            'difficulty_color': 'success',
            'estimated_time': 15,
            'description': 'Learn the basics of Dify AI platform, how to access it, and understand core concepts like LLMs, embeddings, and context windows.',
            'steps': [
                {
                    'title': 'What is Dify?',
                    'estimated_time': 3,
                    'content': '''
                    <p>Dify is an open-source, user-friendly LLMOps (Large Language Model Operations) platform designed to simplify and accelerate the development, deployment, and management of AI applications.</p>
                    <h4>Key Features:</h4>
                    <ul>
                        <li><strong>Visual AI Orchestration:</strong> Build AI applications using a drag-and-drop interface</li>
                        <li><strong>No-Code/Low-Code:</strong> Perfect for beginners with no programming experience</li>
                        <li><strong>RAG Engine:</strong> Built-in Retrieval-Augmented Generation for knowledge-based applications</li>
                        <li><strong>AI Agent Framework:</strong> Create intelligent agents that can use tools and make decisions</li>
                        <li><strong>Model Support:</strong> Works with all mainstream LLMs including GPT, Claude, and open-source models</li>
                    </ul>
                    <h4>Why Choose Dify?</h4>
                    <p>Unlike other AI tools that offer individual components, Dify provides a comprehensive, production-ready solution. Think of it as a well-designed scaffolding system that handles the complexity while you focus on creating innovative AI applications.</p>
                    ''',
                    'tips': [
                        'Dify is completely open-source and free to use',
                        'You can use either the cloud version at dify.ai or deploy locally',
                        'No coding experience required to get started'
                    ]
                },
                {
                    'title': 'Accessing Dify',
                    'estimated_time': 4,
                    'content': '''
                    <p>There are two ways to access Dify:</p>
                    <h4>Cloud Version (Recommended for Beginners)</h4>
                    <ol>
                        <li>Go to <a href="https://dify.ai" target="_blank">dify.ai</a></li>
                        <li>Sign up for a free account</li>
                        <li>Verify your email address</li>
                        <li>Start building immediately</li>
                    </ol>
                    <p>The cloud version includes free usage quotas for popular AI models, making it perfect for learning and experimentation.</p>
                    <h4>Local Installation (For Advanced Users)</h4>
                    <ol>
                        <li>Clone the repository from <a href="https://github.com/langgenius/dify" target="_blank">GitHub</a></li>
                        <li>Install Docker and Docker Compose</li>
                        <li>Run <code>docker compose up -d</code> in the dify/docker directory</li>
                        <li>Access at <code>http://localhost/install</code></li>
                    </ol>
                    ''',
                    'tips': [
                        'Start with the cloud version for easier setup',
                        'Local installation gives you full control over data',
                        'Both versions have the same features and capabilities'
                    ]
                },
                {
                    'title': 'Understanding Core Concepts',
                    'estimated_time': 5,
                    'content': '''
                    <p>Before building your first AI application, it's important to understand these key concepts:</p>
                    <h4>Large Language Models (LLMs)</h4>
                    <p>LLMs are AI models trained on vast amounts of text data. They can understand and generate human-like text. Popular LLMs include:</p>
                    <ul>
                        <li><strong>OpenAI GPT-4:</strong> Excellent for complex reasoning and creative tasks</li>
                        <li><strong>Claude:</strong> Great for analysis and following instructions</li>
                        <li><strong>Open-source models:</strong> Cost-effective alternatives for specific use cases</li>
                    </ul>
                    <h4>Context Window</h4>
                    <p>The context window is the amount of text an LLM can "see" and "remember" at once. A larger context window allows the model to consider more information when generating responses, leading to more accurate and coherent outputs.</p>
                    <h4>Embeddings</h4>
                    <p>Embeddings convert text into numerical vectors that capture semantic meaning. Similar texts have similar vectors, enabling AI to understand relationships between different pieces of content. This is crucial for search and retrieval functions.</p>
                    <h4>Tokens</h4>
                    <p>Tokens are the basic units that LLMs process - roughly equivalent to words or parts of words. Understanding tokens helps you manage costs and context limits.</p>
                    ''',
                    'tips': [
                        'Different LLMs have different strengths - experiment to find the best fit',
                        'Larger context windows cost more but provide better results for complex tasks',
                        'Embeddings enable semantic search, not just keyword matching'
                    ]
                },
                {
                    'title': 'Exploring the Dify Interface',
                    'estimated_time': 3,
                    'content': '''
                    <p>Once you're logged into Dify, you'll see a clean, intuitive dashboard with several key sections:</p>
                    <h4>Main Navigation</h4>
                    <ul>
                        <li><strong>Studio:</strong> Where you build and manage your AI applications</li>
                        <li><strong>Knowledge:</strong> Manage your knowledge bases and documents</li>
                        <li><strong>Tools:</strong> Configure external tools and integrations</li>
                        <li><strong>Datasets:</strong> Manage your training data and examples</li>
                    </ul>
                    <h4>Application Types</h4>
                    <p>Dify supports several types of AI applications:</p>
                    <ul>
                        <li><strong>Chatflow:</strong> Conversational applications with complex workflows</li>
                        <li><strong>Assistant:</strong> AI agents that can use tools and external services</li>
                        <li><strong>Completion:</strong> Text generation and completion applications</li>
                    </ul>
                    <h4>Quick Actions</h4>
                    <ul>
                        <li><strong>Create from Blank:</strong> Start with a clean slate</li>
                        <li><strong>Use Template:</strong> Begin with pre-built examples</li>
                        <li><strong>Import:</strong> Bring in existing configurations</li>
                    </ul>
                    ''',
                    'tips': [
                        'Spend time exploring the interface before building your first app',
                        'Templates are great for learning best practices',
                        'The preview feature lets you test applications before publishing'
                    ]
                }
            ]
        },
        # --- Tutorial 2: Chatbots (beginner, 6 steps) ---
        'building-your-first-chatbot': {
            'id': 2,
            'title': 'Building Your First Chatbot',
            'category': 'Chatbots',
            'difficulty': 'beginner',
            'difficulty_color': 'success',
            'estimated_time': 25,
            'description': 'Create a simple AI chatbot using Dify\'s visual interface. Learn about prompts, models, and basic conversation flow.',
            'steps': [
                {
                    'title': 'Setting Up Your Model Provider',
                    'estimated_time': 5,
                    'content': '''
                    <p>Before creating your chatbot, you need to configure an AI model provider. Dify provides free usage quotas, but you'll eventually need your own API keys.</p>
                    <h4>Using the Free Quota</h4>
                    <p>New Dify users get:</p>
                    <ul>
                        <li>200 free OpenAI GPT messages</li>
                        <li>1000 free Anthropic Claude messages</li>
                    </ul>
                    <h4>Adding Your Own API Key</h4>
                    <ol>
                        <li>Go to Settings → Model Provider</li>
                        <li>Click on your preferred provider (e.g., OpenAI)</li>
                        <li>Click "Setup" and enter your API key</li>
                        <li>Test the connection and save</li>
                    </ol>
                    <h4>Getting API Keys</h4>
                    <ul>
                        <li><strong>OpenAI:</strong> Visit <a href="https://platform.openai.com/api-keys" target="_blank">platform.openai.com/api-keys</a></li>
                        <li><strong>Anthropic:</strong> Visit <a href="https://console.anthropic.com/" target="_blank">console.anthropic.com</a></li>
                        <li><strong>Other providers:</strong> Check their respective documentation</li>
                    </ul>
                    ''',
                    'tips': [
                        'Start with the free quota to learn the basics',
                        'OpenAI GPT-4 is great for general-purpose chatbots',
                        'Keep your API keys secure and never share them publicly'
                    ]
                },
                {
                    'title': 'Creating Your First Chatflow',
                    'estimated_time': 4,
                    'content': '''
                    <p>Let's create a simple chatbot that can have basic conversations with users.</p>
                    <h4>Step-by-Step Creation</h4>
                    <ol>
                        <li>Click <strong>"Create from Blank"</strong> in the Studio</li>
                        <li>Select <strong>"Chatflow"</strong> as the application type</li>
                        <li>Give your app a name (e.g., "My First Chatbot")</li>
                        <li>Click <strong>"Create"</strong></li>
                    </ol>
                    <h4>Understanding the Default Flow</h4>
                    <p>Your new chatflow will have three basic nodes:</p>
                    <ul>
                        <li><strong>Start Node:</strong> Triggers when a user sends a message</li>
                        <li><strong>LLM Node:</strong> Processes the user's message with AI</li>
                        <li><strong>Answer Node:</strong> Sends the response back to the user</li>
                    </ul>
                    <p>This simple flow creates a basic ChatGPT-like experience where users can ask questions and get AI-generated responses.</p>
                    ''',
                    # Placeholder container; presumably hydrated client-side by a
                    # script keyed on data-demo-type — TODO confirm.
                    'interactive_demo': '''
                    <div class="demo-container" data-demo-type="workflow-builder">
                        Interactive workflow demo will appear here
                    </div>
                    ''',
                    'tips': [
                        'The default three-node setup is perfect for simple conversational AI',
                        'You can always add more complexity later',
                        'Give your applications descriptive names for easy management'
                    ]
                },
                {
                    'title': 'Configuring Your LLM',
                    'estimated_time': 6,
                    'content': '''
                    <p>The LLM node is the brain of your chatbot. Let's configure it properly.</p>
                    <h4>Selecting a Model</h4>
                    <ol>
                        <li>Click on the LLM node to open its settings</li>
                        <li>Choose your model (GPT-4 is recommended for beginners)</li>
                        <li>Adjust the temperature (0.7 is a good starting point)</li>
                        <li>Set the max tokens (1000-2000 for most conversations)</li>
                    </ol>
                    <h4>Model Parameters Explained</h4>
                    <ul>
                        <li><strong>Temperature (0-1):</strong> Controls creativity. Lower = more focused, Higher = more creative</li>
                        <li><strong>Max Tokens:</strong> Maximum length of the response</li>
                        <li><strong>Top P:</strong> Controls diversity of word choices</li>
                        <li><strong>Presence Penalty:</strong> Reduces repetitive responses</li>
                    </ul>
                    <h4>Writing Your First Prompt</h4>
                    <p>In the "Prompt" section, write instructions for your AI:</p>
                    <div class="code-example">
                        <pre><code>You are a helpful AI assistant. Your goal is to provide accurate, helpful, and friendly responses to users' questions.
Please follow these guidelines:
- Be conversational and approachable
- Provide clear and concise answers
- Ask clarifying questions when needed
- Admit when you don't know something
Respond to the user's message: {{sys.query}}</code></pre>
                    </div>
                    ''',
                    'tips': [
                        'Start with a temperature of 0.7 for balanced responses',
                        'Clear prompts lead to better AI behavior',
                        'Use {{sys.query}} to reference the user\'s input'
                    ]
                },
                {
                    'title': 'Testing Your Chatbot',
                    'estimated_time': 4,
                    'content': '''
                    <p>Before publishing, it's crucial to test your chatbot thoroughly.</p>
                    <h4>Using the Preview Feature</h4>
                    <ol>
                        <li>Click the <strong>"Preview"</strong> button in the top-right corner</li>
                        <li>A chat interface will open on the right side</li>
                        <li>Type a test message and press Enter</li>
                        <li>Observe the AI's response</li>
                    </ol>
                    <h4>Test Cases to Try</h4>
                    <ul>
                        <li><strong>Simple greeting:</strong> "Hello, how are you?"</li>
                        <li><strong>Question:</strong> "What is artificial intelligence?"</li>
                        <li><strong>Follow-up:</strong> Ask related questions to test conversation flow</li>
                        <li><strong>Edge cases:</strong> Empty messages, very long messages, special characters</li>
                    </ul>
                    <h4>Debugging Common Issues</h4>
                    <ul>
                        <li><strong>No response:</strong> Check your model provider setup</li>
                        <li><strong>Poor responses:</strong> Refine your prompt</li>
                        <li><strong>Too long/short:</strong> Adjust max tokens</li>
                        <li><strong>Too creative/rigid:</strong> Adjust temperature</li>
                    </ul>
                    ''',
                    'interactive_demo': '''
                    <div class="demo-container" data-demo-type="chatbot-preview">
                        Interactive chatbot demo will appear here
                    </div>
                    ''',
                    'tips': [
                        'Test with various types of questions and conversation styles',
                        'Pay attention to response quality and consistency',
                        'Make note of any improvements needed before publishing'
                    ]
                },
                {
                    'title': 'Adding Personality and Features',
                    'estimated_time': 4,
                    'content': '''
                    <p>Make your chatbot more engaging by adding personality and useful features.</p>
                    <h4>Enhancing Your Prompt</h4>
                    <p>Add personality to your chatbot with a more detailed prompt:</p>
                    <div class="code-example">
                        <pre><code>You are Alex, a friendly and knowledgeable AI assistant with a passion for helping people learn and solve problems.
Your personality traits:
- Enthusiastic about technology and learning
- Patient and encouraging
- Uses occasional emojis to be more approachable
- Provides examples when explaining concepts
Your expertise includes:
- General knowledge and current events
- Technology and AI concepts
- Problem-solving strategies
- Creative thinking
Always aim to:
1. Understand the user's needs fully
2. Provide helpful and accurate information
3. Encourage further learning and exploration
4. Maintain a positive and supportive tone
User message: {{sys.query}}</code></pre>
                    </div>
                    <h4>Adding Features</h4>
                    <p>Click "Add Feature" to enable:</p>
                    <ul>
                        <li><strong>Opening Statement:</strong> Greet users when they start chatting</li>
                        <li><strong>Next Question Suggestions:</strong> Provide follow-up question ideas</li>
                        <li><strong>Speech to Text:</strong> Allow voice input</li>
                        <li><strong>Citation and Attribution:</strong> Show sources for responses</li>
                    </ul>
                    ''',
                    'tips': [
                        'A good personality makes interactions more enjoyable',
                        'Opening statements set the right expectations',
                        'Question suggestions help users explore your chatbot\'s capabilities'
                    ]
                },
                {
                    'title': 'Publishing and Sharing',
                    'estimated_time': 2,
                    'content': '''
                    <p>Once you're satisfied with your chatbot, it's time to publish and share it.</p>
                    <h4>Publishing Your Chatbot</h4>
                    <ol>
                        <li>Click the <strong>"Publish"</strong> button</li>
                        <li>Review your settings one final time</li>
                        <li>Click <strong>"Update"</strong> to make it live</li>
                    </ol>
                    <h4>Sharing Options</h4>
                    <p>After publishing, you can share your chatbot in several ways:</p>
                    <ul>
                        <li><strong>Public Link:</strong> Share a direct URL to your chatbot</li>
                        <li><strong>Embed Code:</strong> Add it to your website with HTML/iframe</li>
                        <li><strong>API Access:</strong> Integrate with your applications via REST API</li>
                    </ul>
                    <h4>Monitoring Usage</h4>
                    <p>Use the built-in analytics to track:</p>
                    <ul>
                        <li>Number of conversations</li>
                        <li>User satisfaction ratings</li>
                        <li>Most common questions</li>
                        <li>Response times and costs</li>
                    </ul>
                    <h4>Next Steps</h4>
                    <p>Congratulations! You've built your first AI chatbot. Consider:</p>
                    <ul>
                        <li>Gathering user feedback for improvements</li>
                        <li>Adding a knowledge base for specific topics</li>
                        <li>Creating more sophisticated workflows</li>
                        <li>Exploring AI agent capabilities</li>
                    </ul>
                    ''',
                    'tips': [
                        'Test the published version to ensure everything works',
                        'Monitor usage patterns to identify improvement opportunities',
                        'Regular updates keep your chatbot relevant and useful'
                    ]
                }
            ]
        },
        # --- Tutorial 3: Knowledge Management / RAG (intermediate, 8 steps) ---
        'creating-knowledge-bases-rag': {
            'id': 3,
            'title': 'Creating Knowledge Bases with RAG',
            'category': 'Knowledge Management',
            'difficulty': 'intermediate',
            'difficulty_color': 'warning',
            'estimated_time': 35,
            'description': 'Build intelligent chatbots that can answer questions from your documents using Retrieval-Augmented Generation (RAG).',
            'steps': [
                {
                    'title': 'Understanding RAG',
                    'estimated_time': 5,
                    'content': '''
                    <p>Retrieval-Augmented Generation (RAG) is a powerful technique that allows AI to access external knowledge sources to provide accurate, up-to-date information.</p>
                    <h4>How RAG Works</h4>
                    <ol>
                        <li><strong>Document Processing:</strong> Your documents are split into chunks and converted to embeddings</li>
                        <li><strong>Query Processing:</strong> User questions are also converted to embeddings</li>
                        <li><strong>Similarity Search:</strong> The system finds the most relevant document chunks</li>
                        <li><strong>Response Generation:</strong> The AI uses retrieved information to generate accurate answers</li>
                    </ol>
                    <h4>Benefits of RAG</h4>
                    <ul>
                        <li><strong>Accuracy:</strong> Reduces AI hallucinations by grounding responses in real data</li>
                        <li><strong>Current Information:</strong> Access to your latest documents and data</li>
                        <li><strong>Source Attribution:</strong> Can cite specific sources for transparency</li>
                        <li><strong>Cost Effective:</strong> More efficient than fine-tuning models</li>
                    </ul>
                    <h4>Use Cases</h4>
                    <ul>
                        <li>Customer support with product documentation</li>
                        <li>Internal knowledge sharing</li>
                        <li>Educational content delivery</li>
                        <li>Legal document analysis</li>
                        <li>Technical documentation assistance</li>
                    </ul>
                    ''',
                    'tips': [
                        'RAG is perfect when you need AI to answer questions about specific content',
                        'Quality of your source documents directly impacts answer quality',
                        'RAG works best with well-structured, factual content'
                    ]
                },
                {
                    'title': 'Creating Your First Knowledge Base',
                    'estimated_time': 6,
                    'content': '''
                    <p>Let's create a knowledge base from your documents that your AI can search and reference.</p>
                    <h4>Step-by-Step Knowledge Base Creation</h4>
                    <ol>
                        <li>Navigate to <strong>Knowledge</strong> in the main menu</li>
                        <li>Click <strong>"Create Knowledge"</strong></li>
                        <li>Give your knowledge base a descriptive name</li>
                        <li>Add a brief description of its contents</li>
                        <li>Click <strong>"Create"</strong></li>
                    </ol>
                    <h4>Supported Data Sources</h4>
                    <p>Dify supports multiple data sources:</p>
                    <ul>
                        <li><strong>Local Files:</strong> Upload PDFs, Word docs, text files, CSV, etc.</li>
                        <li><strong>Notion Pages:</strong> Sync directly from your Notion workspace</li>
                        <li><strong>Web Pages:</strong> Scrape content from websites using Jina or Firecrawl API</li>
                        <li><strong>Plain Text:</strong> Copy and paste content directly</li>
                    </ul>
                    <h4>File Requirements and Limits</h4>
                    <ul>
                        <li><strong>Supported formats:</strong> PDF, DOCX, TXT, MD, CSV, XLSX</li>
                        <li><strong>File size limit:</strong> Usually 15MB per file (varies by plan)</li>
                        <li><strong>Total size:</strong> Depends on your subscription tier</li>
                        <li><strong>Language support:</strong> Multi-language documents supported</li>
                    </ul>
                    ''',
                    'tips': [
                        'Start with a small set of high-quality documents',
                        'Use descriptive names for easy management',
                        'Organize related content in the same knowledge base'
                    ]
                },
                {
                    'title': 'Uploading and Processing Documents',
                    'estimated_time': 8,
                    'content': '''
                    <p>Now let's add documents to your knowledge base and configure how they're processed.</p>
                    <h4>Document Upload Process</h4>
                    <ol>
                        <li>Click <strong>"Add Document"</strong> in your knowledge base</li>
                        <li>Choose your upload method (File, Notion, Web scraping, or Text)</li>
                        <li>Select or upload your documents</li>
                        <li>Review the document preview</li>
                        <li>Configure processing settings</li>
                    </ol>
                    <h4>Chunking Configuration</h4>
                    <p>Documents are split into smaller chunks for better retrieval:</p>
                    <ul>
                        <li><strong>Automatic Chunking:</strong> Dify automatically splits by paragraphs</li>
                        <li><strong>Custom Rules:</strong> Set your own chunk size and overlap</li>
                        <li><strong>Chunk Size:</strong> 500-1000 characters is usually optimal</li>
                        <li><strong>Overlap:</strong> 50-100 characters to maintain context</li>
                    </ul>
                    <h4>Text Preprocessing Options</h4>
                    <ul>
                        <li><strong>Remove extra spaces:</strong> Clean up formatting</li>
                        <li><strong>Remove URLs:</strong> Filter out web links</li>
                        <li><strong>Remove email addresses:</strong> Protect privacy</li>
                        <li><strong>Custom preprocessing:</strong> Advanced filtering rules</li>
                    </ul>
                    <h4>Embedding Model Selection</h4>
                    <p>Choose the right embedding model for your content:</p>
                    <ul>
                        <li><strong>OpenAI text-embedding-3-small:</strong> Fast and cost-effective</li>
                        <li><strong>OpenAI text-embedding-3-large:</strong> Higher accuracy</li>
                        <li><strong>Cohere embed-english:</strong> Good for English content</li>
                        <li><strong>Cohere embed-multilingual:</strong> For multiple languages</li>
                    </ul>
                    ''',
                    # Plain-text (Markdown) example, not HTML — rendered separately
                    # from 'content'; presumably in a code viewer. TODO confirm.
                    'code_example': '''
# Example document structure for optimal RAG performance:
## Product FAQ Document
### What is Product X?
Product X is a comprehensive solution for...
### How do I install Product X?
1. Download the installer from our website
2. Run the installer as administrator
3. Follow the setup wizard
### Troubleshooting Common Issues
**Issue:** Application won't start
**Solution:** Check system requirements and try running as administrator
                    ''',
                    'tips': [
                        'Well-structured documents with clear headings work best',
                        'Keep chunk sizes moderate - too small loses context, too large reduces precision',
                        'Choose embedding models based on your primary language'
                    ]
                },
                {
                    'title': 'Configuring Retrieval Settings',
                    'estimated_time': 5,
                    'content': '''
                    <p>Fine-tune how your knowledge base searches for and retrieves relevant information.</p>
                    <h4>Retrieval Methods</h4>
                    <ul>
                        <li><strong>Vector Retrieval:</strong> Finds semantically similar content using embeddings</li>
                        <li><strong>Full-Text Search:</strong> Traditional keyword-based search</li>
                        <li><strong>Hybrid Retrieval (Recommended):</strong> Combines both methods for best results</li>
                    </ul>
                    <h4>Hybrid Retrieval Configuration</h4>
                    <p>Adjust the balance between semantic and keyword search:</p>
                    <ul>
                        <li><strong>Semantic Weight (70%):</strong> Finds conceptually related content</li>
                        <li><strong>Keyword Weight (30%):</strong> Finds exact term matches</li>
                        <li><strong>Custom Weights:</strong> Adjust based on your content type</li>
                    </ul>
                    <h4>Reranking Models</h4>
                    <p>Improve retrieval accuracy with reranking:</p>
                    <ul>
                        <li><strong>Cohere Rerank:</strong> Reorders results for better relevance</li>
                        <li><strong>BGE Reranker:</strong> Open-source alternative</li>
                        <li><strong>No Reranking:</strong> Faster but potentially less accurate</li>
                    </ul>
                    <h4>Retrieval Parameters</h4>
                    <ul>
                        <li><strong>Top K:</strong> Number of chunks to retrieve (3-5 recommended)</li>
                        <li><strong>Score Threshold:</strong> Minimum similarity score for inclusion</li>
                        <li><strong>Max Tokens:</strong> Total token limit for retrieved content</li>
                    </ul>
                    ''',
                    'tips': [
                        'Hybrid retrieval works best for most use cases',
                        'Start with 70% semantic, 30% keyword weighting',
                        'Use reranking for better accuracy when response quality matters most'
                    ]
                },
                {
                    'title': 'Testing Your Knowledge Base',
                    'estimated_time': 4,
                    'content': '''
                    <p>Before integrating your knowledge base into an application, test its retrieval accuracy.</p>
                    <h4>Using the Recall Test</h4>
                    <ol>
                        <li>Go to your knowledge base settings</li>
                        <li>Click on the <strong>"Recall Test"</strong> tab</li>
                        <li>Enter test queries related to your content</li>
                        <li>Review the retrieved chunks and their relevance scores</li>
                        <li>Adjust settings if needed</li>
                    </ol>
                    <h4>Effective Test Queries</h4>
                    <ul>
                        <li><strong>Direct questions:</strong> "How do I reset my password?"</li>
                        <li><strong>Conceptual queries:</strong> "Security best practices"</li>
                        <li><strong>Specific terms:</strong> "API rate limits"</li>
                        <li><strong>Variations:</strong> Test different ways of asking the same thing</li>
                    </ul>
                    <h4>Evaluating Results</h4>
                    <p>Look for:</p>
                    <ul>
                        <li><strong>Relevance:</strong> Do retrieved chunks actually answer the question?</li>
                        <li><strong>Completeness:</strong> Is all necessary information retrieved?</li>
                        <li><strong>Ranking:</strong> Are the most relevant chunks ranked highest?</li>
                        <li><strong>Coverage:</strong> Can the system find information across all your documents?</li>
                    </ul>
                    <h4>Common Issues and Solutions</h4>
                    <ul>
                        <li><strong>Poor retrieval:</strong> Adjust chunk size or embedding model</li>
                        <li><strong>Irrelevant results:</strong> Increase score threshold</li>
                        <li><strong>Missing information:</strong> Check document quality and chunking</li>
                        <li><strong>Inconsistent results:</strong> Consider using reranking</li>
                    </ul>
                    ''',
                    'tips': [
                        'Test with questions your actual users would ask',
                        'Document any query patterns that don\'t work well',
                        'Iterate on your settings based on test results'
                    ]
                },
                {
                    'title': 'Building a RAG-Powered Chatbot',
                    'estimated_time': 5,
                    'content': '''
                    <p>Now let's create a chatbot that uses your knowledge base to answer questions accurately.</p>
                    <h4>Creating the Chatflow</h4>
                    <ol>
                        <li>Create a new Chatflow application</li>
                        <li>Keep the default Start → LLM → Answer flow</li>
                        <li>Click on the LLM node to configure it</li>
                    </ol>
                    <h4>Adding Your Knowledge Base</h4>
                    <ol>
                        <li>In the LLM node settings, find the <strong>"Context"</strong> section</li>
                        <li>Click <strong>"Add Knowledge"</strong></li>
                        <li>Select your knowledge base</li>
                        <li>Configure retrieval settings if needed</li>
                    </ol>
                    <h4>Crafting a RAG-Optimized Prompt</h4>
                    <div class="code-example">
                        <pre><code>You are a helpful assistant that answers questions based on the provided context.
Instructions:
1. Use the context information below to answer the user's question
2. If the context doesn't contain relevant information, say "I don't have information about that in my knowledge base"
3. Always cite specific parts of the context when possible
4. Be accurate and don't make up information not in the context
Context: {{#knowledge}}
User Question: {{sys.query}}
Please provide a helpful and accurate response based on the context above.</code></pre>
                    </div>
                    <h4>Advanced RAG Techniques</h4>
                    <ul>
                        <li><strong>Question Classification:</strong> Route different types of questions appropriately</li>
                        <li><strong>Multiple Knowledge Bases:</strong> Use different sources for different topics</li>
                        <li><strong>Fallback Strategies:</strong> Handle cases when no relevant information is found</li>
                    </ul>
                    ''',
                    'tips': [
                        'Always instruct the AI to stay within the provided context',
                        'Enable citation features to show sources',
                        'Test with questions both inside and outside your knowledge base'
                    ]
                },
                {
                    'title': 'Advanced RAG Workflows',
                    'estimated_time': 6,
                    'content': '''
                    <p>Create more sophisticated RAG applications with conditional logic and multiple knowledge sources.</p>
                    <h4>Question Classification Workflow</h4>
                    <p>Route different types of questions to appropriate knowledge bases:</p>
                    <ol>
                        <li>Add a <strong>Question Classifier</strong> node after Start</li>
                        <li>Define categories (e.g., "Product Info", "Technical Support", "Billing")</li>
                        <li>Connect different paths to different knowledge bases</li>
                        <li>Use conditional logic to route appropriately</li>
                    </ol>
                    <h4>Multi-Step RAG Process</h4>
                    <ol>
                        <li><strong>Initial Retrieval:</strong> Find relevant chunks</li>
                        <li><strong>Relevance Check:</strong> Evaluate if information is sufficient</li>
                        <li><strong>Follow-up Retrieval:</strong> Search additional sources if needed</li>
                        <li><strong>Response Generation:</strong> Synthesize all found information</li>
                    </ol>
                    <h4>Handling Edge Cases</h4>
                    <ul>
                        <li><strong>No Matches Found:</strong> Provide helpful guidance on how to rephrase</li>
                        <li><strong>Low Confidence:</strong> Ask clarifying questions</li>
                        <li><strong>Multiple Valid Answers:</strong> Present options clearly</li>
                        <li><strong>Outdated Information:</strong> Include disclaimers about data freshness</li>
                    </ul>
                    <div class="code-example">
                        <pre><code># Example multi-step RAG prompt
You are analyzing a user question in two steps:
Step 1: Evaluate if the retrieved context contains sufficient information
Context: {{#knowledge}}
Question: {{sys.query}}
If context is sufficient, respond with: SUFFICIENT
If context is insufficient, respond with: INSUFFICIENT - [reason]
Step 2 (only if sufficient): Provide a complete answer based on the context.</code></pre>
                    </div>
                    ''',
                    'tips': [
                        'Question classification improves accuracy for diverse knowledge bases',
                        'Always have fallback options when retrieval fails',
                        'Consider the user experience when no good answers are found'
                    ]
                },
                {
                    'title': 'Monitoring and Optimization',
                    'estimated_time': 6,
                    'content': '''
                    <p>Continuously improve your RAG system by monitoring performance and optimizing based on usage patterns.</p>
                    <h4>Key Metrics to Track</h4>
                    <ul>
                        <li><strong>Retrieval Accuracy:</strong> Percentage of queries with relevant results</li>
                        <li><strong>Response Quality:</strong> User satisfaction with answers</li>
                        <li><strong>Coverage:</strong> Percentage of questions that can be answered</li>
                        <li><strong>Response Time:</strong> Average time to generate answers</li>
                        <li><strong>Cost:</strong> Token usage for embeddings and generation</li>
                    </ul>
                    <h4>Optimization Strategies</h4>
                    <ul>
                        <li><strong>Document Quality:</strong> Improve source content structure and clarity</li>
                        <li><strong>Chunk Optimization:</strong> Adjust size and overlap based on performance</li>
                        <li><strong>Embedding Tuning:</strong> Experiment with different embedding models</li>
                        <li><strong>Prompt Refinement:</strong> Continuously improve instructions</li>
                    </ul>
                    <h4>Common Performance Issues</h4>
                    <ul>
                        <li><strong>Poor Retrieval:</strong>
                            <ul>
                                <li>Check document quality and structure</li>
                                <li>Adjust chunking strategy</li>
                                <li>Consider different embedding models</li>
                            </ul>
                        </li>
                        <li><strong>Slow Responses:</strong>
                            <ul>
                                <li>Optimize retrieval parameters</li>
                                <li>Use smaller, more focused knowledge bases</li>
                                <li>Consider caching frequent queries</li>
                            </ul>
                        </li>
                        <li><strong>High Costs:</strong>
                            <ul>
                                <li>Optimize chunk sizes to reduce token usage</li>
                                <li>Use more efficient embedding models</li>
                                <li>Implement query caching</li>
                            </ul>
                        </li>
                    </ul>
                    <h4>Best Practices for Production</h4>
                    <ul>
                        <li><strong>Regular Updates:</strong> Keep knowledge bases current</li>
                        <li><strong>Quality Control:</strong> Review and curate content regularly</li>
                        <li><strong>User Feedback:</strong> Collect and act on user ratings</li>
                        <li><strong>A/B Testing:</strong> Test different configurations</li>
                        <li><strong>Backup Strategies:</strong> Maintain multiple knowledge sources</li>
                    </ul>
                    ''',
                    'tips': [
                        'Monitor real user queries to identify content gaps',
                        'Regularly review and update your knowledge base content',
                        'Use analytics to identify the most common query patterns'
                    ]
                }
            ]
        }
    }
    # dict.get yields None for any slug without authored content.
    return tutorials.get(slug)