🤖 Build a Documentation Expert Chatbot with a Gemini RAG Pipeline
This is an Internal Wiki / AI RAG automation workflow containing 46 nodes. It mainly uses nodes such as Set, Html, Filter, SplitOut, and HttpRequest. It builds an expert chatbot for the n8n documentation using an OpenAI RAG pipeline.
- May require authentication credentials for the target API
- OpenAI API key
Nodes used (46)
{
"id": "0WrbldJbytCFt32Q",
"meta": {
"instanceId": "7f3a23cb64949b4506a831a13237262d06d8d391c70a726835bb67f156e11c9b",
"templateId": "6137",
"templateCredsSetupCompleted": true
},
"name": "🤖 Build a Documentation Expert Chatbot with Gemini RAG Pipeline",
"tags": [],
"nodes": [
{
"id": "f8083b86-d34e-4499-8366-1321ed4cff73",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Mémoire simple",
"type": "@n8n/n8n-nodes-langchain.memoryBufferWindow",
"creator": "Lucas Peyrin",
"position": [
1856,
3424
],
"parameters": {},
"typeVersion": 1.3
},
{
"id": "e4b137f4-11b8-4559-b1d1-eb66ea99a59d",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Chargeur de données par défaut",
"type": "@n8n/n8n-nodes-langchain.documentDefaultDataLoader",
"creator": "Lucas Peyrin",
"position": [
3568,
1904
],
"parameters": {
"options": {},
"jsonData": "={{ $json.documentation }}",
"jsonMode": "expressionData",
"textSplittingMode": "custom"
},
"typeVersion": 1.1
},
{
"id": "de258e6d-8c4f-4c3c-934b-f96413f4d617",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Séparateur de texte récursif",
"type": "@n8n/n8n-nodes-langchain.textSplitterRecursiveCharacterTextSplitter",
"creator": "Lucas Peyrin",
"position": [
3664,
2112
],
"parameters": {
"options": {
"splitCode": "markdown"
},
"chunkSize": 1500,
"chunkOverlap": 200
},
"typeVersion": 1
},
{
"id": "c54c329b-3d8a-47c6-87f3-ffb06d6d71d5",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Supprimer le contenu documentaire en double",
"type": "n8n-nodes-base.removeDuplicates",
"creator": "Lucas Peyrin",
"position": [
2944,
1680
],
"parameters": {
"options": {
"scope": "node",
"historySize": 10000
},
"operation": "removeItemsSeenInPreviousExecutions",
"dedupeValue": "={{ $json.documentation }}"
},
"typeVersion": 2
},
{
"id": "4e9f3fa9-2c4f-49e4-b9c3-3ff2130a37dd",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Conserver uniquement les chemins de documents",
"type": "n8n-nodes-base.filter",
"creator": "Lucas Peyrin",
"position": [
3072,
912
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "52b8b89a-30cd-4a0d-a428-e5d341bcebbf",
"operator": {
"type": "string",
"operation": "endsWith"
},
"leftValue": "={{ $json.link }}",
"rightValue": "/"
},
{
"id": "d8019c4e-e1a1-43ec-93a8-dac3d8c083b6",
"operator": {
"type": "string",
"operation": "notStartsWith"
},
"leftValue": "={{ $json.link }}",
"rightValue": "https://"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "37b9faf8-da95-4add-a8fb-d16083582e81",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Nettoyer la documentation",
"type": "n8n-nodes-base.set",
"creator": "Lucas Peyrin",
"position": [
2512,
1680
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "5ed964a6-5200-454c-b983-f3dc7c8c7a48",
"name": "documentation",
"type": "string",
"value": "={{ $json.data.replace(/([^#\\n]+)\\s*#/g, '# $1').trim().replace(/^\\s*https?:\\/\\/\\S+\\s*/, '') }}"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "327aafbb-3628-4fdc-84fc-567caa2643c8",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Obtenir tous les liens de documentation n8n",
"type": "n8n-nodes-base.httpRequest",
"creator": "Lucas Peyrin",
"position": [
1920,
912
],
"parameters": {
"url": "https://docs.n8n.io/",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "03e6b6f4-1674-4be6-b5c8-9505d39b3d13",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Extraire les liens du HTML",
"type": "n8n-nodes-base.html",
"creator": "Lucas Peyrin",
"position": [
2208,
912
],
"parameters": {
"options": {},
"operation": "extractHtmlContent",
"extractionValues": {
"values": [
{
"key": "links",
"attribute": "href",
"cssSelector": "a",
"returnArray": true,
"returnValue": "attribute"
}
]
}
},
"typeVersion": 1.2
},
{
"id": "a1a3eb41-4d40-4cab-8f9f-507fe968b211",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Séparer les liens",
"type": "n8n-nodes-base.splitOut",
"creator": "Lucas Peyrin",
"position": [
2544,
912
],
"parameters": {
"options": {
"destinationFieldName": "link"
},
"fieldToSplitOut": "links"
},
"typeVersion": 1
},
{
"id": "65a57136-d329-4455-9301-13f808f8dfef",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Supprimer les liens en double",
"type": "n8n-nodes-base.removeDuplicates",
"creator": "Lucas Peyrin",
"position": [
2848,
912
],
"parameters": {
"compare": "selectedFields",
"options": {},
"fieldsToCompare": "link"
},
"typeVersion": 2
},
{
"id": "7051a5f4-ee42-42ce-b678-f76e5e96e22f",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Ajouter une page de documentation au Vector Store",
"type": "n8n-nodes-base.executeWorkflow",
"creator": "Lucas Peyrin",
"position": [
3584,
944
],
"parameters": {
"options": {
"waitForSubWorkflow": true
},
"workflowId": {
"__rl": true,
"mode": "id",
"value": "={{ $workflow.id }}"
},
"workflowInputs": {
"value": {
"path": "={{ $json.link }}"
},
"schema": [
{
"id": "path",
"type": "string",
"display": true,
"removed": false,
"required": false,
"displayName": "path",
"defaultMatch": false,
"canBeUsedToMatch": true
}
],
"mappingMode": "defineBelow",
"matchingColumns": [
"data"
],
"attemptToConvertTypes": false,
"convertFieldsToString": true
}
},
"typeVersion": 1.2,
"alwaysOutputData": true
},
{
"id": "40517663-1ce0-47d8-9596-2bff302d0e58",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Obtenir la page de documentation",
"type": "n8n-nodes-base.httpRequest",
"creator": "Lucas Peyrin",
"onError": "continueErrorOutput",
"position": [
1984,
1680
],
"parameters": {
"url": "=https://docs.n8n.io/{{ $('Ingest Web Page').last().json.path }}",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "cb53b073-26df-401e-927a-5e0f3d27acfc",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Extraire le contenu de la documentation",
"type": "n8n-nodes-base.html",
"creator": "Lucas Peyrin",
"position": [
2288,
1680
],
"parameters": {
"options": {},
"operation": "extractHtmlContent",
"extractionValues": {
"values": [
{
"key": "data",
"cssSelector": "article",
"skipSelectors": "img, footer, form"
}
]
}
},
"typeVersion": 1.2
},
{
"id": "1087b5fe-d8c5-46fc-884a-f173b4f22859",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive2",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1328,
560
],
"parameters": {
"color": 7,
"width": 2640,
"height": 1760,
"content": "## Part 1: Building the Knowledge Base (The Library)\n\n**Goal:** To read all n8n documentation, break it into small pieces, and store it in n8n's **in-memory vector store**.\n**Action:** You only need to run this part **ONCE** per n8n session by clicking the \"Execute workflow\" button on the `Start Indexing` trigger.\n**Time:** This will take several minutes to complete (~15 to 20).\n\n**Important:** The in-memory store is not permanent. If you restart n8n, you must run this indexing flow again to rebuild the knowledge base."
},
"typeVersion": 1
},
{
"id": "793bbb80-797b-429c-8710-41dec44ae84c",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Boucler sur les pages de documentation",
"type": "n8n-nodes-base.splitInBatches",
"creator": "Lucas Peyrin",
"position": [
3360,
912
],
"parameters": {
"options": {},
"batchSize": 10
},
"typeVersion": 3
},
{
"id": "7fbac9ee-8be3-4407-adcd-4420c63ac793",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive4",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
368,
240
],
"parameters": {
"color": 6,
"width": 1064,
"height": 296,
"content": "# Tutorial: Build an AI Expert with RAG\n\nWelcome! This workflow teaches you **RAG (Retrieval-Augmented Generation)** using n8n's built-in tools.\n\n**The Goal:** To create an AI chatbot that is an expert on a specific topic (the n8n docs) and *never* makes up answers.\n\n**It has two parts:**\n1. **TOP FLOW (Indexing):** You run this once manually to build the AI's knowledge base. It reads the n8n docs and stores them in n8n's **in-memory vector store**.\n2. **BOTTOM FLOW (Chat):** This is the live chatbot. It retrieves relevant info from the knowledge base to answer your questions accurately."
},
"typeVersion": 1
},
{
"id": "d64e0666-3ea6-4abf-8a3d-331300df7631",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive5",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
368,
560
],
"parameters": {
"color": 7,
"width": 924,
"height": 1392,
"content": "# Workflow Setup"
},
"typeVersion": 1
},
{
"id": "c58ee1a1-1626-441b-9b19-26f31acc2308",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Agent IA de documentation n8n",
"type": "@n8n/n8n-nodes-langchain.agent",
"creator": "Lucas Peyrin",
"position": [
1872,
2816
],
"parameters": {
"options": {
"systemMessage": "=<role>\nYou are a specialized AI assistant, an expert in n8n's official documentation. Your sole mission is to help users by providing accurate and factual information extracted exclusively from this documentation. You are meticulous, factual, and never deviate from your knowledge scope.\n</role>\n\n<instructions>\n<goal>\nYour primary goal is to provide precise and factual answers to user questions about the n8n automation platform, based **exclusively** on the excerpts from the official documentation provided in the context.\n</goal>\n\n<context>\nYou operate according to a RAG (Retrieval-Augmented Generation) model. For each user question, use the appropriate tool to retrieve the necessary information from the n8n documentation's vector database.\n\n**Mandatory rules:**\n\n1. **Single source of truth:** Your answer MUST be entirely and solely derived from the information present in the provided documentation.\n2. **Accuracy and implicit citation:** Base your answer as literally as possible on the documentation text. Rephrase for clarity and conciseness, but do not add any information not found there. Act as if the documentation is your only knowledge in the world.\n3. **Do not mention the process:** Never mention your tool or the fact that you are a RAG system in your answer to the user. Respond as an expert who directly consults their documentation.\n</context>\n\n<output_format>\n* **Clarity:** Provide a clear, concise, and direct answer.\n* **Structuring:** If the context contains steps, lists, or code examples, use Markdown syntax to format them legibly (bullet points, numbered lists, code blocks for code snippets, JSON, etc.).\n* **Tone:** Adopt a professional, helpful, and confident tone, that of a technical n8n expert.\n</output_format>\n</instructions>"
}
},
"typeVersion": 2.1
},
{
"id": "efc769ba-fc45-48aa-9924-9f9fe7572b5d",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive6",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1824,
704
],
"parameters": {
"color": 7,
"width": 288,
"height": 400,
"content": "### Step 1.1: Find All the 'Books'\n\n**What it does:** This node visits the main n8n documentation page.\n**Analogy:** We're asking the library's front desk for a complete list of every single book they have."
},
"typeVersion": 1
},
{
"id": "a988d207-5afd-41f0-9992-3ac21d22e19f",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive7",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
2128,
704
],
"parameters": {
"color": 7,
"width": 272,
"height": 400,
"content": "### Step 1.2: Read the List of Books\n\n**What it does:** It takes the HTML from the previous step and extracts every single link (`<a>` tag).\n**Analogy:** We're reading the list from the front desk and pulling out just the titles of the books (the links)."
},
"typeVersion": 1
},
{
"id": "114aa79b-0cc7-4f66-866f-2d8cbe001a0f",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive8",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
2416,
704
],
"parameters": {
"color": 7,
"width": 336,
"height": 400,
"content": "### Step 1.3: Process One Book at a Time\n\n**What it does:** It takes the big list of links and turns each link into a separate item for n8n to process.\n**Analogy:** Instead of trying to read all the books at once, we're creating a separate to-do item for each book."
},
"typeVersion": 1
},
{
"id": "a9b6daac-c326-4d38-ace1-e9cd633fd0b5",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive9",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
2768,
704
],
"parameters": {
"color": 7,
"width": 480,
"height": 400,
"content": "### Step 1.4: Tidy Up the To-Do List\n\n**What it does:** First, it removes any duplicate links. Then, it filters out any links that don't point to an actual documentation page (e.g., links to the homepage or external sites).\n**Analogy:** We're throwing away duplicate to-do items and any notes that aren't actually books."
},
"typeVersion": 1
},
{
"id": "a2f4b7c5-e034-4782-ae0b-8ee358b02e91",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive10",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
3264,
704
],
"parameters": {
"color": 4,
"width": 512,
"height": 464,
"content": "### Step 1.5: The Librarian's Reading Loop\n\n**What it does:** This is the engine of the indexing process. It takes our clean list of pages and processes them one by one. The `Execute Workflow` node calls the \"sub-workflow\" below for each and every page.\n**Analogy:** This loop tells our librarian to pick up one book (link) at a time and go through the full process of reading and indexing it."
},
"typeVersion": 1
},
{
"id": "0acc8bef-4f0b-42e2-b960-d5577ae4b5d8",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive11",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1888,
1472
],
"parameters": {
"color": 7,
"width": 288,
"height": 384,
"content": "### Step 1.5.1: Read a Single Page\n\n**What it does:** This node takes a single URL from the loop and fetches the HTML content of that page.\n**Analogy:** The librarian opens one book to the first page."
},
"typeVersion": 1
},
{
"id": "32c25451-55f0-44b6-9ff3-20a4339c44dd",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive12",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1360,
1328
],
"parameters": {
"color": 5,
"width": 512,
"height": 528,
"content": "### Why Use a Sub-Workflow?\n\nThe main reason is **memory management**.\n\nProcessing a single web page, especially creating vector embeddings for all its chunks, uses a lot of memory (RAM). The n8n documentation has over 1,000 pages. If we tried to process all of them in a single, linear run, the memory usage would build up with each page and would likely crash your n8n instance.\n\nWhen a sub-workflow finishes its run for one item (one page), n8n **clears out the memory** (RAM) it used before starting the next one.\n\nBy using a sub-workflow, we process pages one-by-one in a memory-efficient loop, making the entire indexing process stable and reliable, even on a massive scale."
},
"typeVersion": 1
},
{
"id": "582348d6-f15f-42c7-b3dc-4863fbde6983",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive13",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
2192,
1472
],
"parameters": {
"color": 7,
"width": 512,
"height": 384,
"content": "### Step 1.5.2: Get the Good Stuff\n\n**What it does:** It extracts only the main text from the article on the page, ignoring menus, footers, and images. Then, it cleans up the formatting.\n**Analogy:** The librarian tears out only the useful pages from the book and cleans up any smudges or messy handwriting."
},
"typeVersion": 1
},
{
"id": "db1bc60f-46a4-40a8-96b4-2fdbb7fc8e5c",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive14",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
2720,
1248
],
"parameters": {
"color": 5,
"width": 544,
"height": 608,
"content": "### Step 1.5.3: Avoid Re-reading\n\n**What it does:** It checks if we have already processed this exact page content in a previous run. If so, it stops here to save time and resources.\n**Analogy:** The librarian checks their \"already read\" pile to avoid reading the same book twice.\n\n**A Powerful Feature Explained:**\n\nUsually, the \"Remove Duplicates\" node only looks at the items in the *current* execution. But here, it's set to **\"Remove items seen in previous executions.\"**\n\nThis is incredibly powerful in a sub-workflow. Even though our sub-workflow only sees one page at a time, this node has a long-term memory. It remembers every single page it has *ever* processed across all previous runs.\n\nThis makes the workflow robust. You can run the indexing process again and again to update the knowledge base with new documentation, and it will never waste resources re-processing old pages or creating duplicate chunks in the in-memory store."
},
"typeVersion": 1
},
{
"id": "14010f42-6221-4f38-bbe8-7db8b60a4308",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive15",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
3280,
1248
],
"parameters": {
"color": 6,
"width": 656,
"height": 1040,
"content": "### Step 1.5.4: Create & Store the 'Magic Index Cards'\n\nThis is the most important step of indexing!\n\n1. **`Recursive Character Text Splitter`:** The text is broken down into small, overlapping paragraphs (\"chunks\").\n * **Analogy:** The librarian breaks the book down into small, meaningful paragraphs. This is vital for finding very specific answers later.\n\n2. **`OpenAI Embedding`:** An AI model converts each chunk of text into a list of numbers (a \"vector\" or \"embedding\").\n * **Analogy:** The librarian uses a 'magic pen' to turn each paragraph into a unique set of numbers. Paragraphs with similar meanings get similar numbers.\n\n3. **`Simple Vector Store (Insert)`:** The original text chunk and its new vector are saved together in n8n's in-memory vector store.\n * **Analogy:** The librarian files the original paragraph along with its magic number on an index card and puts it in a special filing cabinet (the in-memory vector store).\n * **The `Memory Key`** in this node acts as a label for the filing cabinet, ensuring the chat flow can find it later."
},
"typeVersion": 1
},
{
"id": "35c92a42-b5ad-4e87-b3a3-9d5c8446aabd",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive16",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1328,
2352
],
"parameters": {
"color": 7,
"width": 1200,
"height": 1456,
"content": "## Part 2: The Chatbot (Talking to the Expert Librarian)\n\n**Goal:** To provide an interface where a user can ask a question, have the system find relevant information from the knowledge base, and get an accurate, AI-generated answer.\n**Action:** After running the Indexing flow, **Activate** this workflow. Then, open the `When chat message received` node and use its Public URL to chat."
},
"typeVersion": 1
},
{
"id": "80b158c2-7e0f-4fc2-ab4e-ed7ff6b19f78",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive17",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1360,
2512
],
"parameters": {
"color": 4,
"width": 368,
"height": 448,
"content": "### The Front Desk: Ask Your Question Here\n\n**What it does:** This is the public-facing chat interface. When you send a message here, the entire RAG process begins.\n\n**Action:**\n1. **Activate** the entire workflow.\n2. To test directly inside n8n, click the **\"Open Chat\"** button in this node's parameter panel.\n3. To chat publicly, copy the **Public URL** from this node and open it in a new browser tab."
},
"typeVersion": 1
},
{
"id": "6b1e818e-2834-434a-a2e6-34fab11f0355",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive18",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1744,
2624
],
"parameters": {
"color": 5,
"width": 560,
"height": 400,
"content": "### The Brains: The Expert Librarian\n\n**What it does:** This is the AI agent that orchestrates the entire response. It understands your question, decides which tools to use, and formulates the final answer.\n**System Prompt:** Its instructions (in the System Message) are very strict: **\"Only use the provided documents to answer. Do not make things up.\"**"
},
"typeVersion": 1
},
{
"id": "b287613a-0b9d-4663-b389-cdab7066751a",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive19",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1360,
2976
],
"parameters": {
"color": 7,
"width": 368,
"height": 592,
"content": "### The 'Synthesizer' & 'Strategist'\n\n**What it does:** This is the Large Language Model (LLM). It has two critical jobs:\n\n1. **The Strategist:** Based on your question and the conversation history, it decides *what information to look for* in the knowledge base (the library).\n2. **The Synthesizer:** After getting the relevant documents, it generates the final, human-readable answer based *only* on that context.\n\n\n**Analogy:** This is the part of the librarian's brain that first decides which section of the library to search, and then reads the specific index cards to write a clear, helpful response.\n\nThe model used here is **GPT 4.1-nano**."
},
"typeVersion": 1
},
{
"id": "7f2a8dd1-f702-4eec-b5e7-e7e93eb72201",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive20",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1744,
3040
],
"parameters": {
"color": 7,
"width": 288,
"height": 528,
"content": "### Short-Term Memory\n\n**What it does:** This node stores the last few messages of your conversation.\n**Analogy:** This helps the librarian remember what you just talked about, so you can ask follow-up questions without having to repeat yourself."
},
"typeVersion": 1
},
{
"id": "fea370b3-4d87-4f6d-baf4-d59bffee633f",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive21",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
2048,
3040
],
"parameters": {
"color": 6,
"width": 448,
"height": 736,
"content": "### The Tool: The 'Magic Filing Cabinet' Retriever\n\nThis is the \"Retrieval\" part of RAG. When the agent gets your question, it uses this tool to find the most relevant information.\n\n1. **`OpenAI Query Embedding`:** Your question is converted into its own 'magic number' (vector).\n\n2. **`Official n8n Documentation (Vector Store Retrieve)`:** It searches the in-memory vector store for the document chunks with the most similar 'magic numbers' to your question's number.\n\n\n**Analogy:** The librarian takes your question, finds the most relevant index cards from the filing cabinet, and hands them over to the 'Synthesizer' to formulate the answer."
},
"typeVersion": 1
},
{
"id": "126b13aa-a2eb-4454-badb-fa97bf30d03b",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Démarrer l'indexation",
"type": "n8n-nodes-base.manualTrigger",
"creator": "Lucas Peyrin",
"position": [
1680,
912
],
"parameters": {},
"typeVersion": 1
},
{
"id": "9cceee8c-9f01-44d4-9dfd-03e28c1d40f6",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Chatbot RAG",
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
"creator": "Lucas Peyrin",
"position": [
1520,
2816
],
"webhookId": "285e5d0b-ffcf-44e8-a80c-0683966b78a4",
"parameters": {
"public": true,
"options": {
"title": "",
"subtitle": "",
"customCss": "/* === THEME OVERRIDE: n8n Glass & Glow (Revised) === */\n\n/*\n This theme is built upon the default n8n chat CSS variables.\n We'll use your desired branding and layout, and then add\n the \"glass\" effect using a semi-transparent background\n and a backdrop-filter on the main window class.\n*/\n\n:root {\n /* --- 1. CORE BRANDING & FONTS --- */\n /* Your choices for font and primary color are applied here. */\n --chat--font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;\n --chat--color-primary: #EA4B71; /* n8n Red */\n --chat--color-primary-shade-50: #D93A60;\n --chat--color-primary-shade-100: #C82A50;\n --chat--color-secondary: #20b69e; /* Kept default secondary for things like file uploads */\n --chat--color-secondary-shade-50: #1ca08a;\n --chat--color-white: #ffffff;\n --chat--color-light: #f2f4f8;\n --chat--color-dark: #1F2937; /* Darker, more modern text color */\n --chat--color-disabled: #9CA3AF;\n --chat--color-typing: #404040;\n\n /* --- 2. WINDOW & LAYOUT --- */\n /* Your dimensions and rounded corners. The actual glass effect is handled in the class override below. */\n --chat--window--width: 430px;\n --chat--window--height: 80vh;\n --chat--window--border-radius: 16px;\n --chat--window--border: 1px solid rgba(255, 255, 255, 0.3);\n --chat--spacing: 1rem; /* 16px base spacing */\n --chat--window--z-index: 9999;\n --chat--window--bottom: var(--chat--spacing);\n --chat--window--right: var(--chat--spacing);\n --chat--window--margin-bottom: var(--chat--spacing);\n\n /* --- 3. GLASSMORPHISM BACKGROUNDS --- */\n /* We make the main sections transparent to let the glass window show through. */\n --chat--header--background: transparent;\n --chat--body--background: transparent;\n --chat--footer--background: transparent;\n\n /* --- 4. HEADER STYLING (REMOVED) --- */\n /* The header is removed by setting its height, padding, and border to 0/none. */\n --chat--header-height: 0;\n --chat--header--padding: 0;\n --chat--header--border-bottom: none;\n /* The rest of the header variables are now irrelevant. */\n --chat--header--color: var(--chat--color-dark);\n --chat--heading--font-size: 1.5rem;\n --chat--subtitle--font-size: 0.875rem;\n --chat--subtitle--line-height: 1.4;\n --chat--close--button--color-hover: var(--chat--color-primary);\n\n /* --- 5. MESSAGE BUBBLES --- */\n --chat--message--font-size: 0.875rem;\n --chat--message--padding: 12px 16px;\n --chat--message--border-radius: 12px;\n --chat--message-line-height: 1.6;\n --chat--message--margin-bottom: calc(var(--chat--spacing) * 0.75);\n /* Adjusted padding: Top is 0, sides and bottom use the base spacing. */\n --chat--messages-list--padding: 0 var(--chat--spacing) var(--chat--spacing);\n --chat--message--bot--background: var(--chat--color-white);\n --chat--message--bot--color: #111827;\n --chat--message--bot--border: none;\n --chat--message--user--background: var(--chat--color-primary);\n --chat--message--user--color: var(--chat--color-white);\n --chat--message--user--border: none;\n\n /* --- 6. INPUT AREA --- */\n --chat--textarea--height: 50px;\n --chat--textarea--max-height: 10rem;\n --chat--input--font-size: 0.9rem;\n --chat--input--border: 0;\n --chat--input--border-radius: 12px; /* Rounded input field */\n --chat--input--padding: 12px 16px;\n --chat--input--background: rgba(255, 255, 255, 0.5); /* Semi-transparent input */\n --chat--input--text-color: #111827;\n --chat--input--line-height: 1.5;\n --chat--input--border-active: 0;\n\n /* --- 7. 
SEND & FILE BUTTONS --- */\n --chat--input--send--button--background: transparent;\n --chat--input--send--button--color: var(--chat--color-primary);\n --chat--input--send--button--background-hover: transparent;\n --chat--input--send--button--color-hover: var(--chat--color-primary-shade-50);\n --chat--input--file--button--background: transparent;\n --chat--input--file--button--color: var(--chat--color-secondary);\n --chat--input--file--button--background-hover: transparent;\n --chat--input--file--button--color-hover: var(--chat--color-secondary-shade-50);\n\n /* --- 8. TOGGLE BUTTON & OTHERS --- */\n /* Using your primary brand color for the main toggle button */\n --chat--toggle--size: 64px;\n --chat--toggle--width: var(--chat--toggle--size);\n --chat--toggle--height: var(--chat--toggle--size);\n --chat--toggle--border-radius: 50%;\n --chat--toggle--background: var(--chat--color-primary);\n --chat--toggle--hover--background: var(--chat--color-primary-shade-50);\n --chat--toggle--active--background: var(--chat--color-primary-shade-100);\n --chat--toggle--color: var(--chat--color-white);\n}\n\n/* === CLASS OVERRIDES === */\n/*\n These are essential for effects that CSS variables can't control,\n like the glass blur, shadows, and max-height.\n*/\n\n.chat-window {\n /* This is the magic for the glass effect! */\n background-color: rgba(249, 243, 245, 0.6); /* A semi-transparent background color */\n backdrop-filter: blur(20px);\n -webkit-backdrop-filter: blur(20px); /* For Safari compatibility */\n\n /* A subtle shadow helps lift the window off the page */\n box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.1);\n\n /* Here we apply the max-height you wanted */\n max-height: 750px;\n}\n\n/* Add top margin to the first message only for initial spacing */\n.chat-messages-list .chat-message:first-child {\n margin-top: var(--chat--spacing);\n}\n\n/* Make user messages slightly wider for better balance */\n.chat-message.is-user {\n\tmax-width: 70%;\n}\n\n.chat-message.is-bot {\n\tmax-width: 80%;\n}\n\n/* Add a subtle glow to messages on hover */\n.chat-message {\n transition: transform 0.2s ease, box-shadow 0.2s ease;\n}\n.chat-message:hover {\n transform: translateY(-2px);\n box-shadow: 0 0 25px 0 rgba(234, 75, 113, 0.2);\n}\n\n/* === PAGE BACKGROUND === */\n/* Your dotted background to simulate the n8n canvas. Perfect as is! */\nbody {\n background-color: #FDFBFF;\n background-image:\n radial-gradient(circle at 1px 1px, rgba(0,0,0,0.07) 1px, transparent 0),\n linear-gradient(135deg, #F9F3F5 0%, #EFF3FB 100%);\n background-size: 25px 25px, 100% 100%;\n}",
"inputPlaceholder": "Type your n8n related question.."
},
"initialMessages": "Hello! I'm an AI assistant trained on the official n8n documentation.\nYou can ask me anything about n8n nodes, workflows, or concepts. For example:\n\"How does the IF node work?\"\n\"What is a sub-workflow?\""
},
"typeVersion": 1.1
},
{
"id": "62f102ca-7c80-4b5c-b097-3f6383f2a6eb",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive1",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
400,
1600
],
"parameters": {
"width": 624,
"height": 320,
"content": "## Setup Step 4: Chat with Your Expert!\n\nCongratulations, the knowledge base is built! Now it's time to ask the expert a question.\n\n**Action:**\n1. **Activate** the entire workflow using the toggle switch at the top of the screen.\n2. Open the **`RAG Chatbot`** chat trigger node (bottom-left).\n3. To chat:\n * Copy its **Public URL** and open it in a new browser tab.\n * OR, click the **\"Open Chat\"** button in the node's panel to test it directly inside n8n.\n\n\n**Try asking:** \"How does the IF node work?\" or \"What is a sub-workflow?\""
},
"typeVersion": 1
},
{
"id": "4b82ed61-eee5-4a00-a8af-45157fa89cd6",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive24",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
400,
640
],
"parameters": {
"width": 656,
"height": 272,
"content": "## Setup Step 1: Connect Your Open AI Credentials\n\nThis workflow uses OpenAI' models for generating embeddings and chat responses. You'll need a OpenAI API key.\n\n**Action:**\n1. Go to any of the OpenAI nodes (e.g., **`OpenAI Chat Model`**).\n2. Click the **Credential** dropdown and select **`+ Create New Credential`**.\n3. In the window that opens, paste your **OpenAI API Key**.\n4. Click **Save**. Your new credential will be created and automatically selected for this node."
},
"typeVersion": 1
},
{
"id": "ec88d605-9af4-4144-99a4-4f394a8703ab",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive27",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
400,
928
],
"parameters": {
"width": 864,
"height": 304,
"content": "## Setup Step 2: Apply Credentials to All GPT Nodes\n\nYour new OpenAI credential is now saved. Let's make sure all the other Gemini nodes are using it.\n\n**Action:**\nGo to the remaining Gemini nodes on the canvas. Your new credential will now be available in their **Credential** dropdown lists. Simply select it for each one.\n\n**Nodes to check:**\n* `OpenAI Chat Model`\n* `Embeddings OpenAI`\n* `Embeddings OpenAI`"
},
"typeVersion": 1
},
{
"id": "c6da61a2-5ad4-4263-af1e-a40603fb4654",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Note adhésive28",
"type": "n8n-nodes-base.stickyNote",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
400,
1248
],
"parameters": {
"width": 784,
"height": 336,
"content": "## Setup Step 3: Build the Knowledge Base (Indexing)\n\nThis is the one-time step where the AI \"reads\" all the documentation.\n\n**Action:**\n1. Find the **`Start Indexing`** manual trigger node at the top-left of the canvas.\n2. Click its **\"Execute workflow\"** button.\n\n\n**⚠️ IMPORTANT:**\n* **Be Patient!** This process will take **15-20 minutes** to scrape, process, and store the entire n8n documentation in memory.\n* **The knowledge base is temporary.** It is stored in n8n's memory and will be **erased if you restart your n8n instance**. If that happens, you must run this indexing step again."
},
"typeVersion": 1
},
{
"id": "15b0b258-5fa3-4084-a312-7a8b8f9d420c",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Ingérer une page web",
"type": "n8n-nodes-base.executeWorkflowTrigger",
"notes": "© 2025 Lucas Peyrin",
"creator": "Lucas Peyrin",
"position": [
1680,
1680
],
"parameters": {
"workflowInputs": {
"values": [
{
"name": "path"
}
]
}
},
"typeVersion": 1.1
},
{
"id": "14d75d60-9364-454f-9903-f523a3532e4c",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Vector Store simple",
"type": "@n8n/n8n-nodes-langchain.vectorStoreInMemory",
"creator": "Lucas Peyrin",
"position": [
3472,
1680
],
"parameters": {
"mode": "insert",
"memoryKey": {
"__rl": true,
"mode": "id",
"value": "n8n_documentation_vector_store"
},
"embeddingBatchSize": 30
},
"typeVersion": 1.3
},
{
"id": "5ef1d0ac-137f-4b20-a9e0-150f2a36944c",
"cid": "Ikx1Y2FzIFBleXJpbiI",
"name": "Documentation officielle n8n",
"type": "@n8n/n8n-nodes-langchain.vectorStoreInMemory",
"creator": "Lucas Peyrin",
"position": [
2128,
3424
],
"parameters": {
"mode": "retrieve-as-tool",
"topK": 10,
"memoryKey": {
"__rl": true,
"mode": "id",
"value": "=n8n_documentation_vector_store"
},
"toolDescription": "Get related chunks of the official n8n documentation.",
"includeDocumentMetadata": false
},
"typeVersion": 1.3
},
{
"id": "c508f6fd-68ea-4e65-9f57-2a1476baf0c6",
"name": "Embeddings OpenAI",
"type": "@n8n/n8n-nodes-langchain.embeddingsOpenAi",
"position": [
3392,
1888
],
"parameters": {
"options": {}
},
"credentials": {
"openAiApi": {
"id": "DTF8OQIcI5iP0YR4",
"name": "OpenAi account"
}
},
"typeVersion": 1.2
},
{
"id": "4674c996-c9f2-4c14-b27d-73f0aa5ce0d2",
"name": "Modèle de chat OpenAI",
"type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
"position": [
1488,
3440
],
"parameters": {
"model": {
"__rl": true,
"mode": "list",
"value": "gpt-4.1-nano",
"cachedResultName": "gpt-4.1-nano"
},
"options": {}
},
"credentials": {
"openAiApi": {
"id": "DTF8OQIcI5iP0YR4",
"name": "OpenAi account"
}
},
"typeVersion": 1.2
},
{
"id": "02480036-e68b-452e-a5c3-a7414021d678",
"name": "Embeddings OpenAI1",
"type": "@n8n/n8n-nodes-langchain.embeddingsOpenAi",
"position": [
2224,
3632
],
"parameters": {
"options": {}
},
"credentials": {
"openAiApi": {
"id": "DTF8OQIcI5iP0YR4",
"name": "OpenAi account"
}
},
"typeVersion": 1.2
}
],
"active": true,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "59702c6f-70d1-4136-95ac-361964031c94",
"connections": {
"9cceee8c-9f01-44d4-9dfd-03e28c1d40f6": {
"main": [
[
{
"node": "c58ee1a1-1626-441b-9b19-26f31acc2308",
"type": "main",
"index": 0
}
]
]
},
"f8083b86-d34e-4499-8366-1321ed4cff73": {
"ai_memory": [
[
{
"node": "c58ee1a1-1626-441b-9b19-26f31acc2308",
"type": "ai_memory",
"index": 0
}
]
]
},
"126b13aa-a2eb-4454-badb-fa97bf30d03b": {
"main": [
[
{
"node": "327aafbb-3628-4fdc-84fc-567caa2643c8",
"type": "main",
"index": 0
}
]
]
},
"15b0b258-5fa3-4084-a312-7a8b8f9d420c": {
"main": [
[
{
"node": "40517663-1ce0-47d8-9596-2bff302d0e58",
"type": "main",
"index": 0
}
]
]
},
"a1a3eb41-4d40-4cab-8f9f-507fe968b211": {
"main": [
[
{
"node": "65a57136-d329-4455-9301-13f808f8dfef",
"type": "main",
"index": 0
}
]
]
},
"c508f6fd-68ea-4e65-9f57-2a1476baf0c6": {
"ai_embedding": [
[
{
"node": "14d75d60-9364-454f-9903-f523a3532e4c",
"type": "ai_embedding",
"index": 0
}
]
]
},
"4674c996-c9f2-4c14-b27d-73f0aa5ce0d2": {
"ai_languageModel": [
[
{
"node": "c58ee1a1-1626-441b-9b19-26f31acc2308",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"02480036-e68b-452e-a5c3-a7414021d678": {
"ai_embedding": [
[
{
"node": "5ef1d0ac-137f-4b20-a9e0-150f2a36944c",
"type": "ai_embedding",
"index": 0
}
]
]
},
"37b9faf8-da95-4add-a8fb-d16083582e81": {
"main": [
[
{
"node": "c54c329b-3d8a-47c6-87f3-ffb06d6d71d5",
"type": "main",
"index": 0
}
]
]
},
"e4b137f4-11b8-4559-b1d1-eb66ea99a59d": {
"ai_document": [
[
{
"node": "14d75d60-9364-454f-9903-f523a3532e4c",
"type": "ai_document",
"index": 0
}
]
]
},
"4e9f3fa9-2c4f-49e4-b9c3-3ff2130a37dd": {
"main": [
[
{
"node": "793bbb80-797b-429c-8710-41dec44ae84c",
"type": "main",
"index": 0
}
]
]
},
"40517663-1ce0-47d8-9596-2bff302d0e58": {
"main": [
[
{
"node": "cb53b073-26df-401e-927a-5e0f3d27acfc",
"type": "main",
"index": 0
}
]
]
},
"65a57136-d329-4455-9301-13f808f8dfef": {
"main": [
[
{
"node": "4e9f3fa9-2c4f-49e4-b9c3-3ff2130a37dd",
"type": "main",
"index": 0
}
]
]
},
"03e6b6f4-1674-4be6-b5c8-9505d39b3d13": {
"main": [
[
{
"node": "a1a3eb41-4d40-4cab-8f9f-507fe968b211",
"type": "main",
"index": 0
}
]
]
},
"5ef1d0ac-137f-4b20-a9e0-150f2a36944c": {
"ai_tool": [
[
{
"node": "c58ee1a1-1626-441b-9b19-26f31acc2308",
"type": "ai_tool",
"index": 0
}
]
]
},
"cb53b073-26df-401e-927a-5e0f3d27acfc": {
"main": [
[
{
"node": "37b9faf8-da95-4add-a8fb-d16083582e81",
"type": "main",
"index": 0
}
]
]
},
"793bbb80-797b-429c-8710-41dec44ae84c": {
"main": [
[],
[
{
"node": "7051a5f4-ee42-42ce-b678-f76e5e96e22f",
"type": "main",
"index": 0
}
]
]
},
"327aafbb-3628-4fdc-84fc-567caa2643c8": {
"main": [
[
{
"node": "03e6b6f4-1674-4be6-b5c8-9505d39b3d13",
"type": "main",
"index": 0
}
]
]
},
"de258e6d-8c4f-4c3c-934b-f96413f4d617": {
"ai_textSplitter": [
[
{
"node": "e4b137f4-11b8-4559-b1d1-eb66ea99a59d",
"type": "ai_textSplitter",
"index": 0
}
]
]
},
"7051a5f4-ee42-42ce-b678-f76e5e96e22f": {
"main": [
[
{
"node": "793bbb80-797b-429c-8710-41dec44ae84c",
"type": "main",
"index": 0
}
]
]
},
"c54c329b-3d8a-47c6-87f3-ffb06d6d71d5": {
"main": [
[
{
"node": "14d75d60-9364-454f-9903-f523a3532e4c",
"type": "main",
"index": 0
}
]
]
}
}
}
How do I use this workflow?
Copy the JSON configuration above, create a new workflow in your n8n instance, choose "Import from JSON", paste the configuration, and adjust the authentication settings to your needs.
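If you prefer to script the import instead of pasting the JSON by hand, n8n's public REST API can create workflows programmatically. The sketch below is a minimal example, not part of the original template: it assumes the public API is enabled on your instance, that `N8N_BASE_URL` and `N8N_API_KEY` are placeholders you provide, and that the export above has been saved as `./workflow.json`. The create endpoint may reject read-only fields from the export (such as `id`, `meta`, `active`, or `tags`), so only the core fields are sent.

```typescript
// Minimal sketch: create this workflow via the n8n public REST API (Node 18+).
// N8N_BASE_URL, N8N_API_KEY and ./workflow.json are placeholders you supply.
import { readFile } from "node:fs/promises";

const baseUrl = process.env.N8N_BASE_URL ?? "http://localhost:5678";
const apiKey = process.env.N8N_API_KEY ?? "<your-n8n-api-key>";

async function importWorkflow(path: string): Promise<void> {
  const exported = JSON.parse(await readFile(path, "utf8"));

  // Keep only the core workflow fields; read-only fields from the export
  // (id, meta, active, tags, ...) are stripped to avoid API validation errors.
  const payload = {
    name: exported.name,
    nodes: exported.nodes,
    connections: exported.connections,
    settings: exported.settings ?? {},
  };

  const res = await fetch(`${baseUrl}/api/v1/workflows`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "X-N8N-API-KEY": apiKey,
    },
    body: JSON.stringify(payload),
  });

  if (!res.ok) {
    throw new Error(`Import failed: ${res.status} ${await res.text()}`);
  }
  const created = await res.json();
  console.log(`Created workflow ${created.id}; attach credentials in the UI next.`);
}

importWorkflow("./workflow.json").catch(console.error);
```

Credentials are never included in an export, so after importing you still need to attach your OpenAI credential to the three OpenAI nodes as described in the setup sticky notes.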
Which scenarios is this workflow suited to?
Advanced - Internal Wiki, AI RAG
Does it cost anything?
This workflow is completely free and can be used as-is. Note that third-party services used in the workflow (such as the OpenAI API) may require payment on your side.
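For the internal-wiki scenario, the `RAG Chatbot` chat trigger is public, so a wiki page or backend can call its webhook directly instead of using the hosted chat page. The sketch below is an assumption-heavy example rather than a documented contract: the `CHAT_WEBHOOK_URL` placeholder must be copied from the chat trigger node's panel, and the `{ action, sessionId, chatInput }` payload follows the convention used by n8n's `@n8n/chat` widget; verify both against your instance and n8n version before relying on them.

```typescript
// Minimal sketch: query the RAG chatbot's public chat webhook from another app.
// Assumptions: CHAT_WEBHOOK_URL comes from the "RAG Chatbot" chat trigger node,
// and the payload shape matches the @n8n/chat widget convention.
const CHAT_WEBHOOK_URL =
  process.env.CHAT_WEBHOOK_URL ?? "https://<your-n8n-instance>/webhook/<webhook-id>/chat";

async function askDocsBot(question: string, sessionId: string): Promise<string> {
  const res = await fetch(CHAT_WEBHOOK_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      action: "sendMessage", // chat message event expected by the trigger
      sessionId,             // keeps the Simple Memory buffer per conversation
      chatInput: question,
    }),
  });

  if (!res.ok) {
    throw new Error(`Chat request failed: ${res.status} ${await res.text()}`);
  }
  const data = await res.json();
  return data.output ?? JSON.stringify(data); // the agent usually replies in "output"
}

askDocsBot("How does the IF node work?", "wiki-user-42").then(console.log);
```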
Shared by Ayham (@ayhamjo7)