Creating a research-backed SEO content brief from keywords for Google Docs
This is an automation workflow with 10 nodes in the Miscellaneous, AI RAG, and Multimodal AI categories. It mainly uses Code, GoogleDocs, FormTrigger, HttpRequest, Agent, and other nodes to generate research-backed SEO content briefs in Google Docs with OpenAI and Firecrawl. A standalone sketch of the Firecrawl search request it sends appears right before the workflow JSON.
- Target API credentials may be required
- OpenAI API Key
Nodes used (10)
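Before importing the JSON below, you can sanity-check your Firecrawl key with a standalone call equivalent to the one the workflow's FireCrawl Search & Scrape node sends. This is a minimal sketch, not part of the workflow: it assumes Node 18+, and the FIRECRAWL_API_KEY environment variable and the example keyword are placeholders you supply yourself.

```javascript
// Minimal sketch: the same Firecrawl /v2/search request the "FireCrawl Search & Scrape" node makes.
// Requires Node 18+ (save as check-firecrawl.mjs). FIRECRAWL_API_KEY and the keyword are placeholders.
const FIRECRAWL_API_KEY = process.env.FIRECRAWL_API_KEY;

const response = await fetch('https://api.firecrawl.dev/v2/search', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${FIRECRAWL_API_KEY}`,
  },
  body: JSON.stringify({
    query: 'GEO strategy',          // example keyword from the setup checklist
    sources: ['web'],
    limit: 5,                       // top 5 results, as configured in the workflow
    location: 'United States',
    scrapeOptions: {
      onlyMainContent: true,
      maxAge: 172800000,            // accept cached scrapes up to 48 hours old
      parsers: ['pdf'],
      formats: ['markdown', 'links'],
    },
  }),
});

const data = await response.json();
// The AI Agent later reads data.web[n].markdown and data.web[n].url from this response shape.
console.log((data.data?.web ?? []).map((r) => r.url));
```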
{
"id": "qyhYS0xy7fCuyqLn",
"meta": {
"instanceId": "ec3694b688f9802e6ff1f61802ca6bc63a40ecfa76d32f2c71cfdf2978426fce",
"templateCredsSetupCompleted": true
},
"name": "Create keyword-to-Google Docs research-backed SEO content brief",
"tags": [],
"nodes": [
{
"id": "d0cbb365-b3df-430e-89a6-338bc316315e",
"name": "Bei Formularübermittlung",
"type": "n8n-nodes-base.formTrigger",
"position": [
0,
0
],
"webhookId": "e561604e-f1f9-445d-b734-d268da983895",
"parameters": {
"options": {
"appendAttribution": false,
"respondWithOptions": {
"values": {
"redirectUrl": "={{google_drive_folder_url}}",
"respondWith": "redirect"
}
}
},
"formTitle": "VertoDigital Keyword to SEO Content Brief",
"formFields": {
"values": [
{
"html": "<div style=\"text-align:left !important; font-family:Arial, Helvetica, sans-serif; font-size:16px;\"> <p style=\"text-align:left !important; margin:0 0 24px 0; line-height:1.9;\"> Enter the primary keyword or topic for the content brief. This should represent the main focus of the SEO-optimized content you want to create. Once completed, you'll be redirected to a Google Drive folder containing your generated SEO content brief. </p> \n<p style=\"text-align:left !important; margin:0 0 24px 0; line-height:1.9;\">Here is how the workflow works:</p>\n<ol style=\"text-align:left !important; margin:0; padding-left:1.5rem; line-height:1.9;\"> <li style=\"text-align:left !important; margin:0 0 18px 0;\">Firecrawl searches the web for your keyword and scrapes the top 5 relevant pages (main content only).</li> <li style=\"text-align:left !important; margin:0 0 18px 0;\">GPT AI Agent (with Think) analyzes the sources and generates the content brief in Markdown.</li> <li style=\"text-align:left !important; margin:0 0 18px 0;\">The Markdown brief is converted and formatted for Google Docs.</li> <li style=\"text-align:left !important; margin:0 0 18px 0;\">A Google Doc titled “SEO Brief for <Keyword>” is created and populated.</li> <li style=\"text-align:left !important; margin:0 0 18px 0;\">You are redirected to the Google Drive folder containing your SEO content brief.</li> </ol> </div>",
"fieldType": "html",
"elementName": "Description"
},
{
"fieldLabel": "Keyword/Topic",
"placeholder": "Write your keyword/topic here",
"requiredField": true
}
]
},
"responseMode": "lastNode"
},
"notesInFlow": false,
"typeVersion": 2.2
},
{
"id": "c3ac02d5-67c7-4f24-8ae1-0af6ba9f4d79",
"name": "FireCrawl Search & Scrape",
"type": "n8n-nodes-base.httpRequest",
"position": [
256,
0
],
"parameters": {
"url": "https://api.firecrawl.dev/v2/search",
"method": "POST",
"options": {},
"jsonBody": "={\n \"query\": \"{{ $('On form submission').item.json['Keyword/Topic'] }}\",\n \"sources\": [\n \"web\"\n ],\n \"limit\": 5,\n \"location\": \"United States\",\n \"scrapeOptions\": {\n \"onlyMainContent\": true,\n \"maxAge\": 172800000,\n \"parsers\": [\n \"pdf\"\n ],\n \"formats\": [\n \"markdown\",\n \"links\"\n ]\n }\n}",
"sendBody": true,
"sendHeaders": true,
"specifyBody": "json",
"headerParameters": {
"parameters": [
{
"name": "Authorization",
"value": "Bearer {{API_KEY}}"
}
]
}
},
"typeVersion": 4.2
},
{
"id": "8d7a216c-017e-49fa-a3dc-980aab03d32c",
"name": "Markdown zu JSON",
"type": "n8n-nodes-base.code",
"position": [
816,
0
],
"parameters": {
"jsCode": "// Markdown → Google Docs batchUpdate (safe ranges, compact spacing, proper headings/lists/inline)\n// Input: items[0].json.output (Markdown)\n// Output: { json: { requests: [...] } }\n\nconst rawInput = items[0].json.output ?? \"\";\n\n/* =========================\n Helpers & Normalization\n ========================= */\nfunction hexToRgb(hex) {\n if (!hex || !hex.startsWith('#')) return null;\n const r = parseInt(hex.slice(1, 3), 16) / 255;\n const g = parseInt(hex.slice(3, 5), 16) / 255;\n const b = parseInt(hex.slice(5, 7), 16) / 255;\n return { red: r, green: g, blue: b };\n}\n\nfunction headingStyleForLevel(level) {\n const map = { 1: 'HEADING_1', 2: 'HEADING_2', 3: 'HEADING_3', 4: 'HEADING_4', 5: 'HEADING_5', 6: 'HEADING_6' };\n return map[level] || 'HEADING_1';\n}\n\n// Collapse 3+ blank lines → 2; trim trailing spaces\nfunction normalizeMarkdown(md) {\n return md\n .replace(/\\r\\n/g, '\\n')\n .replace(/[ \\t]+\\n/g, '\\n')\n .replace(/\\n{3,}/g, '\\n\\n')\n .replace(/[ \\t]+$/gm, '');\n}\n\n// Parse inline markdown for one line\n// Returns { text, spans[] } where spans: { offsetStart, offsetEnd, bold?, italic?, link? }\nfunction parseInline(line) {\n let text = line;\n const spans = [];\n\n // Handle your previous LINK markers if present (safety)\n // \\u0001LINKSTART\\u0001Label\\u0001LINKURL:https://...\\u0001LINKEND\\u0001\n if (text.includes('\\u0001LINKSTART\\u0001')) {\n let acc = '';\n for (let i = 0; i < text.length; ) {\n if (text.startsWith('\\u0001LINKSTART\\u0001', i)) {\n i += '\\u0001LINKSTART\\u0001'.length;\n const urlTag = '\\u0001LINKURL:';\n const endTag = '\\u0001LINKEND\\u0001';\n const urlIdx = text.indexOf(urlTag, i);\n const endIdx = text.indexOf(endTag, i);\n const label = text.slice(i, urlIdx);\n const url = text.slice(urlIdx + urlTag.length, endIdx);\n const start = acc.length;\n acc += label;\n spans.push({ offsetStart: start, offsetEnd: start + label.length, link: url });\n i = endIdx + endTag.length;\n } else {\n acc += text[i++];\n }\n }\n text = acc;\n }\n\n // Markdown links: [label](url)\n text = text.replace(/\\[([^\\]]+)\\]\\((https?:\\/\\/[^\\s)]+)\\)/g, (m, label, url) => {\n const start = m.indexOf(label);\n return `\\u0002L${label}\\u0002U${url}\\u0002E`;\n });\n\n // Bold+Italic ***text*** or ___text___\n text = text.replace(/(\\*{3}|_{3})([\\s\\S]*?)\\1/g, (m, _w, inner) => `\\u0002BIS${inner}\\u0002BIE`);\n // Bold **text** or __text__\n text = text.replace(/(\\*{2}|__)([\\s\\S]*?)\\1/g, (m, _w, inner) => `\\u0002BS${inner}\\u0002BE`);\n // Italic *text* or _text_\n text = text.replace(/(\\*|_)([\\s\\S]*?)\\1/g, (m, _w, inner) => `\\u0002IS${inner}\\u0002IE`);\n\n // Second pass: build clean text + spans\n let clean = '';\n for (let i = 0; i < text.length; ) {\n if (text.startsWith('\\u0002L', i)) {\n i += 2; // \\u0002 + 'L'\n const uIdx = text.indexOf('\\u0002U', i);\n const eIdx = text.indexOf('\\u0002E', i);\n const label = text.slice(i, uIdx);\n const url = text.slice(uIdx + 2, eIdx);\n const start = clean.length;\n clean += label;\n spans.push({ offsetStart: start, offsetEnd: start + label.length, link: url });\n i = eIdx + 2;\n continue;\n }\n if (text.startsWith('\\u0002BIS', i)) {\n i += 4;\n const eIdx = text.indexOf('\\u0002BIE', i);\n const inner = text.slice(i, eIdx);\n const start = clean.length;\n clean += inner;\n spans.push({ offsetStart: start, offsetEnd: start + inner.length, bold: true, italic: true });\n i = eIdx + 4;\n continue;\n }\n if (text.startsWith('\\u0002BS', i)) {\n i += 3;\n const eIdx = 
text.indexOf('\\u0002BE', i);\n const inner = text.slice(i, eIdx);\n const start = clean.length;\n clean += inner;\n spans.push({ offsetStart: start, offsetEnd: start + inner.length, bold: true });\n i = eIdx + 3;\n continue;\n }\n if (text.startsWith('\\u0002IS', i)) {\n i += 3;\n const eIdx = text.indexOf('\\u0002IE', i);\n const inner = text.slice(i, eIdx);\n const start = clean.length;\n clean += inner;\n spans.push({ offsetStart: start, offsetEnd: start + inner.length, italic: true });\n i = eIdx + 3;\n continue;\n }\n clean += text[i++];\n }\n return { text: clean, spans };\n}\n\n/* =========================\n Block-level parser\n ========================= */\nfunction parseMarkdownToModel(md) {\n const lines = normalizeMarkdown(md).split('\\n');\n\n let docText = '';\n const blocks = []; // { start, end, kind: 'heading'|'paragraph'|'blockquote'|'hr'|'numbered'|'bulleted', level? }\n const inlineSpans = []; // { start, end, bold?, italic?, link? }\n const listGroups = []; // contiguous list ranges → { kind, start, end }\n let cursor = 0;\n\n function addListLine(kind, start, end) {\n const last = listGroups[listGroups.length - 1];\n if (last && last.kind === kind && last.end === start) last.end = end;\n else listGroups.push({ kind, start, end });\n }\n\n for (let i = 0; i < lines.length; i++) {\n const raw = lines[i];\n\n // Horizontal rule\n if (/^\\s*(?:-{3,}|_{3,}|\\*{3,})\\s*$/.test(raw)) {\n const hr = '────────────────────────\\n';\n const start = cursor;\n docText += hr;\n const end = cursor + hr.length;\n blocks.push({ start, end, kind: 'hr' });\n cursor = end;\n continue;\n }\n\n // Headings\n const h = raw.match(/^(\\s{0,3})(#{1,6})\\s+(.*)$/);\n if (h) {\n const level = h[2].length;\n const { text, spans } = parseInline(h[3]);\n const para = text + '\\n';\n const start = cursor;\n docText += para;\n const end = cursor + para.length;\n blocks.push({ start, end, kind: 'heading', level });\n spans.forEach(sp => inlineSpans.push({ start: start + sp.offsetStart, end: start + sp.offsetEnd, bold: sp.bold, italic: sp.italic, link: sp.link }));\n cursor = end;\n continue;\n }\n\n // Blockquote\n const bq = raw.match(/^\\s*>\\s?(.*)$/);\n if (bq) {\n const { text, spans } = parseInline(bq[1]);\n const para = text + '\\n';\n const start = cursor;\n docText += para;\n const end = cursor + para.length;\n blocks.push({ start, end, kind: 'blockquote' });\n spans.forEach(sp => inlineSpans.push({ start: start + sp.offsetStart, end: start + sp.offsetEnd, bold: sp.bold, italic: sp.italic, link: sp.link }));\n cursor = end;\n continue;\n }\n\n // Numbered list\n const num = raw.match(/^\\s*\\d+\\.\\s+(.*)$/);\n if (num) {\n const { text, spans } = parseInline(num[1]);\n const para = text + '\\n';\n const start = cursor;\n docText += para;\n const end = cursor + para.length;\n blocks.push({ start, end, kind: 'numbered' });\n addListLine('numbered', start, end);\n spans.forEach(sp => inlineSpans.push({ start: start + sp.offsetStart, end: start + sp.offsetEnd, bold: sp.bold, italic: sp.italic, link: sp.link }));\n cursor = end;\n continue;\n }\n\n // Bulleted list\n const bul = raw.match(/^\\s*([-*•])\\s+(.*)$/);\n if (bul) {\n const { text, spans } = parseInline(bul[2]);\n const para = text + '\\n';\n const start = cursor;\n docText += para;\n const end = cursor + para.length;\n blocks.push({ start, end, kind: 'bulleted' });\n addListLine('bulleted', start, end);\n spans.forEach(sp => inlineSpans.push({ start: start + sp.offsetStart, end: start + sp.offsetEnd, bold: sp.bold, italic: sp.italic, 
link: sp.link }));\n cursor = end;\n continue;\n }\n\n // Paragraph\n {\n const { text, spans } = parseInline(raw);\n const para = text + '\\n';\n const start = cursor;\n docText += para;\n const end = cursor + para.length;\n blocks.push({ start, end, kind: 'paragraph' });\n spans.forEach(sp => inlineSpans.push({ start: start + sp.offsetStart, end: start + sp.offsetEnd, bold: sp.bold, italic: sp.italic, link: sp.link }));\n cursor = end;\n }\n }\n\n return { docText, blocks, inlineSpans, listGroups };\n}\n\n/* =========================\n Build requests (safe ranges)\n ========================= */\nconst { docText, blocks, inlineSpans, listGroups } = parseMarkdownToModel(rawInput);\n\n// Insert at 1 to skip the first section break at 0\nconst baseOffset = 1;\nconst docLen = docText.length;\nconst bodyEndExclusive = baseOffset + docLen;\n\n// Safe range builder: clamp end < bodyEndExclusive (end is exclusive per API)\nfunction absRangeSafe(relStart, relEnd) {\n const start = Math.max(baseOffset + relStart, baseOffset);\n const maxEnd = Math.max(Math.min(baseOffset + relEnd, bodyEndExclusive - 1), start); // clamp exclusive\n if (maxEnd <= start) return null; // skip zero/invalid\n return { startIndex: start, endIndex: maxEnd };\n}\n\nconst requests = [];\n\n// 1) Insert full text\nrequests.push({ insertText: { location: { index: baseOffset }, text: docText } });\n\n// 2) Default text styling across the inserted range (clamped end)\nconst rAll = absRangeSafe(0, docLen);\nif (rAll) {\n requests.push({\n updateTextStyle: {\n range: rAll,\n textStyle: {\n weightedFontFamily: { fontFamily: 'Sofia Sans' },\n fontSize: { magnitude: 12.5, unit: 'PT' },\n foregroundColor: { color: { rgbColor: hexToRgb('#0d1216') } }\n },\n fields: 'weightedFontFamily,fontSize,foregroundColor'\n }\n });\n}\n\n// 3) Headings & blockquotes (tight spacing to avoid big gaps)\nfor (const b of blocks) {\n if (b.kind === 'heading') {\n const rng = absRangeSafe(b.start, b.end);\n if (!rng) continue;\n requests.push({\n updateParagraphStyle: {\n range: rng,\n paragraphStyle: {\n namedStyleType: headingStyleForLevel(b.level),\n alignment: 'START',\n spaceAbove: { magnitude: 0, unit: 'PT' },\n spaceBelow: { magnitude: 0, unit: 'PT' },\n indentStart: { magnitude: 0, unit: 'PT' },\n indentFirstLine: { magnitude: 0, unit: 'PT' }\n },\n fields: 'namedStyleType,alignment,spaceAbove,spaceBelow,indentStart,indentFirstLine'\n }\n });\n } else if (b.kind === 'blockquote') {\n const rng = absRangeSafe(b.start, b.end);\n if (!rng) continue;\n requests.push({\n updateParagraphStyle: {\n range: rng,\n paragraphStyle: {\n namedStyleType: 'NORMAL_TEXT',\n alignment: 'START',\n spaceAbove: { magnitude: 0, unit: 'PT' },\n spaceBelow: { magnitude: 0, unit: 'PT' },\n indentStart: { magnitude: 18, unit: 'PT' },\n indentFirstLine: { magnitude: 18, unit: 'PT' }\n },\n fields: 'namedStyleType,alignment,spaceAbove,spaceBelow,indentStart,indentFirstLine'\n }\n });\n requests.push({ updateTextStyle: { range: rng, textStyle: { italic: true }, fields: 'italic' } });\n }\n}\n\n// 4) Lists (convert contiguous ranges to bullets; clamp ranges)\nfor (const g of listGroups) {\n const rng = absRangeSafe(g.start, g.end);\n if (!rng) continue;\n requests.push({\n createParagraphBullets: {\n range: rng,\n bulletPreset: g.kind === 'numbered' ? 
'NUMBERED_DECIMAL_ALPHA_ROMAN' : 'BULLET_DISC_CIRCLE_SQUARE'\n }\n });\n}\n\n// 5) Inline styles (bold/italic/links)\nfor (const sp of inlineSpans) {\n const rng = absRangeSafe(sp.start, sp.end);\n if (!rng) continue;\n const fields = [];\n const textStyle = {};\n if (sp.bold) { textStyle.bold = true; fields.push('bold'); }\n if (sp.italic) { textStyle.italic = true; fields.push('italic'); }\n if (sp.link) { textStyle.link = { url: sp.link }; fields.push('link'); }\n if (!fields.length) continue;\n requests.push({ updateTextStyle: { range: rng, textStyle, fields: fields.join(',') } });\n}\n\nreturn { json: { requests } };\n"
},
"typeVersion": 2
},
{
"id": "320d3015-2fa6-4d54-b729-802025444f7e",
"name": "Dokument erstellen",
"type": "n8n-nodes-base.googleDocs",
"position": [
1040,
0
],
"parameters": {
"title": "=SEO Brief for {{ $('On form submission').item.json['Keyword/Topic'] }}",
"driveId": "sharedWithMe",
"folderId": "={{google_drive_folder_id}}"
},
"credentials": {
"googleDocsOAuth2Api": {
"id": "M5Z3DWLrDsl5ycru",
"name": "Google Docs account"
}
},
"executeOnce": true,
"typeVersion": 2
},
{
"id": "d4c538c3-d527-4c96-99f0-e06b236aee10",
"name": "Dokument aktualisieren",
"type": "n8n-nodes-base.httpRequest",
"onError": "continueRegularOutput",
"position": [
1264,
0
],
"parameters": {
"url": "=https://docs.googleapis.com/v1/documents/{{ $('Create a document').item.json.id }}:batchUpdate",
"method": "POST",
"options": {},
"jsonBody": "={{ JSON.stringify($('Markdown to JSON').item.json) }}",
"sendBody": true,
"specifyBody": "json",
"authentication": "predefinedCredentialType",
"nodeCredentialType": "googleDocsOAuth2Api"
},
"credentials": {
"googleDocsOAuth2Api": {
"id": "M5Z3DWLrDsl5ycru",
"name": "Google Docs account"
}
},
"typeVersion": 4.2
},
{
"id": "e46c8bdd-b125-4791-9b20-f09d0dea472d",
"name": "KI-Agent",
"type": "@n8n/n8n-nodes-langchain.agent",
"position": [
496,
0
],
"parameters": {
"text": "=Here is the topic for the blog post:\n\n<topic>\n{{ $('On form submission').item.json['Keyword/Topic'] }}\n</topic>\n\nBelow are the top-ranking pages and their content for the target keyword. For each source, you'll find the content and its corresponding URL:\n\n\n<source_content_1>\n{{ $json.data.web[0].markdown }}\n<source_content_1>\n\n<source_url_1>\n{{ $json.data.web[0].url }}\n<source_url_1>\n\n<source_content_2>\n{{ $json.data.web[1].markdown }}\n<source_content_2>\n\n<source_url_2>\n{{ $json.data.web[1].url }}\n<source_url_2>\n\n<source_content_3>\n{{ $json.data.web[2].markdown }}\n<source_content_3>\n\n<source_url_3>\n{{ $json.data.web[2].url }}\n<source_url_3>\n\n<source_content_4>\n{{ $json.data.web[3].markdown }}\n<source_content_4>\n\n<source_url_4>\n{{ $json.data.web[3].url }}\n<source_url_4>\n\n<source_content_5>\n{{ $json.data.web[4].markdown }}\n<source_content_5>\n\n<source_url_5>\n{{ $json.data.web[4].url }}\n<source_url_5>\n\nYour task is to analyze these sources and create a unique, comprehensive content brief that incorporates best practices and insights from top-ranking content. Focus on providing valuable insights and a fresh perspective on the given topic while incorporating SEO best practices.\n\nBefore creating the content brief, use think tool and make a strategy and analysis. In your analysis, consider the following:\n\n1. For each source:\n - List 3-5 key points\n - Note any unique angles or perspectives\n - Identify potential content gaps\n\n2. Compare and contrast the sources:\n - Identify common themes across all sources (list at least 3)\n - Note any conflicting information or viewpoints (if any)\n - Highlight unique elements in each source\n\n3. Brainstorm 5 unique angles or perspectives not covered in the existing sources. Number each idea as you list it.\n\n4. Determine 3-5 content gaps and areas for improvement or expansion across all sources\n\n5. Outline 5 key SEO strategies to be implemented\n\n6. Suggest 3 visual or interactive content ideas that could enhance the article\n\n7. List 10 questions that the content should aim to answer. Number each question as you list it.\n\n8. 
Propose an overall approach to make the content competitive with top-ranking pages, including 5 specific strategies and tactics\n\nAfter your analysis, develop the content brief using the following structure:\n\nContent Brief for {{ $('On form submission').item.json['Keyword/Topic'] }}\n(H1) [Optimized Title] (5-10 words)\n\nTarget Keywords: [List primary keywords]\nSecondary Keywords: [List secondary keywords]\n\nIntroduction Guidelines: [2-3 sentences outlining the introduction]\n\nContent Structure:\n[H2] [Section Title]\n- [H3] [Subsection Title]\n [Content guidelines and key points to cover]\n- [H3] [Subsection Title]\n [Content guidelines and key points to cover]\n\n[H2] [Section Title]\n- [H3] [Subsection Title]\n [Content guidelines and key points to cover]\n- [H3] [Subsection Title]\n [Content guidelines and key points to cover]\n\n[Continue with additional H2 and H3 sections as needed]\n\nSEO Recommendations:\n- Semantic keywords: [List relevant semantic keywords]\n- Word count: [Recommended word count range]\n- Technical requirements: [Specific technical SEO recommendations]\n\nTop-Ranking References: [List the provided URLs]\n\nAdditional Content Guidelines:\n- [List any additional guidelines or requirements]\n\nSource Overview:\n[Provide a 4-5 sentence overview summarizing key insights from all sources]\n\n\nThe final format should be only a markdown with the content brief.\n\nRequirements:\n1. Include all major aspects of the topic in your content structure.\n2. Suggest at least one to three visual representation of a key concept related to the topic.\n3. If relevant, compare the main topic to a related concept or model.\n4. Total word count should be based on best SEO practices and the content lenght - n words.\n5. Incorporate semantic keywords naturally throughout the content brief.\n6. Identify opportunities for featured snippets, internal linking, and FAQs where relevant.\n7. Include the provided source URLs in the \"Top-ranking references\" section of the SEO Recommendations.\n8. Provide a concise 5-10 sentence overview of all sources at the end of the brief.\n9. 9. Everything should be in markdown format. Never write ```markdown in the beginning and ```\\n at the end, just start with the content.\n\nRemember to create a unique and comprehensive brief that goes beyond summarizing the existing content. Your goal is to provide a strategy for creating content that can compete with and surpass the top-ranking pages. The content structure provided is an example; adapt it as necessary for the specific topic given.",
"options": {
"systemMessage": "You are an expert content strategist and SEO specialist tasked with creating a comprehensive content brief for a blog post. Your goal is to analyze top-ranking content on a given topic and create a detailed outline that will help produce high-quality, SEO-optimized content that can compete with and surpass the top-ranking pages."
},
"promptType": "define"
},
"typeVersion": 1.7
},
{
"id": "ba5808b2-ce45-4c2e-8cd9-87dcd11f4772",
"name": "Denken",
"type": "@n8n/n8n-nodes-langchain.toolThink",
"position": [
656,
208
],
"parameters": {},
"typeVersion": 1.1
},
{
"id": "ee7a9941-823b-47e2-8196-51e1f19307a8",
"name": "OpenAI Chat-Modell",
"type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
"position": [
496,
208
],
"parameters": {
"model": "gpt-4.1-mini",
"options": {}
},
"credentials": {
"openAiApi": {
"id": "SVy0dG502VH2c1j9",
"name": "OpenAi account"
}
},
"typeVersion": 1
},
{
"id": "96050769-6142-44bb-9529-3079bd68d20e",
"name": "Haftnotiz",
"type": "n8n-nodes-base.stickyNote",
"position": [
272,
-832
],
"parameters": {
"width": 944,
"height": 736,
"content": "## 🟨 Overview — Create SEO content brief from keyword to Google Docs\n\n## Who’s it for\nContent/SEO teams who want a fast, consistent, research-driven brief for a copywriters from a single keyword—without manual review and analysis of the SERP (Google results).\n\n## How it works / What it does\n- **Form Trigger** collects the keyword/topic and redirects to Google Drive Folder after the final node. \n- **FireCrawl Search & Scrape** pulls the top 5 pages for the chosen keyword. \n- **AI Agent** (with **Think** + **OpenAI Chat Model**) analyzes sources and generates an **original Markdown** brief. \n- **Markdown to JSON** converts the Markdown into Google Docs `batchUpdate` requests (H1/H2/H3, lists, links, spacing). Then this is used in **Update a document** for updating the empty doc.\n- **Create a document** + **Update a document** write a Google Doc titled **“SEO Brief for <keyword>”** and update the Google Doc in your target Drive folder.\n\n## How to set up\n1) Add credentials: **Firecrawl** (Authorization header), **OpenAI** (Chat), **Google Docs OAuth2**. \n2) Replace placeholders: `{{API_KEY}}`, `{{google_drive_folder_id}}`, `{{google_drive_folder_url}}`. \n3) Publish and open the Form URL to test.\n\n## Requirements\nFirecrawl API key • OpenAI API key • Google account with access to the target Drive folder.\n\n## Resources\nGoogle OAuth2 Credentials Setup - https://docs.n8n.io/integrations/builtin/credentials/google/oauth-generic/\nFirecrawl API key - https://take.ms/lGcUp\nOpenAI API key - https://docs.n8n.io/integrations/builtin/credentials/openai/\n\n"
},
"typeVersion": 1
},
{
"id": "0fcc0b14-172e-4b1f-ba32-bccbd66fd9ed",
"name": "Haftnotiz1",
"type": "n8n-nodes-base.stickyNote",
"position": [
-592,
-64
],
"parameters": {
"width": 528,
"height": 272,
"content": "## 🗒️ Setup checklist (do these before first run)\n\n- **On form submission → Respond with redirect**: set `={{google_drive_folder_url}}`. \n- **Create a document → folderId**: set `={{google_drive_folder_id}}`. \n- **FireCrawl headers**: `Authorization: Bearer {{API_KEY}}`. \n- **OpenAI Chat Model**: attach your OpenAI credential. \n- **Google Docs OAuth2**: attach to both Docs nodes - Create a document + Update a document. \n- Test with a keyword (e.g., `GEO strategy`) and verify the created Doc & redirect."
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "c1662ea4-210d-47f6-bf2f-fea975cacef1",
"connections": {
"ba5808b2-ce45-4c2e-8cd9-87dcd11f4772": {
"ai_tool": [
[
{
"node": "e46c8bdd-b125-4791-9b20-f09d0dea472d",
"type": "ai_tool",
"index": 0
}
]
]
},
"e46c8bdd-b125-4791-9b20-f09d0dea472d": {
"main": [
[
{
"node": "8d7a216c-017e-49fa-a3dc-980aab03d32c",
"type": "main",
"index": 0
}
]
]
},
"8d7a216c-017e-49fa-a3dc-980aab03d32c": {
"main": [
[
{
"node": "320d3015-2fa6-4d54-b729-802025444f7e",
"type": "main",
"index": 0
}
]
]
},
"320d3015-2fa6-4d54-b729-802025444f7e": {
"main": [
[
{
"node": "d4c538c3-d527-4c96-99f0-e06b236aee10",
"type": "main",
"index": 0
}
]
]
},
"ee7a9941-823b-47e2-8196-51e1f19307a8": {
"ai_languageModel": [
[
{
"node": "e46c8bdd-b125-4791-9b20-f09d0dea472d",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"d0cbb365-b3df-430e-89a6-338bc316315e": {
"main": [
[
{
"node": "c3ac02d5-67c7-4f24-8ae1-0af6ba9f4d79",
"type": "main",
"index": 0
}
]
]
},
"c3ac02d5-67c7-4f24-8ae1-0af6ba9f4d79": {
"main": [
[
{
"node": "e46c8bdd-b125-4791-9b20-f09d0dea472d",
"type": "main",
"index": 0
}
]
]
}
}
}
How do I use this workflow?
Copy the JSON above, create a new workflow in your n8n instance, and choose "Import from JSON". Paste the configuration and adjust the credentials as needed.
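If you prefer to import programmatically rather than pasting into the editor, a sketch along these lines should work against the n8n public REST API. This is an assumption-laden example: the public API must be enabled on your instance, and the file name plus the N8N_BASE_URL and N8N_API_KEY environment variables are placeholders you provide.

```javascript
// Minimal sketch (assumes the n8n public REST API is enabled on your instance).
// Creates the workflow from a saved copy of the JSON above. Requires Node 18+ (run as an .mjs module).
// N8N_BASE_URL, N8N_API_KEY, and the file name are placeholders you supply.
import { readFile } from 'node:fs/promises';

const full = JSON.parse(await readFile('seo-content-brief-workflow.json', 'utf8'));

// Send only the core fields; read-only fields such as id, active, and tags are omitted.
const payload = {
  name: full.name,
  nodes: full.nodes,
  connections: full.connections,
  settings: full.settings,
};

const res = await fetch(`${process.env.N8N_BASE_URL}/api/v1/workflows`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'X-N8N-API-KEY': process.env.N8N_API_KEY,
  },
  body: JSON.stringify(payload),
});

if (!res.ok) throw new Error(`Import failed: ${res.status} ${await res.text()}`);
console.log('Created workflow with id:', (await res.json()).id);
```

Either way, you still need to attach your Firecrawl, OpenAI, and Google Docs credentials and replace the `{{API_KEY}}`, `{{google_drive_folder_id}}`, and `{{google_drive_folder_url}}` placeholders described in the sticky notes before the first run.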
Which scenarios is this workflow suitable for?
Advanced - Miscellaneous, AI RAG, Multimodal AI
Does it cost anything?
The workflow itself is completely free. Note, however, that third-party services used in the workflow (such as the OpenAI API) may charge for usage.
Simeon Penev
@monkatagpt | AI-Powered Automation & Analytics. I specialize in building AI workflows that streamline processes and reduce manual work. My main workflows are in the marketing field, but I can automate all kinds of processes and integrate AI into them.