Automatically Send LinkedIn Post Summaries to Slack with AI and Apify
This is an automation workflow in the AI Summarization / Multimodal AI category, built with 14 nodes. It mainly uses Code, Cron, Slack, HttpRequest, GoogleSheets, and other nodes to automatically summarize LinkedIn posts with AI and Apify and send the digest to Slack.
- Slack bot token or webhook URL
- Apify API token (the HTTP Request node calls the LinkedIn posts actor)
- Google Sheets API credentials (the expected item shape is sketched below)
- OpenAI API key
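The Google Sheets node is expected to emit one item per LinkedIn profile, with a profileUrl field that the Apify HTTP Request body consumes as {{ $json.profileUrl }}. A minimal sketch of that shape, assuming a single-column sheet whose header row is profileUrl (the URL below is a made-up example):

// Sheet layout (hypothetical): one header row "profileUrl", one row per profile.
// Each row becomes an n8n item roughly like this:
const exampleSheetItem = {
  json: {
    profileUrl: "https://www.linkedin.com/in/example-profile/" // placeholder URL
  }
};
// The "Apify: Scraper starten" node forwards this value as the actor's "username" input.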
{
"meta": {
"instanceId": "f01290caa6c024522b0ed5bb2d09cea02bb113d8970b898b340ca3d74255326e",
"templateCredsSetupCompleted": true
},
"nodes": [
{
"id": "2de83c7c-8f98-4598-ab33-53ac2f05bc17",
"name": "Start: Wöchentlicher Cron",
"type": "n8n-nodes-base.cron",
"notes": "Runs every Sunday 09:00 Africa/Cairo.",
"position": [
400,
80
],
"parameters": {
"triggerTimes": {
"item": [
{
"hour": 9
}
]
}
},
"typeVersion": 1
},
{
"id": "fdf4142d-fe4d-44a8-87fb-303e4094349c",
"name": "Profile aus Google Sheets lesen",
"type": "n8n-nodes-base.googleSheets",
"position": [
624,
80
],
"parameters": {
"options": {},
"sheetName": {
"__rl": true,
"mode": "id",
"value": "gid=0"
},
"documentId": {
"__rl": true,
"mode": "id",
"value": "__GOOGLE_SHEETS_CREDENTIAL_ID__"
}
},
"credentials": {
"googleSheetsOAuth2Api": {
"id": "I0hj4qBrW1yS7OdT",
"name": "Google Sheets account"
}
},
"typeVersion": 4
},
{
"id": "efa70b5f-002d-4264-8b9d-c639803cf829",
"name": "Apify: Scraper starten",
"type": "n8n-nodes-base.httpRequest",
"position": [
1072,
96
],
"parameters": {
"url": "https://api.apify.com/v2/acts/apimaestro~linkedin-profile-posts/run-sync-get-dataset-items?token=apify_api_{{YOUR_API_TOKEN}}",
"method": "POST",
"options": {},
"jsonBody": "={\n \"username\": \"{{ $json.profileUrl }}\",\n \"page_number\": 1,\n \"limit\": 3,\n \"maxItems\": 20,\n \"total_posts\": 3,\n \"post_type\": \"regular\",\n \"includePostReactions\": true,\n \"includePostComments\": false,\n \"includePostShares\": true,\n \"extendOutputFunction\": \"async function extendOutputFunction({ item }) { const now=Date.now(); const weekMs=7*24*60*60*1000; const ts=(item?.posted_at?.timestamp)||(item?.postedAt?.timestamp)||0; const postType=item?.post_type||item?.postType||null; if(!ts||now-ts>weekMs||postType!=='regular') return null; const pick=(o,k)=>Object.fromEntries(Object.entries(o||{}).filter(([kk])=>k.includes(kk))); const author=pick(item?.author,['first_name','last_name','headline','username','profile_url','profile_picture']); const stats=pick(item?.stats,['total_reactions','like','support','love','insight','celebrate','comments','reposts']); const media=item?.media?pick(item.media,['type','url','thumbnail','images']):null; const out={ urn:item?.urn||null, full_urn:item?.full_urn||item?.fullUrn||null, posted_at=item?.posted_at||item?.postedAt||null, text:item?.text||null, url=item?.url||null, post_type:postType, author, stats, media, username: author?.profile_url||null }; return out; }\"\n}",
"sendBody": true,
"specifyBody": "json"
},
"typeVersion": 4
},
{
"id": "6647dd07-8989-4aee-b5b9-5369db6bfa7c",
"name": "Markdown-Digest erstellen",
"type": "n8n-nodes-base.code",
"position": [
2144,
96
],
"parameters": {
"jsCode": "// Inputs: array of items coming from \"Message a model\"\n// Each item usually looks like { json: { message: { role: 'assistant', content: '...' }, ... } }\n\nconst items = $input.all();\n\n// Helper: pick a grouping key per profile if present\nconst keyOf = (j) =>\n j.profileUrl ||\n j.author?.profile_url ||\n j.author?.username ||\n j.username ||\n 'Feed';\n\n// Build buckets per profile\nconst buckets = new Map();\nfor (const it of items) {\n const j = it.json ?? {};\n const key = keyOf(j);\n const arr = buckets.get(key) || [];\n arr.push(j);\n buckets.set(key, arr);\n}\n\n// Compose markdown\nconst today = new Date().toISOString().slice(0, 10);\nlet md = `# LinkedIn Digest (${today})\\n\\n`;\n\nfor (const [profile, arr] of buckets.entries()) {\n md += `## ${profile}\\n\\n`;\n arr.forEach((j, idx) => {\n const content =\n j.message?.content ?? j.content ?? j.text ?? '(no content)';\n const url =\n j.url || j.post?.url || j.source_url || j.link || null;\n\n md += `**${idx + 1}.** ${content}\\n`;\n if (url) md += `\\n[Link](${url})\\n`;\n md += `\\n---\\n`;\n });\n md += `\\n`;\n}\n\n// Slack hard limit ~4000 chars for a single text message.\n// Trim if needed to be safe.\nconst MAX = 3800;\nif (md.length > MAX) {\n md = md.slice(0, MAX - 50) + '\\n\\n…_truncated_';\n}\n\n// Return ONE item that downstream nodes can use.\n// Slack node: set Message Text = {{ $json.text }} (or {{ $json.markdown }})\nreturn [\n {\n json: {\n markdown: md,\n text: md,\n },\n },\n];\n"
},
"typeVersion": 2
},
{
"id": "e63e2244-0dfa-4e54-8bac-9abf356138b1",
"name": "Batch",
"type": "n8n-nodes-base.code",
"position": [
1248,
96
],
"parameters": {
"jsCode": "const posts = items.map(item => item.json);\n\n// find min and max date in this batch\nconst dates = posts.map(p => new Date(p.posted_at?.date));\nconst minDate = new Date(Math.min(...dates));\nconst maxDate = new Date(Math.max(...dates));\n\nfunction formatDate(d) {\n return d.toISOString().split(\"T\")[0]; // YYYY-MM-DD\n}\n\n// group by author username\nconst grouped = {};\nfor (const post of posts) {\n const author = `${post.author?.first_name || \"\"} ${post.author?.last_name || \"\"}`.trim() || \"Unknown\";\n if (!grouped[author]) grouped[author] = [];\n grouped[author].push(post);\n}\n\nlet digest = `## LinkedIn Digest (${formatDate(minDate)} → ${formatDate(maxDate)})\\n\\n`;\n\nfor (const [author, authorPosts] of Object.entries(grouped)) {\n digest += `### ${author}\\n`;\n for (const post of authorPosts) {\n const date = post.posted_at?.date || \"Unknown date\";\n const text = (post.text || \"\").slice(0, 200).replace(/\\n+/g, \" \"); // keep short snippet\n const reactions = post.stats?.total_reactions || 0;\n const comments = post.stats?.comments || 0;\n const reposts = post.stats?.reposts || 0;\n digest += `- (${date}) \"${text}...\"\\n 👍 ${reactions} | 💬 ${comments} | 🔁 ${reposts} | [Link](${post.url})\\n`;\n }\n digest += \"\\n\";\n}\n\nreturn [{ json: { digest } }];\n"
},
"typeVersion": 2
},
{
"id": "2e0f532a-c8ad-4d71-840e-e2176a43a70b",
"name": "Code",
"type": "n8n-nodes-base.code",
"position": [
2496,
96
],
"parameters": {
"jsCode": "// Input: one item with json.markdown (or json.text)\n// Output: N items -> { text, part, total } for Slack\n\nconst MAX = 35000; // safe margin under Slack's ~40k limit\nconst SEP = '\\n---\\n';\n\nconst md = $input.first().json.markdown ?? $input.first().json.text ?? '';\nif (!md) return [{ json: { text: '(empty digest)' } }];\n\n// Split on section separators, then pack sections into <= MAX chunks\nconst sections = md.split(SEP);\nconst chunks = [];\nlet buf = '';\n\nfunction pushBuf() {\n if (buf.trim()) chunks.push(buf.trim());\n buf = '';\n}\n\nfor (const s of sections) {\n const candidate = buf ? `${buf}${SEP}${s}` : s;\n if (candidate.length <= MAX) {\n buf = candidate;\n } else {\n // flush what we have\n pushBuf();\n // if a single section is still too long, split it on newlines\n if (s.length > MAX) {\n let start = 0;\n while (start < s.length) {\n let end = Math.min(start + MAX, s.length);\n const nl = s.lastIndexOf('\\n', end);\n if (nl > start + 1000) end = nl; // try not to cut mid‑line\n chunks.push(s.slice(start, end).trim());\n start = end;\n }\n } else {\n buf = s; // start new buffer with this section\n }\n }\n}\npushBuf();\n\nreturn chunks.map((c, i) => ({\n json: { text: c, part: i + 1, total: chunks.length }\n}));\n"
},
"typeVersion": 2
},
{
"id": "7da7a9ec-c541-4556-bf86-9360eae28e37",
"name": "LinkedIn Digest",
"type": "n8n-nodes-base.slack",
"position": [
2752,
96
],
"webhookId": "18881790-2665-4d76-83f5-35e6b0e30946",
"parameters": {
"text": "=**LinkedIn Digest (part {{$json.part}}/{{$json.total}})**\n\n{{$json.text}}",
"select": "channel",
"channelId": {
"__rl": true,
"mode": "id",
"value": "={{TARGET_SLACK_CHANNEL}}"
},
"otherOptions": {
"unfurl_links": "={{ false }}",
"includeLinkToWorkflow": "={{ false }}"
}
},
"credentials": {
"slackApi": {
"id": "SLACK_ID",
"name": "Your_Slack_HQ"
}
},
"executeOnce": true,
"typeVersion": 2.3
},
{
"id": "9103c0a5-8a0a-4f55-93f7-920ec2b363fd",
"name": "Threads Messaging",
"type": "n8n-nodes-base.slack",
"position": [
3168,
96
],
"webhookId": "52de7e8a-c716-4079-bacd-3ac514a93364",
"parameters": {
"text": "={{ $json[\"linksText\"] }}\n",
"select": "channel",
"channelId": {
"__rl": true,
"mode": "id",
"value": "={{TARGET_SLACK_CHANNEL}}"
},
"otherOptions": {
"mrkdwn": true,
"thread_ts": {
"replyValues": {
"thread_ts": "={{ $('LinkedIn Digest').item.json.message.ts }}"
}
},
"sendAsUser": "TARGET_USER",
"includeLinkToWorkflow": "={{ false }}"
}
},
"credentials": {
"slackApi": {
"id": "SLACK_ID",
"name": "MY_SLACK"
}
},
"executeOnce": false,
"typeVersion": 2.3,
"alwaysOutputData": false
},
{
"id": "ee8150a4-5ad6-4984-b1cf-3f6630c5e654",
"name": "Quelllinks",
"type": "n8n-nodes-base.code",
"position": [
2960,
96
],
"parameters": {
"jsCode": "// --- CONFIG ---\n// Change this to the exact node name that has the LinkedIn objects:\nconst SOURCE_NODE = 'Apify: Start Scraper'; // e.g. 'API Request' if that’s your node’s name\n\n// --- READ ITEMS SAFELY ---\nconst items = $(SOURCE_NODE).all(); // requires “Run Once for All Items”\n\n// Your API payload shows each item has a .url field.\n// If sometimes it’s nested, add more fallbacks in the mapper.\nconst links = items\n .map(i => i.json?.url || i.json?.article?.url || i.json?.reshared_post?.url)\n .filter(Boolean);\n\n// Build Slack-friendly list\nconst linksText = '*Sources for today’s digest:*\\n' +\n links.map((u, i) => `${i + 1}. <${u}|Post ${i + 1}>`).join('\\n');\n\n// Output a single item for the Slack Threads node\nreturn [{ json: { linksText } }];\n"
},
"typeVersion": 2
},
{
"id": "3709705b-f03c-4e2c-84fb-3433ec4ec030",
"name": "Über Elemente iterieren1",
"type": "n8n-nodes-base.splitInBatches",
"notes": "You can customize the batch size according to how many posts you want to go through each run of the loop",
"position": [
832,
80
],
"parameters": {
"options": {},
"batchSize": 5
},
"notesInFlow": true,
"typeVersion": 3
},
{
"id": "1fb5cf18-b44d-40ac-bba3-5db186fc7587",
"name": "Nachricht an ein Modell",
"type": "@n8n/n8n-nodes-langchain.openAi",
"position": [
1808,
96
],
"parameters": {
"modelId": {
"__rl": true,
"mode": "list",
"value": "gpt-5-mini",
"cachedResultName": "GPT-5-MINI"
},
"options": {},
"messages": {
"values": [
{
"content": "=## LinkedIn Digest ({{ $json.date || \"This Week\" }})\n\nINPUT:\n{{ $json.text }}\n\nTASK:\nSummarize the INPUT per the rules in the system message.\n"
},
{
"role": "system",
"content": "=You are a professional LinkedIn digest summarizer. \nYour goal is to take raw LinkedIn posts and turn them into a short, structured daily digest. \nConstraints:\n- Each post must be summarized in **max 2–3 bullet points**. \n- Each bullet point must be **≤15 words**. \n- Always include the author’s name in **bold** at the top of each post summary. \n- Do not include hashtags, links, emojis, or filler text. \n- The entire digest must stay under **500 words total**. \n- Keep summaries actionable and focused on insights, not fluff.\nFormat:\n## LinkedIn Digest ({{ $json.date || $json.today }})\n\n**[Author Name]**\n- Bullet\n- Bullet\n\n**[Next Author]**\n- Bullet\n- Bullet\n"
},
{
"content": "If a post has only a link or no clear content, \nexplicitly say \"Shared only a link (no text)\" instead of \"Content unspecified.\""
}
]
}
},
"credentials": {
"openAiApi": {
"id": "OPENAI_ID",
"name": "YOUR_OpenAI"
}
},
"typeVersion": 1.8
},
{
"id": "08242d63-3788-4c6e-a728-8b6063eeeef7",
"name": "Markdown entfernen",
"type": "n8n-nodes-base.code",
"position": [
1408,
96
],
"parameters": {
"jsCode": "return items.map(item => {\n let digest = item.json.digest;\n\n // Remove markdown headers\n digest = digest.replace(/^#+\\s?/gm, \"\");\n\n // Replace markdown links [text](url) → just \"text\"\n digest = digest.replace(/\\[([^\\]]+)\\]\\([^)]+\\)/g, \"$1\");\n\n // Remove \"(Link: …)\" remnants\n digest = digest.replace(/\\(Link:[^)]+\\)/g, \"\");\n\n // Unescape quotes \\\" → \"\n digest = digest.replace(/\\\\\"/g, '\"');\n\n // Convert \\n to real newlines\n digest = digest.replace(/\\\\n/g, \"\\n\");\n\n // Collapse multiple newlines\n digest = digest.replace(/\\n{2,}/g, \"\\n\");\n\n // Trim leading/trailing spaces\n digest = digest.trim();\n\n return {\n json: {\n text: digest\n }\n };\n});\n"
},
"typeVersion": 2
},
{
"id": "e60efb9d-3c38-4f63-a90f-410f4b09d1cf",
"name": "Datum extrahieren",
"type": "n8n-nodes-base.code",
"position": [
1616,
96
],
"parameters": {
"jsCode": "return items.map(item => {\n const text = item.json.text;\n const match = text.match(/LinkedIn Digest \\((.*?)\\)/);\n return {\n json: {\n date: match ? match[1] : new Date().toISOString().split(\"T\")[0],\n text: text.replace(/LinkedIn Digest.*?\\n/, \"\") // strip old header\n }\n }\n});\n"
},
"typeVersion": 2
},
{
"id": "3bec6bd4-f2c5-464a-82a3-7b76952d5f59",
"name": "Sticky Note",
"type": "n8n-nodes-base.stickyNote",
"position": [
-656,
-448
],
"parameters": {
"width": 944,
"height": 1536,
"content": "# Bite-Sized LinkedIn Digest Workflow\n\nThis workflow automatically summarizes LinkedIn posts from selected profiles and delivers a clean weekly digest to Slack.\n\n---\n\n## 🔄 How It Works\n1. **Start: Weekly Cron**\n - Triggers every Sunday at 09:00 (Africa/Cairo).\n\n2. **Google Sheets → Profiles**\n - Reads LinkedIn profile URLs from a Google Sheet (`profileUrl` column).\n\n3. **Apify Scraper**\n - Uses the Apify actor `apimaestro~linkedin-profile-posts`.\n - Collects posts from each profile, filtering for the last 7 days only.\n\n4. **Strip & Clean**\n - Code nodes remove Markdown, clean URLs, and extract the date range.\n\n5. **OpenAI Summarizer**\n - Summarizes each post into **2–3 concise bullet points** (≤15 words).\n - Keeps the digest under **500 words**.\n\n6. **Build Markdown Digest**\n - Groups posts by author.\n - Formats the digest with headings, dates, and author names.\n\n7. **Slack Delivery**\n - Splits the digest into safe chunks (avoiding Slack’s character limit).\n - Posts the digest into your Slack channel.\n - A follow-up thread lists **all source links**.\n\n---\n\n## ⚙️ One-Time Setup\n1. **Google Sheets Credential**\n - Create a Google Sheets OAuth2 credential in n8n.\n - Replace `__GOOGLE_SHEETS_CREDENTIAL_ID__` in the JSON.\n\n2. **Apify**\n - Generate an Apify API token.\n - Replace `<APIFY_API_TOKEN>` in the HTTP Request node URL.\n\n3. **OpenAI**\n - Add your OpenAI API key as a credential in n8n.\n - Replace `__OPENAI_API_CREDENTIAL_ID__`.\n\n4. **Slack**\n - Create a Slack API credential with `chat:write` scope.\n - Replace `__TARGET_SLACK_CHANNEL__` in both Slack nodes.\n - Confirm the channel ID and ensure the bot is in the channel.\n\n5. **Google Sheet Setup**\n - Use your LinkedIn profile list sheet.\n - Ensure it has a column `profileUrl`.\n\n---\n\n## ✅ Benefits\n- Saves **hours of manual LinkedIn scrolling** every week.\n- Produces a **polished, Slack-ready digest** under 500 words.\n- Ensures your team gets **only the most relevant insights**.\n- Keeps a **weekly cadence** without missing updates.\n\n---\n"
},
"typeVersion": 1
}
],
"pinData": {},
"connections": {
"2e0f532a-c8ad-4d71-840e-e2176a43a70b": {
"main": [
[
{
"node": "7da7a9ec-c541-4556-bf86-9360eae28e37",
"type": "main",
"index": 0
}
]
]
},
"e63e2244-0dfa-4e54-8bac-9abf356138b1": {
"main": [
[
{
"node": "08242d63-3788-4c6e-a728-8b6063eeeef7",
"type": "main",
"index": 0
}
]
]
},
"e60efb9d-3c38-4f63-a90f-410f4b09d1cf": {
"main": [
[
{
"node": "1fb5cf18-b44d-40ac-bba3-5db186fc7587",
"type": "main",
"index": 0
}
]
]
},
"ee8150a4-5ad6-4984-b1cf-3f6630c5e654": {
"main": [
[
{
"node": "9103c0a5-8a0a-4f55-93f7-920ec2b363fd",
"type": "main",
"index": 0
}
]
]
},
"08242d63-3788-4c6e-a728-8b6063eeeef7": {
"main": [
[
{
"node": "e60efb9d-3c38-4f63-a90f-410f4b09d1cf",
"type": "main",
"index": 0
}
]
]
},
"7da7a9ec-c541-4556-bf86-9360eae28e37": {
"main": [
[
{
"node": "ee8150a4-5ad6-4984-b1cf-3f6630c5e654",
"type": "main",
"index": 0
}
]
]
},
"1fb5cf18-b44d-40ac-bba3-5db186fc7587": {
"main": [
[
{
"node": "6647dd07-8989-4aee-b5b9-5369db6bfa7c",
"type": "main",
"index": 0
}
]
]
},
"3709705b-f03c-4e2c-84fb-3433ec4ec030": {
"main": [
[],
[
{
"node": "efa70b5f-002d-4264-8b9d-c639803cf829",
"type": "main",
"index": 0
}
]
]
},
"9103c0a5-8a0a-4f55-93f7-920ec2b363fd": {
"main": [
[
{
"node": "3709705b-f03c-4e2c-84fb-3433ec4ec030",
"type": "main",
"index": 0
}
]
]
},
"2de83c7c-8f98-4598-ab33-53ac2f05bc17": {
"main": [
[
{
"node": "fdf4142d-fe4d-44a8-87fb-303e4094349c",
"type": "main",
"index": 0
}
]
]
},
"efa70b5f-002d-4264-8b9d-c639803cf829": {
"main": [
[
{
"node": "e63e2244-0dfa-4e54-8bac-9abf356138b1",
"type": "main",
"index": 0
}
]
]
},
"6647dd07-8989-4aee-b5b9-5369db6bfa7c": {
"main": [
[
{
"node": "2e0f532a-c8ad-4d71-840e-e2176a43a70b",
"type": "main",
"index": 0
}
]
]
},
"fdf4142d-fe4d-44a8-87fb-303e4094349c": {
"main": [
[
{
"node": "3709705b-f03c-4e2c-84fb-3433ec4ec030",
"type": "main",
"index": 0
}
]
]
}
}
}
How do I use this workflow?
Copy the JSON above, create a new workflow in your n8n instance, and choose "Import from JSON". Paste the configuration and adjust the credentials as needed.
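If you prefer to create the workflow programmatically instead of pasting it in the editor, the n8n public REST API can be used with the same JSON. A minimal sketch, assuming the public API is enabled on your instance and you have generated an API key; the base URL and file name are placeholders:

// Minimal sketch (Node.js 18+): create the workflow via n8n's public REST API.
// Assumes the JSON above was saved to linkedin-digest-workflow.json (placeholder name).
const fs = require("fs");

const wf = JSON.parse(fs.readFileSync("linkedin-digest-workflow.json", "utf8"));

fetch("https://your-n8n-instance.example.com/api/v1/workflows", { // placeholder URL
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "X-N8N-API-KEY": process.env.N8N_API_KEY, // your n8n API key
  },
  body: JSON.stringify({
    name: "LinkedIn Digest",
    nodes: wf.nodes,
    connections: wf.connections,
    settings: {},
  }),
})
  .then((res) => res.json())
  .then((created) => console.log("Created workflow:", created.id))
  .catch(console.error);

Either way, the credential references (Google Sheets, Slack, OpenAI) still have to be reconnected in the editor afterwards, since credential IDs do not transfer between instances.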
Which scenarios is this workflow suited for?
Advanced - AI Summarization, Multimodal AI
Does this workflow cost anything?
The workflow itself is completely free. Note, however, that third-party services used in it (such as the OpenAI API) may incur their own costs.
Ziad Adel
@ziadadel
AI Automation Growth Partner with 5 years of experience in tech and hyper-growth startups