Extractor de sentimiento y palabras clave de comentarios de YouTube
Este es un flujo de automatización del dominio de Investigación de Mercado e IA Multimodal que contiene 20 nodos. Utiliza principalmente nodos como Set, Code, Telegram, Aggregate y HttpRequest. Usa Gemini AI para analizar el sentimiento y las palabras clave de los comentarios de YouTube y reportar a través de Telegram.
- Bot Token de Telegram
- Pueden requerirse credenciales de autenticación para la API de destino
- Credenciales de API de Google Sheets
Nodos utilizados (20)
{
"id": "1mZD3H0r79bhY71M",
"meta": {
"instanceId": "0d045f8fe3802ff2be0bb9a9ea445ee6c9ed61973377effe00767e483681e2f4",
"templateCredsSetupCompleted": true
},
"name": "YouTube Comment Sentiment & Keyword Extractor",
"tags": [],
"nodes": [
{
"id": "55d27fc7-ac2c-4ccc-a98f-f8c4ab7547c4",
"name": "Disparador Programado",
"type": "n8n-nodes-base.scheduleTrigger",
"position": [
0,
0
],
"parameters": {
"rule": {
"interval": [
{}
]
}
},
"typeVersion": 1.2
},
{
"id": "bd198bd2-705f-4fc9-ba30-c1facfa06359",
"name": "Obtener fila(s) en hoja",
"type": "n8n-nodes-base.googleSheets",
"position": [
220,
0
],
"parameters": {
"options": {},
"sheetName": {
"__rl": true,
"mode": "list",
"value": ""
},
"documentId": {
"__rl": true,
"mode": "list",
"value": ""
}
},
"typeVersion": 4.6
},
{
"id": "514959bb-14aa-4947-9d78-f903cb3201e3",
"name": "Iterar sobre elementos",
"type": "n8n-nodes-base.splitInBatches",
"position": [
740,
0
],
"parameters": {
"options": {}
},
"typeVersion": 3
},
{
"id": "1a194433-415d-42cd-aeda-c983b5d81883",
"name": "HTTP Request",
"type": "n8n-nodes-base.httpRequest",
"position": [
980,
120
],
"parameters": {
"url": "=https://www.googleapis.com/youtube/v3/commentThreads?part=snippet&videoId={{ $('Obtener fila(s) en hoja').item.json.video_id }}&maxResults=100&key={{ $json['Youtube API Key'] }}",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "93821e4a-b8d9-4364-aa9c-2e9200c49691",
"name": "Youtube API Key",
"type": "n8n-nodes-base.set",
"position": [
480,
0
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "77fd67c8-e3a8-44a9-a347-5d10797ab01b",
"name": "Youtube API Key",
"type": "string",
"value": "YOUR_YOUTUBE_API_KEY"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "5e91edf9-2ea6-4615-8a8c-710ac509a1f2",
"name": "Código",
"type": "n8n-nodes-base.code",
"position": [
1200,
120
],
"parameters": {
"jsCode": "// Input: HTTP Request result (object with .items)\n// Output: one item per comment, flat data ready to write\nconst out = [];\nconst root = items[0].json || {};\nfor (const it of (root.items || [])) {\n const s = it.snippet || {};\n const tl = (s.topLevelComment || {}).snippet || {};\n out.push({\n json: {\n comment_id: it.id,\n video_id: s.videoId,\n author: tl.authorDisplayName,\n comment_text: tl.textOriginal,\n like_count: tl.likeCount || 0,\n published_at: tl.publishedAt,\n author_channel_id: (tl.authorChannelId || {}).value,\n author_channel_url: tl.authorChannelUrl\n }\n });\n}\nreturn out;\n"
},
"typeVersion": 2
},
{
"id": "bfd6ff22-ae88-4d21-ac46-9a92c518cd87",
"name": "Cadena Básica de LLM",
"type": "@n8n/n8n-nodes-langchain.chainLlm",
"position": [
1420,
120
],
"parameters": {
"text": "=Data to analyze:\nyoutube_comment_text: \"{{ $json.comment_text }}\"\nlike_count: {{ $json.like_count }}\n\nConsider like_count only as a weak signal (slightly increase |score| if like_count is high and sentiment is clear).\nReturn the JSON only.\n",
"batching": {},
"messages": {
"messageValues": [
{
"message": "=You are a concise social-listening analyst. \nReturn ONLY valid JSON (no markdown, no backticks).\nRules:\n- sentiment ∈ [\"positive\",\"neutral\",\"negative\"] based on overall tone.\n- score is a float in [-1, 1]; map highly positive ≈ 0.7..1, neutral ≈ -0.2..0.2, highly negative ≈ -1..-0.7.\n- keywords is an array of 3–8 short keywords (lowercase, ascii, no emojis/hashtags/stopwords, no duplicates).\n- language is a 2-letter ISO 639-1 code (e.g., \"id\", \"en\").\n- Remove emojis, excessive punctuation, URLs from consideration (but keep meaning).\n- If text is spam/promotional or meaningless, set sentiment=\"neutral\" and score≈0 and still extract topic keywords.\n- Output must be STRICT JSON with exactly these keys: sentiment, score, keywords, language.\n"
}
]
},
"promptType": "define",
"hasOutputParser": true
},
"executeOnce": false,
"typeVersion": 1.7
},
{
"id": "29802a0-fd43-467a-8afd-534f9099c492",
"name": "Analizador de Salida Estructurada",
"type": "@n8n/n8n-nodes-langchain.outputParserStructured",
"position": [
1580,
340
],
"parameters": {
"jsonSchemaExample": "{\n \"sentiment\": \"neutral\",\n \"score\": 0.05,\n \"keywords\": [\"jogja\", \"nyimak\", \"harapan\", \"promo\"],\n \"language\": \"id\"\n}"
},
"typeVersion": 1.3
},
{
"id": "67469c87-b1cf-4ed7-8b41-ffadf44fc32c",
"name": "Modelo de Chat OpenRouter",
"type": "@n8n/n8n-nodes-langchain.lmChatOpenRouter",
"position": [
1420,
340
],
"parameters": {
"model": "google/gemini-2.0-flash-exp:free",
"options": {}
},
"typeVersion": 1
},
{
"id": "2aa4b295-b562-43ff-b9df-b411dc781181",
"name": "Código1",
"type": "n8n-nodes-base.code",
"position": [
1780,
120
],
"parameters": {
"jsCode": "// n8n Code node: Normalize LLM JSON -> {sentiment, score, keywords, language}\nconst out = [];\n\nfor (const item of items) {\n // 1) Get candidate payload from several possible formats\n let payload = item.json?.output ?? item.json?.data ?? item.json;\n\n // 2) If string, try to parse JSON\n if (typeof payload === 'string') {\n try { payload = JSON.parse(payload); } catch { payload = {}; }\n }\n\n // 3) If there's still payload.output as string JSON, parse it too\n if (payload && typeof payload.output === 'string') {\n try { payload = JSON.parse(payload.output); } catch {}\n }\n\n // 4) Normalize fields\n const allowed = new Set(['positive','neutral','negative']);\n let sentiment = String(payload?.sentiment ?? '').toLowerCase();\n if (!allowed.has(sentiment)) sentiment = 'neutral';\n\n let score = Number(payload?.score);\n if (!Number.isFinite(score)) score = 0;\n if (score > 1) score = 1;\n if (score < -1) score = -1;\n\n let keywords = Array.isArray(payload?.keywords) ? payload.keywords : [];\n keywords = keywords\n .map(k => String(k).toLowerCase().trim())\n .filter(k => k && k.length <= 32)\n .slice(0, 8);\n\n let language = String(payload?.language ?? '').slice(0, 2).toLowerCase();\n if (!language) language = 'en';\n\n out.push({ json: { sentiment, score, keywords, language } });\n}\n\nreturn out;\n"
},
"typeVersion": 2
},
{
"id": "1a6a9a96-abb2-468c-b7b2-45e527ac1e36",
"name": "Añadir o actualizar fila en hoja",
"type": "n8n-nodes-base.googleSheets",
"position": [
2000,
120
],
"parameters": {
"columns": {
"value": {
"author": "={{ $('Código').item.json.author }}",
"keywords": "={{ $json.keywords }}",
"video_id": "={{ $('Código').item.json.video_id }}",
"sentiment": "={{ $json.sentiment }}",
"comment_id": "={{ $('Código').item.json.comment_id }}",
"like_count": "={{ $('Código').item.json.like_count }}",
"comment_text": "={{ $('Código').item.json.comment_text }}",
"published_at": "={{ $('Código').item.json.published_at }}",
"sentiment_score": "={{ $json.score }}",
"author_channel_id": "={{ $('Código').item.json.author_channel_id }}",
"author_channel_url": "={{ $('Código').item.json.author_channel_url }}"
},
"schema": [
{
"id": "video_id",
"type": "string",
"display": true,
"removed": false,
"required": false,
"displayName": "video_id",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "comment_id",
"type": "string",
"display": true,
"removed": false,
"required": false,
"displayName": "comment_id",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "author",
"type": "string",
"display": true,
"required": false,
"displayName": "author",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "comment_text",
"type": "string",
"display": true,
"required": false,
"displayName": "comment_text",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "sentiment",
"type": "string",
"display": true,
"required": false,
"displayName": "sentiment",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "sentiment_score",
"type": "string",
"display": true,
"required": false,
"displayName": "sentiment_score",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "keywords",
"type": "string",
"display": true,
"required": false,
"displayName": "keywords",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "like_count",
"type": "string",
"display": true,
"required": false,
"displayName": "like_count",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "published_at",
"type": "string",
"display": true,
"required": false,
"displayName": "published_at",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "author_channel_id",
"type": "string",
"display": true,
"required": false,
"displayName": "author_channel_id",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "author_channel_url",
"type": "string",
"display": true,
"required": false,
"displayName": "author_channel_url",
"defaultMatch": false,
"canBeUsedToMatch": true
}
],
"mappingMode": "defineBelow",
"matchingColumns": [
"comment_id"
],
"attemptToConvertTypes": false,
"convertFieldsToString": false
},
"options": {},
"operation": "appendOrUpdate",
"sheetName": {
"__rl": true,
"mode": "list",
"value": ""
},
"documentId": {
"__rl": true,
"mode": "list",
"value": ""
}
},
"typeVersion": 4.6
},
{
"id": "ddfbf71a-2c92-415d-b3aa-fe087c9c9835",
"name": "Agregar",
"type": "n8n-nodes-base.aggregate",
"position": [
2220,
120
],
"parameters": {
"options": {},
"fieldsToAggregate": {
"fieldToAggregate": [
{
"fieldToAggregate": "sentiment"
},
{
"fieldToAggregate": "sentiment_score"
},
{
"fieldToAggregate": "keywords"
}
]
}
},
"typeVersion": 1
},
{
"id": "655ebe44-a4df-4184-87b9-ac18c5734c62",
"name": "Código2",
"type": "n8n-nodes-base.code",
"position": [
2440,
120
],
"parameters": {
"jsCode": "// n8n Code node: Summarize sentiments & top keywords\nconst isAggregatedArrays =\n  items.length === 1 &&\n  items[0].json &&\n  (Array.isArray(items[0].json.sentiment) || Array.isArray(items[0].json.keywords));\n\nfunction summarizeFromArrays(j) {\n  const sentiments = Array.isArray(j.sentiment) ? j.sentiment : [];\n  const scores = Array.isArray(j.sentiment_score) ? j.sentiment_score : [];\n  let keywordsNested = Array.isArray(j.keywords) ? j.keywords : [];\n\n  // ensure keywordsNested = array of arrays\n  keywordsNested = keywordsNested.map(k =>\n    Array.isArray(k) ? k : (typeof k === 'string' ? k.split(',') : [])\n  );\n\n  const total = Math.max(sentiments.length, keywordsNested.length, scores.length, 0);\n  const cnt = { positive: 0, neutral: 0, negative: 0 };\n  const kwFreq = {};\n\n  for (let i = 0; i < total; i++) {\n    const s = String((sentiments[i] ?? 'neutral')).toLowerCase();\n    const normS = (s === 'positive' || s === 'negative' || s === 'neutral') ? s : 'neutral';\n    cnt[normS]++;\n\n    const kws = Array.isArray(keywordsNested[i]) ? keywordsNested[i] : [];\n    for (let k of kws) {\n      const key = String(k).toLowerCase().trim();\n      if (!key) continue;\n      kwFreq[key] = (kwFreq[key] || 0) + 1;\n    }\n  }\n\n  const pct = v => total ? Math.round((v / total) * 100) : 0;\n  const top_keywords = Object.entries(kwFreq)\n    .sort((a, b) => (b[1] - a[1]) || a[0].localeCompare(b[0]))\n    .slice(0, 10)\n    .map(([k, v]) => `${k} (${v})`);\n\n  return {\n    total_comments: total,\n    positive: cnt.positive,\n    neutral: cnt.neutral,\n    negative: cnt.negative,\n    positive_pct: pct(cnt.positive),\n    neutral_pct: pct(cnt.neutral),\n    negative_pct: pct(cnt.negative),\n    top_keywords\n  };\n}\n\nfunction summarizeFromItems(items) {\n  const cnt = { positive: 0, neutral: 0, negative: 0 };\n  const kwFreq = {};\n  let total = 0;\n\n  for (const it of items) {\n    const s = String(it.json?.sentiment ?? 'neutral').toLowerCase();\n    const normS = (s === 'positive' || s === 'negative' || s === 'neutral') ? s : 'neutral';\n    cnt[normS]++;\n    total++;\n\n    const kws = Array.isArray(it.json?.keywords) ? it.json.keywords : [];\n    for (let k of kws) {\n      const key = String(k).toLowerCase().trim();\n      if (!key) continue;\n      kwFreq[key] = (kwFreq[key] || 0) + 1;\n    }\n  }\n\n  const pct = v => total ? Math.round((v / total) * 100) : 0;\n  const top_keywords = Object.entries(kwFreq)\n    .sort((a, b) => (b[1] - a[1]) || a[0].localeCompare(b[0]))\n    .slice(0, 10)\n    .map(([k, v]) => `${k} (${v})`);\n\n  return {\n    total_comments: total,\n    positive: cnt.positive,\n    neutral: cnt.neutral,\n    negative: cnt.negative,\n    positive_pct: pct(cnt.positive),\n    neutral_pct: pct(cnt.neutral),\n    negative_pct: pct(cnt.negative),\n    top_keywords\n  };\n}\n\nconst summary = isAggregatedArrays\n  ? summarizeFromArrays(items[0].json)\n  : summarizeFromItems(items);\n\nreturn [{ json: summary }];\n"
},
"typeVersion": 2
},
{
"id": "c1f271f1-bc1b-4447-9902-aacbfd0e9750",
"name": "Enviar mensaje de texto",
"type": "n8n-nodes-base.telegram",
"position": [
2660,
120
],
"parameters": {
"text": "=📊 YouTube Comments Report\nVideo : {{ $('Obtener fila(s) en hoja').item.json.video_url }}\nTotal: {{ $json.total_comments }}\n👍 Positive: {{$json.positive}} ({{ $json.positive_pct }}%)\n😐 Neutral: {{$json.neutral}} ({{ $json.neutral_pct }}%)\n👎 Negative: {{$json.negative}} ({{ $json.negative_pct }}%)\n\n🔎 Top Keywords:\n{{ $json.top_keywords.join(\", \") }}\n\n_Ran at {{new Date().toLocaleString()}}_\n",
"chatId": "YOUR_TELEGRAM_CHAT_ID",
"additionalFields": {
"appendAttribution": false
}
},
"typeVersion": 1.2
},
{
"id": "f010f65d-6c4d-4468-afcb-57f2a2f0cc9a",
"name": "Sin operación, no hacer nada",
"type": "n8n-nodes-base.noOp",
"position": [
960,
-100
],
"parameters": {},
"typeVersion": 1
},
{
"id": "923d6e70-ef03-4533-b1f1-535f76f93ce6",
"name": "Nota Adhesiva1",
"type": "n8n-nodes-base.stickyNote",
"position": [
-660,
-400
],
"parameters": {
"width": 580,
"height": 1040,
"content": "# YouTube Comment Sentiment & Keyword Extractor\n\nThis workflow automatically retrieves comments from YouTube videos listed in Google Sheets, analyzes sentiment and keywords using an LLM, stores the results, then generates an aggregate summary and sends a report to Telegram.\n\n\n## ✨ Key Features\n- **Automated Scheduling** – Runs the process at regular intervals.\n- **Google Sheets Integration** – Fetches a list of videos and stores analysis results directly in Google Sheets.\n- **YouTube Comment Retrieval** – Retrieves top-level comments (up to 100 per video; can be extended with pagination).\n- **Comment Parsing** – Flattens the API response into one item per comment.\n- **AI Sentiment & Keyword Analysis** – Powered by an LLM model to extract:\n - Sentiment (`positive`, `neutral`, `negative`)\n - Sentiment score (-1 to 1)\n - Keywords (cleaned from stopwords, emojis, hashtags)\n - Language (ISO 639-1, 2-letter code)\n- **Data Normalization** – Ensures LLM output is consistent and safe to write to the sheet.\n- **Telegram Reporting** – Sends a concise report for each video directly to a Telegram chat.\n\n---\n\n## 🔐 Required Credentials\n- **Google Sheets OAuth2 API** – Read & write access to the spreadsheet.\n- **YouTube Data API Key** – To fetch comments via `commentThreads`.\n- **OpenRouter API Key** – To access the LLM model.\n- **Telegram Bot API** – To send reports via a Telegram bot.\n\n---\n\n## 🎁 Benefits\n- **End-to-End Automation** – From comment retrieval to reporting, no manual steps required.\n- **No Duplication** – Each comment is uniquely tracked by `comment_id`.\n- **Data Transparency** – Raw and analyzed data stored neatly in Google Sheets.\n- **Actionable Insights** – Sentiment and keyword summaries ready for content strategy decisions.\n- **Scalable** – Can be extended for pagination, reply comment analysis, or additional metrics.\n- **Cross-Platform** – Direct Telegram integration for easy monitoring on any device.\n"
},
"typeVersion": 1
},
{
"id": "53fbf0dd-4131-4833-abca-96a4ca171241",
"name": "Nota Adhesiva",
"type": "n8n-nodes-base.stickyNote",
"position": [
-40,
-220
],
"parameters": {
"color": 2,
"width": 660,
"height": 400,
"content": "- Run the workflow on a schedule.\n- Fetch the data row that contains the video_id and video_url from the spreadsheet.\n- Add a YouTube API key field so that subsequent records (HTTP requests to the YouTube API) can access it."
},
"typeVersion": 1
},
{
"id": "a7500b30-f211-4311-843e-0b94a1c7efa4",
"name": "Nota Adhesiva2",
"type": "n8n-nodes-base.stickyNote",
"position": [
660,
-300
],
"parameters": {
"color": 4,
"width": 660,
"height": 560,
"content": "- Fetch data from the previous node and process it either one by one or in batches.\n- Call the YouTube Data API endpoint.\n- Receive the JSON results from the YouTube API and flatten the response so that each comment becomes a separate item."
},
"typeVersion": 1
},
{
"id": "da86e6c7-637a-4a02-8755-e687d22cb8f7",
"name": "Nota Adhesiva3",
"type": "n8n-nodes-base.stickyNote",
"position": [
1360,
-80
],
"parameters": {
"color": 5,
"width": 800,
"height": 560,
"content": "- Use the LLM model to analyze the comment text.\n- Ensure the LLM output follows a strict JSON structure based on the example schema, with no incorrect formats or keys.\n- Normalize the LLM JSON output.\n- Save the analysis results along with the comment metadata to Google Sheets."
},
"typeVersion": 1
},
{
"id": "83530f28-16df-418f-8763-5b6096b860e5",
"name": "Nota Adhesiva4",
"type": "n8n-nodes-base.stickyNote",
"position": [
2180,
-80
],
"parameters": {
"color": 6,
"width": 640,
"height": 560,
"content": "- Combine values from multiple comment analysis columns.\n- Create a summary based on the aggregated results.\n- Send the summary report to a Telegram chat."
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "ab1692ad-3351-4c24-b8dd-f7416de30a65",
"connections": {
"5e91edf9-2ea6-4615-8a8c-710ac509a1f2": {
"main": [
[
{
"node": "bfd6ff22-ae88-4d21-ac46-9a92c518cd87",
"type": "main",
"index": 0
}
]
]
},
"2aa4b295-b562-43ff-b9df-b411dc781181": {
"main": [
[
{
"node": "1a6a9a96-abb2-468c-b7b2-45e527ac1e36",
"type": "main",
"index": 0
}
]
]
},
"655ebe44-a4df-4184-87b9-ac18c5734c62": {
"main": [
[
{
"node": "c1f271f1-bc1b-4447-9902-aacbfd0e9750",
"type": "main",
"index": 0
}
]
]
},
"ddfbf71a-2c92-415d-b3aa-fe087c9c9835": {
"main": [
[
{
"node": "655ebe44-a4df-4184-87b9-ac18c5734c62",
"type": "main",
"index": 0
}
]
]
},
"1a194433-415d-42cd-aeda-c983b5d81883": {
"main": [
[
{
"node": "5e91edf9-2ea6-4615-8a8c-710ac509a1f2",
"type": "main",
"index": 0
}
]
]
},
"bfd6ff22-ae88-4d21-ac46-9a92c518cd87": {
"main": [
[
{
"node": "2aa4b295-b562-43ff-b9df-b411dc781181",
"type": "main",
"index": 0
}
]
]
},
"514959bb-14aa-4947-9d78-f903cb3201e3": {
"main": [
[
{
"node": "f010f65d-6c4d-4468-afcb-57f2a2f0cc9a",
"type": "main",
"index": 0
}
],
[
{
"node": "1a194433-415d-42cd-aeda-c983b5d81883",
"type": "main",
"index": 0
}
]
]
},
"93821e4a-b8d9-4364-aa9c-2e9200c49691": {
"main": [
[
{
"node": "514959bb-14aa-4947-9d78-f903cb3201e3",
"type": "main",
"index": 0
}
]
]
},
"55d27fc7-ac2c-4ccc-a98f-f8c4ab7547c4": {
"main": [
[
{
"node": "bd198bd2-705f-4fc9-ba30-c1facfa06359",
"type": "main",
"index": 0
}
]
]
},
"bd198bd2-705f-4fc9-ba30-c1facfa06359": {
"main": [
[
{
"node": "93821e4a-b8d9-4364-aa9c-2e9200c49691",
"type": "main",
"index": 0
}
]
]
},
"c1f271f1-bc1b-4447-9902-aacbfd0e9750": {
"main": [
[
{
"node": "514959bb-14aa-4947-9d78-f903cb3201e3",
"type": "main",
"index": 0
}
]
]
},
"67469c87-b1cf-4ed7-8b41-ffadf44fc32c": {
"ai_languageModel": [
[
{
"node": "bfd6ff22-ae88-4d21-ac46-9a92c518cd87",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"29802a0-fd43-467a-8afd-534f9099c492": {
"ai_outputParser": [
[
{
"node": "bfd6ff22-ae88-4d21-ac46-9a92c518cd87",
"type": "ai_outputParser",
"index": 0
}
]
]
},
"1a6a9a96-abb2-468c-b7b2-45e527ac1e36": {
"main": [
[
{
"node": "ddfbf71a-2c92-415d-b3aa-fe087c9c9835",
"type": "main",
"index": 0
}
]
]
}
}
}
¿Cómo usar este flujo de trabajo?
Copie el código de configuración JSON de arriba, cree un nuevo flujo de trabajo en su instancia de n8n y seleccione "Importar desde JSON", pegue la configuración y luego modifique la configuración de credenciales según sea necesario.
¿En qué escenarios es adecuado este flujo de trabajo?
Avanzado - Investigación de mercado, IA Multimodal
¿Es de pago?
Este flujo de trabajo es completamente gratuito, puede importarlo y usarlo directamente. Sin embargo, tenga en cuenta que los servicios de terceros utilizados en el flujo de trabajo (como la API de OpenRouter o la API de datos de YouTube) pueden requerir un pago por su cuenta.
Flujos de trabajo relacionados recomendados
Budi SJ
@budisj — I’m a Product Designer who also works as an Automation Developer. With a background in product design and systems thinking, I build user-centered workflows. My focus is on helping teams and businesses work more productively through impactful automation systems.
Compartir este flujo de trabajo