My workflow 2
Advanced
This is an automation workflow in the Market Research and AI Summarization domain, containing 16 nodes. It mainly uses Code, Merge, Webhook, Google Sheets, and SplitInBatches nodes. It automates deep-dive research using ScrapeGraphAI, GPT-4, and Google Sheets.
Prerequisites
- HTTP Webhook endpoint (generated automatically by n8n)
- Google Sheets API credentials
Export Workflow
You can use this workflow by importing the JSON configuration below into n8n.
{
"id": "VhEwspDqzu7ssFVE",
"meta": {
"instanceId": "f4b0efaa33080e7774e0d9285c40c7abcd2c6f7cf1a8b901fa7106170dd4cda3",
"templateCredsSetupCompleted": true
},
"name": "My workflow 2",
"tags": [],
"nodes": [
{
"id": "48a84828-73de-4f4b-beb1-60e668342c11",
"name": "調査リクエスト Webhook",
"type": "n8n-nodes-base.webhook",
"position": [
-2048,
624
],
"webhookId": "5a9368a9-013f-41db-82cc-18be19ea6684",
"parameters": {
"path": "research-trigger",
"options": {},
"httpMethod": "POST",
"responseMode": "responseNode"
},
"typeVersion": 1.1
},
{
"id": "5d8a05fa-1528-4dc4-95cd-d99625a2221b",
"name": "調査設定プロセッサ",
"type": "n8n-nodes-base.code",
"position": [
-1760,
624
],
"parameters": {
"jsCode": "// Extract and validate research parameters\nconst body = $input.all()[0].json.body;\n\n// Default research configuration\nconst researchConfig = {\n topic: body.topic || 'artificial intelligence trends',\n depth: body.depth || 'comprehensive', // basic, detailed, comprehensive\n sources: body.sources || ['web', 'academic', 'news'],\n timeframe: body.timeframe || '6months',\n language: body.language || 'en',\n maxSources: body.maxSources || 10,\n analysisType: body.analysisType || 'summary' // summary, detailed, comparative\n};\n\n// Generate search queries based on topic\nconst baseQueries = [\n `${researchConfig.topic} latest developments`,\n `${researchConfig.topic} research findings`,\n `${researchConfig.topic} market analysis`,\n `${researchConfig.topic} expert opinions`,\n `${researchConfig.topic} case studies`\n];\n\n// Add specific queries based on depth\nif (researchConfig.depth === 'comprehensive') {\n baseQueries.push(\n `${researchConfig.topic} academic papers`,\n `${researchConfig.topic} industry reports`,\n `${researchConfig.topic} statistical data`,\n `${researchConfig.topic} future predictions`\n );\n}\n\nreturn [{\n json: {\n ...researchConfig,\n searchQueries: baseQueries,\n timestamp: new Date().toISOString(),\n sessionId: `research_${Date.now()}`\n }\n}];"
},
"typeVersion": 2
},
{
"id": "19e3c76b-f0fb-4324-b212-585ab132bde5",
"name": "検索クエリ分割",
"type": "n8n-nodes-base.splitInBatches",
"position": [
-1456,
624
],
"parameters": {
"options": {}
},
"typeVersion": 3
},
{
"id": "6eb0ff10-aaf6-430f-aea0-7c0cbe950b95",
"name": "クエリセレクター",
"type": "n8n-nodes-base.code",
"position": [
-1152,
624
],
"parameters": {
"jsCode": "// Get current batch data\nconst items = $input.all();\nconst currentItem = items[0].json;\nconst queries = currentItem.searchQueries;\nconst currentBatch = $('Split Search Queries').item.json;\n\n// Get current query\nconst currentQuery = queries[currentBatch.index];\n\nreturn [{\n json: {\n ...currentItem,\n currentQuery: currentQuery,\n batchIndex: currentBatch.index\n }\n}];"
},
"typeVersion": 2
},
{
"id": "99f73593-0ddd-4fc9-810f-8b1793cd8476",
"name": "AI調査スクレイパー",
"type": "n8n-nodes-scrapegraphai.scrapegraphAi",
"position": [
-848,
624
],
"parameters": {
"userPrompt": "Research and extract comprehensive information about this topic. Provide: 1) Key findings and insights, 2) Important statistics or data points, 3) Expert quotes or opinions, 4) Recent developments, 5) Source credibility assessment. Format as structured JSON with fields: title, summary, keyPoints, statistics, quotes, sources, credibilityScore, datePublished, relevanceScore.",
"websiteUrl": "={{ $json.currentQuery }}"
},
"typeVersion": 1
},
{
"id": "da52e96d-0aa2-41ef-886e-bd396e0f42f2",
"name": "ニュースソーススクレイパー",
"type": "n8n-nodes-scrapegraphai.scrapegraphAi",
"position": [
-848,
832
],
"parameters": {
"userPrompt": "Extract recent news articles about this topic. For each article provide: headline, publication date, source, brief summary, and direct URL. Focus on credible news sources and recent publications within the last 6 months.",
"websiteUrl": "https://www.google.com/search?q={{ encodeURIComponent($json.currentQuery) }}&tbm=nws"
},
"typeVersion": 1
},
{
"id": "0ee6cf16-02e5-4a3b-b068-dd76a1351718",
"name": "学術ソーススクレイパー",
"type": "n8n-nodes-scrapegraphai.scrapegraphAi",
"position": [
-848,
1024
],
"parameters": {
"userPrompt": "Extract academic papers and research studies. For each paper provide: title, authors, publication year, journal/conference, citation count, abstract summary, and DOI/URL if available. Focus on peer-reviewed sources and recent publications.",
"websiteUrl": "https://scholar.google.com/scholar?q={{ encodeURIComponent($json.currentQuery) }}"
},
"typeVersion": 1
},
{
"id": "3228908f-f816-4a0c-889b-abf756281eb8",
"name": "調査ソース統合",
"type": "n8n-nodes-base.merge",
"position": [
-560,
832
],
"parameters": {
"mode": "combine",
"options": {},
"mergeByFields": {
"values": [
{}
]
}
},
"typeVersion": 2.1
},
{
"id": "90b55ee1-3404-4db2-aec1-6d6219043c09",
"name": "調査データプロセッサ",
"type": "n8n-nodes-base.code",
"position": [
-256,
832
],
"parameters": {
"jsCode": "// Combine and process all research data\nconst allItems = $input.all();\nconst researchData = allItems[0].json;\nconst newsData = allItems[1]?.json || {};\nconst academicData = allItems[2]?.json || {};\n\n// Extract and structure the research findings\nconst processedData = {\n sessionId: researchData.sessionId,\n query: researchData.currentQuery,\n batchIndex: researchData.batchIndex,\n timestamp: new Date().toISOString(),\n \n // General research findings\n generalFindings: {\n title: researchData.result?.title || 'Research Findings',\n summary: researchData.result?.summary || '',\n keyPoints: researchData.result?.keyPoints || [],\n statistics: researchData.result?.statistics || [],\n credibilityScore: researchData.result?.credibilityScore || 0\n },\n \n // News findings\n newsFindings: {\n articles: newsData.result?.articles || [],\n totalArticles: newsData.result?.articles?.length || 0\n },\n \n // Academic findings\n academicFindings: {\n papers: academicData.result?.papers || [],\n totalPapers: academicData.result?.papers?.length || 0\n },\n \n // Meta information\n sourceTypes: ['general', 'news', 'academic'],\n totalSources: (researchData.result?.sources?.length || 0) + \n (newsData.result?.articles?.length || 0) + \n (academicData.result?.papers?.length || 0)\n};\n\nreturn [{\n json: processedData\n}];"
},
"typeVersion": 2
},
{
"id": "7eb34b80-f6d2-4e80-83f5-529d4748cbec",
"name": "調査データストレージ",
"type": "n8n-nodes-base.googleSheets",
"position": [
352,
832
],
"parameters": {
"columns": {
"value": {},
"schema": [
{
"id": "sessionId",
"type": "string",
"display": true,
"required": false,
"displayName": "Session ID",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "query",
"type": "string",
"display": true,
"required": false,
"displayName": "Research Query",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "timestamp",
"type": "string",
"display": true,
"required": false,
"displayName": "Timestamp",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "analysis",
"type": "string",
"display": true,
"required": false,
"displayName": "AI Analysis",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "totalSources",
"type": "number",
"display": true,
"required": false,
"displayName": "Total Sources",
"defaultMatch": false,
"canBeUsedToMatch": true
}
],
"mappingMode": "autoMapInputData",
"matchingColumns": []
},
"options": {},
"operation": "append",
"sheetName": {
"__rl": true,
"mode": "name",
"value": "Research_Data"
},
"documentId": {
"__rl": true,
"mode": "url",
"value": ""
}
},
"typeVersion": 4.5
},
{
"id": "d093ce1d-9716-4254-89b7-4b8bffd23b48",
"name": "調査完了レスポンス",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
656,
832
],
"parameters": {
"options": {},
"respondWith": "json",
"responseBody": "={{ JSON.stringify({\n status: 'completed',\n sessionId: $json.sessionId,\n message: 'Research analysis completed successfully',\n totalSources: $json.totalSources,\n timestamp: $json.timestamp\n}) }}"
},
"typeVersion": 1.1
},
{
"id": "8398d709-67b8-4ad4-90f0-d2c041d4678e",
"name": "Webhook トリガーガイド",
"type": "n8n-nodes-base.stickyNote",
"position": [
-2160,
-448
],
"parameters": {
"color": 2,
"width": 520,
"height": 1732,
"content": "# Step 1: Research Request Webhook 🎯\n\nThis webhook endpoint receives research requests and initiates the deep research process.\n\n## Request Format\n```json\n{\n \"topic\": \"artificial intelligence in healthcare\",\n \"depth\": \"comprehensive\",\n \"sources\": [\"web\", \"academic\", \"news\"],\n \"timeframe\": \"6months\",\n \"maxSources\": 15,\n \"analysisType\": \"detailed\"\n}\n```\n\n## Configuration\n- **Method**: POST\n- **Path**: /research-trigger\n- **Authentication**: Optional API key\n- **Rate Limiting**: Configurable\n\n## Depth Levels\n- **Basic**: Quick overview with 3-5 sources\n- **Detailed**: Comprehensive analysis with 8-12 sources\n- **Comprehensive**: Deep dive with 15+ sources and academic papers\n\n## Source Types\n- **Web**: General web content and industry sites\n- **News**: Recent news articles and press releases\n- **Academic**: Peer-reviewed papers and research studies"
},
"typeVersion": 1
},
{
"id": "965963f7-6f98-4954-a0f0-916ab00477be",
"name": "設定ガイド",
"type": "n8n-nodes-base.stickyNote",
"position": [
-1600,
-448
],
"parameters": {
"color": 2,
"width": 520,
"height": 1748,
"content": "# Step 2: Research Configuration Processor 🔧\n\nThis node processes and validates the incoming research request, setting up the research parameters.\n\n## What it does\n- Validates and sanitizes input parameters\n- Sets default values for missing parameters\n- Generates multiple search queries based on topic\n- Creates unique session ID for tracking\n- Configures research depth and scope\n\n## Query Generation Strategy\n- **Base Queries**: Core topic searches\n- **Depth-Specific**: Additional queries for comprehensive research\n- **Time-Sensitive**: Recent developments and trends\n- **Multi-Angle**: Different perspectives and viewpoints\n\n## Customization Options\n- Modify query generation logic\n- Add industry-specific search patterns\n- Implement custom validation rules\n- Configure default research parameters"
},
"typeVersion": 1
},
{
"id": "47a160d4-d829-4133-93fa-aa4dbd41f785",
"name": "AIスクレイピングガイド",
"type": "n8n-nodes-base.stickyNote",
"position": [
-1040,
-448
],
"parameters": {
"color": 3,
"width": 520,
"height": 1748,
"content": "# Step 3: Multi-Source AI Scraping 🤖\n\nThree parallel AI-powered scrapers collect data from different source types for comprehensive research coverage.\n\n## AI Research Scraper\n- **Purpose**: General web research and industry insights\n- **Focus**: Key findings, statistics, expert opinions\n- **Output**: Structured insights with credibility scores\n\n## News Sources Scraper\n- **Purpose**: Recent news and current developments\n- **Focus**: Headlines, publication dates, credible sources\n- **Output**: Timestamped news articles with summaries\n\n## Academic Sources Scraper\n- **Purpose**: Peer-reviewed research and scholarly articles\n- **Focus**: Academic papers, citations, research studies\n- **Output**: Scientific literature with metadata\n\n## ScrapeGraphAI Benefits\n- **AI-Powered**: Intelligent content extraction\n- **Structured Output**: Consistent data format\n- **Source Validation**: Credibility assessment\n- **Multi-Language**: Global research capability"
},
"typeVersion": 1
},
{
"id": "503cdf42-cee7-4b44-a2fd-4f4a4a134f60",
"name": "処理・分析ガイド",
"type": "n8n-nodes-base.stickyNote",
"position": [
-464,
-448
],
"parameters": {
"color": 3,
"width": 520,
"height": 1748,
"content": "# Step 4: Data Processing & AI Analysis 🧠\n\nAdvanced data processing and AI-powered analysis to generate actionable insights from collected research data.\n\n## Research Data Processor\n- **Combines**: All source types into unified structure\n- **Validates**: Data quality and completeness\n- **Enriches**: Metadata and source attribution\n- **Structures**: For optimal analysis and storage\n\n## AI Research Analyst\n- **Model**: GPT-4 for sophisticated analysis\n- **Analysis Types**: Summary, trends, conflicts, reliability\n- **Output**: Executive summary with actionable insights\n- **Temperature**: Low (0.3) for consistent, factual analysis\n\n## Analysis Components\n1. **Executive Summary**: High-level overview\n2. **Key Insights**: Major findings and trends\n3. **Reliability Assessment**: Source credibility evaluation\n4. **Recommendations**: Actionable next steps\n5. **Further Research**: Suggested investigation areas"
},
"typeVersion": 1
},
{
"id": "0105d893-94ce-465d-9ef8-8f144280f0c9",
"name": "ストレージ・レスポンスガイド",
"type": "n8n-nodes-base.stickyNote",
"position": [
144,
-432
],
"parameters": {
"color": 4,
"width": 840,
"height": 1716,
"content": "# Step 5: Data Storage & Response 📊\n\nSecure storage of research findings and structured response delivery for seamless integration with other systems.\n\n## Google Sheets Storage\n- **Sheet Structure**: Research_Data with comprehensive columns\n- **Data Retention**: Historical research for trend analysis\n- **Access Control**: Secure OAuth2 authentication\n- **Format**: Structured data ready for analysis and reporting\n\n## Response Delivery\n- **Format**: JSON with status and metadata\n- **Content**: Session ID, completion status, source count\n- **Integration**: Ready for webhook consumers and APIs\n- **Tracking**: Unique session IDs for research correlation\n\n## Data Management Features\n- **Versioning**: Track research iterations\n- **Export**: Multiple format support\n- **Sharing**: Team collaboration capabilities\n- **Analytics**: Built-in Google Sheets analysis tools\n\n## Use Cases\n- **Market Research**: Competitive analysis and trends\n- **Academic Research**: Literature reviews and citations\n- **Business Intelligence**: Industry insights and reports"
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "076dd376-d6cb-4851-b335-e074cd47911c",
"connections": {
"6eb0ff10-aaf6-430f-aea0-7c0cbe950b95": {
"main": [
[
{
"node": "99f73593-0ddd-4fc9-810f-8b1793cd8476",
"type": "main",
"index": 0
},
{
"node": "da52e96d-0aa2-41ef-886e-bd396e0f42f2",
"type": "main",
"index": 0
},
{
"node": "0ee6cf16-02e5-4a3b-b068-dd76a1351718",
"type": "main",
"index": 0
}
]
]
},
"99f73593-0ddd-4fc9-810f-8b1793cd8476": {
"main": [
[
{
"node": "3228908f-f816-4a0c-889b-abf756281eb8",
"type": "main",
"index": 0
}
]
]
},
"da52e96d-0aa2-41ef-886e-bd396e0f42f2": {
"main": [
[
{
"node": "3228908f-f816-4a0c-889b-abf756281eb8",
"type": "main",
"index": 1
}
]
]
},
"19e3c76b-f0fb-4324-b212-585ab132bde5": {
"main": [
[
{
"node": "6eb0ff10-aaf6-430f-aea0-7c0cbe950b95",
"type": "main",
"index": 0
}
]
]
},
"7eb34b80-f6d2-4e80-83f5-529d4748cbec": {
"main": [
[
{
"node": "d093ce1d-9716-4254-89b7-4b8bffd23b48",
"type": "main",
"index": 0
}
]
]
},
"3228908f-f816-4a0c-889b-abf756281eb8": {
"main": [
[
{
"node": "90b55ee1-3404-4db2-aec1-6d6219043c09",
"type": "main",
"index": 0
}
]
]
},
"90b55ee1-3404-4db2-aec1-6d6219043c09": {
"main": [
[
{
"node": "7eb34b80-f6d2-4e80-83f5-529d4748cbec",
"type": "main",
"index": 0
}
]
]
},
"48a84828-73de-4f4b-beb1-60e668342c11": {
"main": [
[
{
"node": "5d8a05fa-1528-4dc4-95cd-d99625a2221b",
"type": "main",
"index": 0
}
]
]
},
"5d8a05fa-1528-4dc4-95cd-d99625a2221b": {
"main": [
[
{
"node": "19e3c76b-f0fb-4324-b212-585ab132bde5",
"type": "main",
"index": 0
}
]
]
}
}
}
Frequently Asked Questions
How do I use this workflow?
Copy the JSON configuration above, create a new workflow in your n8n instance, select "Import from JSON", paste the configuration, and update the credentials as needed.
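Once imported and activated, the workflow is started by sending an HTTP POST to its webhook. The snippet below is a minimal trigger sketch, assuming Node.js 18+ (built-in fetch) and a placeholder instance URL that you would replace with your own; the payload fields mirror the request format documented in the workflow's webhook sticky note, and the logged response shape comes from the 調査完了レスポンス (respond-to-webhook) node. Note that n8n serves production webhooks under /webhook/<path> and test webhooks under /webhook-test/<path>.

// Minimal trigger sketch for the imported workflow (Node.js 18+, built-in fetch).
const N8N_BASE_URL = 'https://your-n8n-instance.example.com'; // placeholder, replace with your instance URL

async function triggerResearch() {
  // Fields follow the request format in the webhook sticky note; anything omitted
  // falls back to the defaults set by the 調査設定プロセッサ Code node.
  const payload = {
    topic: 'artificial intelligence in healthcare',
    depth: 'comprehensive',               // basic | detailed | comprehensive
    sources: ['web', 'academic', 'news'],
    timeframe: '6months',
    maxSources: 15,
    analysisType: 'detailed'              // summary | detailed | comparative
  };

  const res = await fetch(`${N8N_BASE_URL}/webhook/research-trigger`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload)
  });

  // The 調査完了レスポンス node returns JSON containing
  // status, sessionId, message, totalSources and timestamp.
  const result = await res.json();
  console.log(result);
}

triggerResearch().catch(console.error);

The same request can be sent with curl or any other HTTP client; only the /webhook/research-trigger path and the JSON body matter.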
What scenarios is this workflow suited for?
Advanced - Market Research, AI Summarization
Is it paid?
This workflow is completely free. However, third-party services used by the workflow (such as the OpenAI API) may incur their own charges.
Related Workflows
- My workflow 2: Automated dynamic pricing management combining AI competitor monitoring and revenue optimization (If, Code, Merge · 25 nodes · vinci-king-01 · Market Research)
- My workflow 2: AI-driven content gap analysis using ScrapeGraphAI and strategic planning (Code, Google Sheets, Schedule Trigger · 18 nodes · vinci-king-01 · Market Research)
- Automatically scrape news articles with ScrapeGraphAI and save them to Google Sheets (Code, Google Sheets, Schedule Trigger · 8 nodes · vinci-king-01 · Market Research)
- My workflow 2: Discover and analyze SEO backlinks using ScrapeGraphAI and Google Sheets (Code, Filter, Email Send · 17 nodes · vinci-king-01 · Market Research)
- Sales Pipeline Automation Dashboard: Automate the sales pipeline using HubSpot CRM, ScrapeGraphAI, and a Google Sheets dashboard (If, Code, Slack · 22 nodes · vinci-king-01 · Customer Management)
- My workflow 2: Build a support ticket analysis dashboard using ScrapeGraphAI, Google Sheets, and Slack alerts (If, Code, Slack · 15 nodes · vinci-king-01 · Ticket Management)