New nodes: 'Blend Shapes', 'Origins to Polyline', 'Reset Transform', 'Count Points', 'Index Points'; generalize Morph node behavior #4787
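# Benchmarks pull requests that touch the node graph using iai-callgrind (Valgrind/Callgrind),
# compares instruction counts against cached baselines built from master, and posts a PR comment
# when the change is significant.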
name: Profiling Changes

on:
  pull_request:
    paths:
      - 'node-graph/**'
      - 'Cargo.toml'
      - 'Cargo.lock'

env:
  CARGO_TERM_COLOR: always

jobs:
  profile:
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Install Valgrind
        run: |
          sudo apt update
          sudo apt install -y valgrind
      - name: Cache Rust dependencies
        uses: Swatinem/rust-cache@v2
        with:
          # Cache on Cargo.lock file
          cache-on-failure: true
      - name: Cache iai-callgrind binary
        id: cache-iai
        uses: actions/cache@v4
        with:
          path: ~/.cargo/bin/iai-callgrind-runner
          key: ${{ runner.os }}-iai-callgrind-runner-0.16.1
      - name: Install iai-callgrind
        if: steps.cache-iai.outputs.cache-hit != 'true'
        run: |
          cargo install iai-callgrind-runner@0.16.1
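      # Baseline phase: check out master, record its SHA, and (on cache miss) benchmark it so the PR run below has a saved baseline to diff against.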
      - name: Checkout master branch
        run: |
          git fetch origin master:master
          git checkout master
      - name: Get master commit SHA
        id: master-sha
        run: echo "sha=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
      - name: Get CPU info
        id: cpu-info
        run: |
          # Get CPU model and create a short hash for cache key
          CPU_MODEL=$(cat /proc/cpuinfo | grep "model name" | head -1 | cut -d: -f2 | xargs)
          CPU_HASH=$(echo "$CPU_MODEL" | sha256sum | cut -c1-8)
          echo "cpu-hash=$CPU_HASH" >> $GITHUB_OUTPUT
          echo "CPU: $CPU_MODEL (hash: $CPU_HASH)"
      - name: Cache benchmark baselines
        id: cache-benchmark-baselines
        uses: actions/cache@v4
        with:
          path: target/iai
          key: ${{ runner.os }}-${{ runner.arch }}-${{ steps.cpu-info.outputs.cpu-hash }}-benchmark-baselines-master-${{ steps.master-sha.outputs.sha }}
          restore-keys: |
            ${{ runner.os }}-${{ runner.arch }}-${{ steps.cpu-info.outputs.cpu-hash }}-benchmark-baselines-master-
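      # Baselines are only rebuilt when no cached entry exists for this exact master commit.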
      - name: Run baseline benchmarks
        if: steps.cache-benchmark-baselines.outputs.cache-hit != 'true'
        run: |
          # Compile benchmarks
          cargo bench --bench compile_demo_art_iai -- --save-baseline=master
          # Runtime benchmarks
          cargo bench --bench update_executor_iai -- --save-baseline=master
          cargo bench --bench run_once_iai -- --save-baseline=master
          cargo bench --bench run_cached_iai -- --save-baseline=master
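      # PR phase: switch to the PR head and run the same benchmarks, comparing each against the saved master baseline.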
      - name: Checkout PR branch
        run: |
          git checkout ${{ github.event.pull_request.head.sha }}
      - name: Run PR benchmarks
        run: |
          # Compile benchmarks
          cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g' > /tmp/compile_output.json
          # Runtime benchmarks
          cargo bench --bench update_executor_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g' > /tmp/update_output.json
          cargo bench --bench run_once_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g' > /tmp/run_once_output.json
          cargo bench --bench run_cached_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g' > /tmp/run_cached_output.json
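      # Each run emits iai-callgrind JSON; the pipeline slurps it into a single array (jq -sc) and strips escaped quotes (sed) before later steps parse it.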
      - name: Make old comments collapsed by default
        # Only run if we have write permissions (not a fork)
        if: github.event.pull_request.head.repo.full_name == github.repository
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const { data: comments } = await github.rest.issues.listComments({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
            });
            const botComments = comments.filter((comment) =>
              comment.user.type === 'Bot' && comment.body.includes('Performance Benchmark Results') && comment.body.includes('<details open>')
            );
            for (const comment of botComments) {
              // Edit the comment to remove the "open" attribute from the <details> tag
              await github.rest.issues.updateComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                comment_id: comment.id,
                body: comment.body.replace('<details open>', '<details>')
              });
            }
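      # A change counts as significant when the instruction-count (Ir) delta exceeds ±5%; cached-execution benchmarks must also change by more than 200,000 instructions in absolute terms.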
      - name: Analyze profiling changes
        id: analyze
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            function isSignificantChange(diffPct, absoluteChange, benchmarkType) {
              const meetsPercentageThreshold = Math.abs(diffPct) > 5;
              const meetsAbsoluteThreshold = absoluteChange > 200000;
              const isCachedExecution = benchmarkType === 'run_cached' ||
                benchmarkType.includes('Cached Execution');
              return isCachedExecution
                ? (meetsPercentageThreshold && meetsAbsoluteThreshold)
                : meetsPercentageThreshold;
            }
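            // One slurped iai-callgrind JSON file per benchmark suite, in the same order as outputNames and sectionTitles below.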
            const allOutputs = [
              JSON.parse(fs.readFileSync('/tmp/compile_output.json', 'utf8')),
              JSON.parse(fs.readFileSync('/tmp/update_output.json', 'utf8')),
              JSON.parse(fs.readFileSync('/tmp/run_once_output.json', 'utf8')),
              JSON.parse(fs.readFileSync('/tmp/run_cached_output.json', 'utf8'))
            ];
            const outputNames = ['compile', 'update', 'run_once', 'run_cached'];
            const sectionTitles = ['Compilation', 'Update', 'Run Once', 'Cached Execution'];
            let hasSignificantChanges = false;
            let regressionDetails = [];
            for (let i = 0; i < allOutputs.length; i++) {
              const benchmarkOutput = allOutputs[i];
              const outputName = outputNames[i];
              const sectionTitle = sectionTitles[i];
              for (const benchmark of benchmarkOutput) {
                if (benchmark.profiles?.[0]?.summaries?.parts?.[0]?.metrics_summary?.Callgrind?.Ir?.diffs?.diff_pct) {
                  const diffPct = parseFloat(benchmark.profiles[0].summaries.parts[0].metrics_summary.Callgrind.Ir.diffs.diff_pct);
                  const oldValue = benchmark.profiles[0].summaries.parts[0].metrics_summary.Callgrind.Ir.metrics.Both[1].Int;
                  const newValue = benchmark.profiles[0].summaries.parts[0].metrics_summary.Callgrind.Ir.metrics.Both[0].Int;
                  const absoluteChange = Math.abs(newValue - oldValue);
                  if (isSignificantChange(diffPct, absoluteChange, outputName)) {
                    hasSignificantChanges = true;
                    regressionDetails.push({
                      module_path: benchmark.module_path,
                      id: benchmark.id,
                      diffPct,
                      absoluteChange,
                      sectionTitle
                    });
                  }
                }
              }
            }
            core.setOutput('has-significant-changes', hasSignificantChanges);
            core.setOutput('regression-details', JSON.stringify(regressionDetails));
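      # Build a markdown report from the four JSON files and post it as a PR comment, but only when something significant changed and the PR is not from a fork.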
      - name: Comment PR
        if: github.event.pull_request.head.repo.full_name == github.repository
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const fs = require('fs');
            const compileOutput = JSON.parse(fs.readFileSync('/tmp/compile_output.json', 'utf8'));
            const updateOutput = JSON.parse(fs.readFileSync('/tmp/update_output.json', 'utf8'));
            const runOnceOutput = JSON.parse(fs.readFileSync('/tmp/run_once_output.json', 'utf8'));
            const runCachedOutput = JSON.parse(fs.readFileSync('/tmp/run_cached_output.json', 'utf8'));
            const hasSignificantChanges = '${{ steps.analyze.outputs.has-significant-changes }}' === 'true';
            let commentBody = "";
            function formatNumber(num) {
              return num.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
            }
            function formatPercentage(pct) {
              const sign = pct >= 0 ? '+' : '';
              return `${sign}${pct.toFixed(2)}%`;
            }
            function padRight(str, len) {
              return str.padEnd(len);
            }
            function padLeft(str, len) {
              return str.padStart(len);
            }
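            // Render one benchmark category as a collapsible markdown section; the section is expanded when it contains at least one significant change.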
            function processBenchmarkOutput(benchmarkOutput, sectionTitle, isLast = false) {
              let sectionBody = "";
              let hasResults = false;
              let hasSignificantChanges = false;
              function isSignificantChange(diffPct, absoluteChange, benchmarkType) {
                const meetsPercentageThreshold = Math.abs(diffPct) > 5;
                const meetsAbsoluteThreshold = absoluteChange > 200000;
                const isCachedExecution = benchmarkType === 'run_cached' ||
                  benchmarkType.includes('Cached Execution');
                return isCachedExecution
                  ? (meetsPercentageThreshold && meetsAbsoluteThreshold)
                  : meetsPercentageThreshold;
              }
              for (const benchmark of benchmarkOutput) {
                if (benchmark.profiles && benchmark.profiles.length > 0) {
                  const profile = benchmark.profiles[0];
                  if (profile.summaries && profile.summaries.parts && profile.summaries.parts.length > 0) {
                    const part = profile.summaries.parts[0];
                    if (part.metrics_summary && part.metrics_summary.Callgrind && part.metrics_summary.Callgrind.Ir) {
                      const irData = part.metrics_summary.Callgrind.Ir;
                      if (irData.diffs && irData.diffs.diff_pct !== null) {
                        const irDiff = {
                          diff_pct: parseFloat(irData.diffs.diff_pct),
                          old: irData.metrics.Both[1].Int,
                          new: irData.metrics.Both[0].Int
                        };
                        hasResults = true;
                        const changePercentage = formatPercentage(irDiff.diff_pct);
                        const color = irDiff.diff_pct > 0 ? "red" : "lime";
                        sectionBody += `**${benchmark.module_path} ${benchmark.id}:${benchmark.details}**\n`;
                        sectionBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) → \`${formatNumber(irDiff.new)}\` (HEAD) : `;
                        sectionBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
                        sectionBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
                        sectionBody += `Baselines: master| HEAD\n`;
                        for (const [metricName, metricData] of Object.entries(part.metrics_summary.Callgrind)) {
                          if (metricData.diffs && metricData.diffs.diff_pct !== null) {
                            const changePercentage = formatPercentage(parseFloat(metricData.diffs.diff_pct));
                            const oldValue = metricData.metrics.Both[1].Int ?? metricData.metrics.Both[1].Float;
                            const newValue = metricData.metrics.Both[0].Int ?? metricData.metrics.Both[0].Float;
                            const line = `${padRight(metricName, 20)} ${padLeft(formatNumber(Math.round(oldValue)), 11)}|${padLeft(formatNumber(Math.round(newValue)), 11)} ${padLeft(changePercentage, 15)}`;
                            sectionBody += `${line}\n`;
                          }
                        }
                        sectionBody += "```\n</details>\n\n";
                        if (isSignificantChange(irDiff.diff_pct, Math.abs(irDiff.new - irDiff.old), sectionTitle)) {
                          hasSignificantChanges = true;
                        }
                      }
                    }
                  }
                }
              }
              if (hasResults) {
                // Wrap section in collapsible details, open only if there are significant changes
                const openAttribute = hasSignificantChanges ? " open" : "";
                const ruler = isLast ? "" : "\n\n---";
                return `<details${openAttribute}>\n<summary><h2>${sectionTitle}</h2></summary>\n\n${sectionBody}${ruler}\n</details>`;
              }
              return "";
            }
            // Process each benchmark category
            const sections = [
              { output: compileOutput, title: "🔧 Graph Compilation" },
              { output: updateOutput, title: "🔄 Executor Update" },
              { output: runOnceOutput, title: "🚀 Render: Cold Execution" },
              { output: runCachedOutput, title: "⚡ Render: Cached Execution" }
            ];
            // Generate sections and determine which ones have results
            const generatedSections = sections.map(({ output, title }) =>
              processBenchmarkOutput(output, title, true) // temporarily mark all as last
            ).filter(section => section.length > 0);
            // Re-generate with correct isLast flags
            let sectionIndex = 0;
            const finalSections = sections.map(({ output, title }) => {
              const section = processBenchmarkOutput(output, title, true); // check if it has results
              if (section.length > 0) {
                const isLast = sectionIndex === generatedSections.length - 1;
                sectionIndex++;
                return processBenchmarkOutput(output, title, isLast);
              }
              return "";
            }).filter(section => section.length > 0);
            // Combine all sections
            commentBody = finalSections.join("\n\n");
            if (commentBody.length > 0) {
              const output = `<details open>\n<summary>Performance Benchmark Results</summary>\n\n${commentBody}\n</details>`;
              if (hasSignificantChanges) {
                await github.rest.issues.createComment({
                  issue_number: context.issue.number,
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  body: output
                });
              } else {
                console.log("No significant performance changes detected. Skipping comment.");
                console.log(output);
              }
            } else {
              console.log("No benchmark results to display.");
            }
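      # Report the first flagged change as a job failure; the job-level continue-on-error keeps this from failing the overall workflow run.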
      - name: Fail on significant regressions
        if: steps.analyze.outputs.has-significant-changes == 'true'
        uses: actions/github-script@v7
        with:
          script: |
            const regressionDetails = JSON.parse('${{ steps.analyze.outputs.regression-details }}');
            const firstRegression = regressionDetails[0];
            core.setFailed(`Significant performance regression detected: ${firstRegression.module_path} ${firstRegression.id} increased by ${firstRegression.absoluteChange.toLocaleString()} instructions (${firstRegression.diffPct.toFixed(2)}%)`);