Mirror of https://github.com/esphome/esphome.git (synced 2026-01-28 15:32:07 -07:00)

Compare commits: chunked_fi...esp32_comp (13 commits)
| SHA1 |
|---|
| 52d0f1cc68 |
| 03cfd87b16 |
| 6d8294c2d3 |
| 6a3205f4db |
| 6f22509883 |
| 455ade0dca |
| 87fcfc9d76 |
| d86048cc2d |
| e1355de4cb |
| 7385c4cf3d |
| 3bd6ec4ec7 |
| 051604f284 |
| 10dfd95ff2 |
36  .github/scripts/auto-label-pr/constants.js (vendored, new file)
@@ -0,0 +1,36 @@
// Constants and markers for PR auto-labeling
module.exports = {
  BOT_COMMENT_MARKER: '<!-- auto-label-pr-bot -->',
  CODEOWNERS_MARKER: '<!-- codeowners-request -->',
  TOO_BIG_MARKER: '<!-- too-big-request -->',

  MANAGED_LABELS: [
    'new-component',
    'new-platform',
    'new-target-platform',
    'merging-to-release',
    'merging-to-beta',
    'chained-pr',
    'core',
    'small-pr',
    'dashboard',
    'github-actions',
    'by-code-owner',
    'has-tests',
    'needs-tests',
    'needs-docs',
    'needs-codeowners',
    'too-big',
    'labeller-recheck',
    'bugfix',
    'new-feature',
    'breaking-change',
    'developer-breaking-change',
    'code-quality',
  ],

  DOCS_PR_PATTERNS: [
    /https:\/\/github\.com\/esphome\/esphome-docs\/pull\/\d+/,
    /esphome\/esphome-docs#\d+/
  ]
};
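The two DOCS_PR_PATTERNS entries are what detectRequirements (below) tests the PR description against when deciding whether to add needs-docs. A minimal sketch of that check, with a made-up PR body that is not from this diff:

// Hypothetical usage - mirrors how detectRequirements consumes the patterns
const { DOCS_PR_PATTERNS } = require('./constants');

const prBody = 'Docs: esphome/esphome-docs#4321';
const hasDocsLink = DOCS_PR_PATTERNS.some(pattern => pattern.test(prBody));
console.log(hasDocsLink); // true - the short owner/repo#N form matches the second pattern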
302  .github/scripts/auto-label-pr/detectors.js (vendored, new file)
@@ -0,0 +1,302 @@
const fs = require('fs');
const { DOCS_PR_PATTERNS } = require('./constants');

// Strategy: Merge branch detection
async function detectMergeBranch(context) {
  const labels = new Set();
  const baseRef = context.payload.pull_request.base.ref;

  if (baseRef === 'release') {
    labels.add('merging-to-release');
  } else if (baseRef === 'beta') {
    labels.add('merging-to-beta');
  } else if (baseRef !== 'dev') {
    labels.add('chained-pr');
  }

  return labels;
}

// Strategy: Component and platform labeling
async function detectComponentPlatforms(changedFiles, apiData) {
  const labels = new Set();
  const componentRegex = /^esphome\/components\/([^\/]+)\//;
  const targetPlatformRegex = new RegExp(`^esphome\/components\/(${apiData.targetPlatforms.join('|')})/`);

  for (const file of changedFiles) {
    const componentMatch = file.match(componentRegex);
    if (componentMatch) {
      labels.add(`component: ${componentMatch[1]}`);
    }

    const platformMatch = file.match(targetPlatformRegex);
    if (platformMatch) {
      labels.add(`platform: ${platformMatch[1]}`);
    }
  }

  return labels;
}

// Strategy: New component detection
async function detectNewComponents(prFiles) {
  const labels = new Set();
  const addedFiles = prFiles.filter(file => file.status === 'added').map(file => file.filename);

  for (const file of addedFiles) {
    const componentMatch = file.match(/^esphome\/components\/([^\/]+)\/__init__\.py$/);
    if (componentMatch) {
      try {
        const content = fs.readFileSync(file, 'utf8');
        if (content.includes('IS_TARGET_PLATFORM = True')) {
          labels.add('new-target-platform');
        }
      } catch (error) {
        console.log(`Failed to read content of ${file}:`, error.message);
      }
      labels.add('new-component');
    }
  }

  return labels;
}

// Strategy: New platform detection
async function detectNewPlatforms(prFiles, apiData) {
  const labels = new Set();
  const addedFiles = prFiles.filter(file => file.status === 'added').map(file => file.filename);

  for (const file of addedFiles) {
    const platformFileMatch = file.match(/^esphome\/components\/([^\/]+)\/([^\/]+)\.py$/);
    if (platformFileMatch) {
      const [, component, platform] = platformFileMatch;
      if (apiData.platformComponents.includes(platform)) {
        labels.add('new-platform');
      }
    }

    const platformDirMatch = file.match(/^esphome\/components\/([^\/]+)\/([^\/]+)\/__init__\.py$/);
    if (platformDirMatch) {
      const [, component, platform] = platformDirMatch;
      if (apiData.platformComponents.includes(platform)) {
        labels.add('new-platform');
      }
    }
  }

  return labels;
}

// Strategy: Core files detection
async function detectCoreChanges(changedFiles) {
  const labels = new Set();
  const coreFiles = changedFiles.filter(file =>
    file.startsWith('esphome/core/') ||
    (file.startsWith('esphome/') && file.split('/').length === 2)
  );

  if (coreFiles.length > 0) {
    labels.add('core');
  }

  return labels;
}

// Strategy: PR size detection
async function detectPRSize(prFiles, totalAdditions, totalDeletions, totalChanges, isMegaPR, SMALL_PR_THRESHOLD, TOO_BIG_THRESHOLD) {
  const labels = new Set();

  if (totalChanges <= SMALL_PR_THRESHOLD) {
    labels.add('small-pr');
    return labels;
  }

  const testAdditions = prFiles
    .filter(file => file.filename.startsWith('tests/'))
    .reduce((sum, file) => sum + (file.additions || 0), 0);
  const testDeletions = prFiles
    .filter(file => file.filename.startsWith('tests/'))
    .reduce((sum, file) => sum + (file.deletions || 0), 0);

  const nonTestChanges = (totalAdditions - testAdditions) - (totalDeletions - testDeletions);

  // Don't add too-big if mega-pr label is already present
  if (nonTestChanges > TOO_BIG_THRESHOLD && !isMegaPR) {
    labels.add('too-big');
  }

  return labels;
}

// Strategy: Dashboard changes
async function detectDashboardChanges(changedFiles) {
  const labels = new Set();
  const dashboardFiles = changedFiles.filter(file =>
    file.startsWith('esphome/dashboard/') ||
    file.startsWith('esphome/components/dashboard_import/')
  );

  if (dashboardFiles.length > 0) {
    labels.add('dashboard');
  }

  return labels;
}

// Strategy: GitHub Actions changes
async function detectGitHubActionsChanges(changedFiles) {
  const labels = new Set();
  const githubActionsFiles = changedFiles.filter(file =>
    file.startsWith('.github/workflows/')
  );

  if (githubActionsFiles.length > 0) {
    labels.add('github-actions');
  }

  return labels;
}

// Strategy: Code owner detection
async function detectCodeOwner(github, context, changedFiles) {
  const labels = new Set();
  const { owner, repo } = context.repo;

  try {
    const { data: codeownersFile } = await github.rest.repos.getContent({
      owner,
      repo,
      path: 'CODEOWNERS',
    });

    const codeownersContent = Buffer.from(codeownersFile.content, 'base64').toString('utf8');
    const prAuthor = context.payload.pull_request.user.login;

    const codeownersLines = codeownersContent.split('\n')
      .map(line => line.trim())
      .filter(line => line && !line.startsWith('#'));

    const codeownersRegexes = codeownersLines.map(line => {
      const parts = line.split(/\s+/);
      const pattern = parts[0];
      const owners = parts.slice(1);

      let regex;
      if (pattern.endsWith('*')) {
        const dir = pattern.slice(0, -1);
        regex = new RegExp(`^${dir.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}`);
      } else if (pattern.includes('*')) {
        // First escape all regex special chars except *, then replace * with .*
        const regexPattern = pattern
          .replace(/[.+?^${}()|[\]\\]/g, '\\$&')
          .replace(/\*/g, '.*');
        regex = new RegExp(`^${regexPattern}$`);
      } else {
        regex = new RegExp(`^${pattern.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}$`);
      }

      return { regex, owners };
    });

    for (const file of changedFiles) {
      for (const { regex, owners } of codeownersRegexes) {
        if (regex.test(file) && owners.some(owner => owner === `@${prAuthor}`)) {
          labels.add('by-code-owner');
          return labels;
        }
      }
    }
  } catch (error) {
    console.log('Failed to read or parse CODEOWNERS file:', error.message);
  }

  return labels;
}

// Strategy: Test detection
async function detectTests(changedFiles) {
  const labels = new Set();
  const testFiles = changedFiles.filter(file => file.startsWith('tests/'));

  if (testFiles.length > 0) {
    labels.add('has-tests');
  }

  return labels;
}

// Strategy: PR Template Checkbox detection
async function detectPRTemplateCheckboxes(context) {
  const labels = new Set();
  const prBody = context.payload.pull_request.body || '';

  console.log('Checking PR template checkboxes...');

  // Check for checked checkboxes in the "Types of changes" section
  const checkboxPatterns = [
    { pattern: /- \[x\] Bugfix \(non-breaking change which fixes an issue\)/i, label: 'bugfix' },
    { pattern: /- \[x\] New feature \(non-breaking change which adds functionality\)/i, label: 'new-feature' },
    { pattern: /- \[x\] Breaking change \(fix or feature that would cause existing functionality to not work as expected\)/i, label: 'breaking-change' },
    { pattern: /- \[x\] Developer breaking change \(an API change that could break external components\)/i, label: 'developer-breaking-change' },
    { pattern: /- \[x\] Code quality improvements to existing code or addition of tests/i, label: 'code-quality' }
  ];

  for (const { pattern, label } of checkboxPatterns) {
    if (pattern.test(prBody)) {
      console.log(`Found checked checkbox for: ${label}`);
      labels.add(label);
    }
  }

  return labels;
}

// Strategy: Requirements detection
async function detectRequirements(allLabels, prFiles, context) {
  const labels = new Set();

  // Check for missing tests
  if ((allLabels.has('new-component') || allLabels.has('new-platform') || allLabels.has('new-feature')) && !allLabels.has('has-tests')) {
    labels.add('needs-tests');
  }

  // Check for missing docs
  if (allLabels.has('new-component') || allLabels.has('new-platform') || allLabels.has('new-feature')) {
    const prBody = context.payload.pull_request.body || '';
    const hasDocsLink = DOCS_PR_PATTERNS.some(pattern => pattern.test(prBody));

    if (!hasDocsLink) {
      labels.add('needs-docs');
    }
  }

  // Check for missing CODEOWNERS
  if (allLabels.has('new-component')) {
    const codeownersModified = prFiles.some(file =>
      file.filename === 'CODEOWNERS' &&
      (file.status === 'modified' || file.status === 'added') &&
      (file.additions || 0) > 0
    );

    if (!codeownersModified) {
      labels.add('needs-codeowners');
    }
  }

  return labels;
}

module.exports = {
  detectMergeBranch,
  detectComponentPlatforms,
  detectNewComponents,
  detectNewPlatforms,
  detectCoreChanges,
  detectPRSize,
  detectDashboardChanges,
  detectGitHubActionsChanges,
  detectCodeOwner,
  detectTests,
  detectPRTemplateCheckboxes,
  detectRequirements
};
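The trickiest part of detectCodeOwner is the CODEOWNERS glob handling, so a small worked example may help (the CODEOWNERS line and username are invented, not taken from this diff). A pattern ending in * becomes an escaped prefix match, so every file under that directory hits:

// Hypothetical CODEOWNERS line: 'esphome/components/adc/* @someuser'
const pattern = 'esphome/components/adc/*';
const dir = pattern.slice(0, -1); // drop the trailing '*'
const regex = new RegExp(`^${dir.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}`);

console.log(regex.test('esphome/components/adc/sensor.py')); // true
console.log(regex.test('esphome/components/dht/sensor.py')); // false
// If the PR author were someuser, detectCodeOwner would add 'by-code-owner' and stop early.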
179  .github/scripts/auto-label-pr/index.js (vendored, new file)
@@ -0,0 +1,179 @@
const { MANAGED_LABELS } = require('./constants');
const {
  detectMergeBranch,
  detectComponentPlatforms,
  detectNewComponents,
  detectNewPlatforms,
  detectCoreChanges,
  detectPRSize,
  detectDashboardChanges,
  detectGitHubActionsChanges,
  detectCodeOwner,
  detectTests,
  detectPRTemplateCheckboxes,
  detectRequirements
} = require('./detectors');
const { handleReviews } = require('./reviews');
const { applyLabels, removeOldLabels } = require('./labels');

// Fetch API data
async function fetchApiData() {
  try {
    const response = await fetch('https://data.esphome.io/components.json');
    const componentsData = await response.json();
    return {
      targetPlatforms: componentsData.target_platforms || [],
      platformComponents: componentsData.platform_components || []
    };
  } catch (error) {
    console.log('Failed to fetch components data from API:', error.message);
    return { targetPlatforms: [], platformComponents: [] };
  }
}

module.exports = async ({ github, context }) => {
  // Environment variables
  const SMALL_PR_THRESHOLD = parseInt(process.env.SMALL_PR_THRESHOLD);
  const MAX_LABELS = parseInt(process.env.MAX_LABELS);
  const TOO_BIG_THRESHOLD = parseInt(process.env.TOO_BIG_THRESHOLD);
  const COMPONENT_LABEL_THRESHOLD = parseInt(process.env.COMPONENT_LABEL_THRESHOLD);

  // Global state
  const { owner, repo } = context.repo;
  const pr_number = context.issue.number;

  // Get current labels and PR data
  const { data: currentLabelsData } = await github.rest.issues.listLabelsOnIssue({
    owner,
    repo,
    issue_number: pr_number
  });
  const currentLabels = currentLabelsData.map(label => label.name);
  const managedLabels = currentLabels.filter(label =>
    label.startsWith('component: ') || MANAGED_LABELS.includes(label)
  );

  // Check for mega-PR early - if present, skip most automatic labeling
  const isMegaPR = currentLabels.includes('mega-pr');

  // Get all PR files with automatic pagination
  const prFiles = await github.paginate(
    github.rest.pulls.listFiles,
    {
      owner,
      repo,
      pull_number: pr_number
    }
  );

  // Calculate data from PR files
  const changedFiles = prFiles.map(file => file.filename);
  const totalAdditions = prFiles.reduce((sum, file) => sum + (file.additions || 0), 0);
  const totalDeletions = prFiles.reduce((sum, file) => sum + (file.deletions || 0), 0);
  const totalChanges = totalAdditions + totalDeletions;

  console.log('Current labels:', currentLabels.join(', '));
  console.log('Changed files:', changedFiles.length);
  console.log('Total changes:', totalChanges);
  if (isMegaPR) {
    console.log('Mega-PR detected - applying limited labeling logic');
  }

  // Fetch API data
  const apiData = await fetchApiData();
  const baseRef = context.payload.pull_request.base.ref;

  // Early exit for release and beta branches only
  if (baseRef === 'release' || baseRef === 'beta') {
    const branchLabels = await detectMergeBranch(context);
    const finalLabels = Array.from(branchLabels);

    console.log('Computed labels (merge branch only):', finalLabels.join(', '));

    // Apply labels
    await applyLabels(github, context, finalLabels);

    // Remove old managed labels
    await removeOldLabels(github, context, managedLabels, finalLabels);

    return;
  }

  // Run all strategies
  const [
    branchLabels,
    componentLabels,
    newComponentLabels,
    newPlatformLabels,
    coreLabels,
    sizeLabels,
    dashboardLabels,
    actionsLabels,
    codeOwnerLabels,
    testLabels,
    checkboxLabels,
  ] = await Promise.all([
    detectMergeBranch(context),
    detectComponentPlatforms(changedFiles, apiData),
    detectNewComponents(prFiles),
    detectNewPlatforms(prFiles, apiData),
    detectCoreChanges(changedFiles),
    detectPRSize(prFiles, totalAdditions, totalDeletions, totalChanges, isMegaPR, SMALL_PR_THRESHOLD, TOO_BIG_THRESHOLD),
    detectDashboardChanges(changedFiles),
    detectGitHubActionsChanges(changedFiles),
    detectCodeOwner(github, context, changedFiles),
    detectTests(changedFiles),
    detectPRTemplateCheckboxes(context),
  ]);

  // Combine all labels
  const allLabels = new Set([
    ...branchLabels,
    ...componentLabels,
    ...newComponentLabels,
    ...newPlatformLabels,
    ...coreLabels,
    ...sizeLabels,
    ...dashboardLabels,
    ...actionsLabels,
    ...codeOwnerLabels,
    ...testLabels,
    ...checkboxLabels,
  ]);

  // Detect requirements based on all other labels
  const requirementLabels = await detectRequirements(allLabels, prFiles, context);
  for (const label of requirementLabels) {
    allLabels.add(label);
  }

  let finalLabels = Array.from(allLabels);

  // For mega-PRs, exclude component labels if there are too many
  if (isMegaPR) {
    const componentLabels = finalLabels.filter(label => label.startsWith('component: '));
    if (componentLabels.length > COMPONENT_LABEL_THRESHOLD) {
      finalLabels = finalLabels.filter(label => !label.startsWith('component: '));
      console.log(`Mega-PR detected - excluding ${componentLabels.length} component labels (threshold: ${COMPONENT_LABEL_THRESHOLD})`);
    }
  }

  // Handle too many labels (only for non-mega PRs)
  const tooManyLabels = finalLabels.length > MAX_LABELS;
  const originalLabelCount = finalLabels.length;

  if (tooManyLabels && !isMegaPR && !finalLabels.includes('too-big')) {
    finalLabels = ['too-big'];
  }

  console.log('Computed labels:', finalLabels.join(', '));

  // Handle reviews
  await handleReviews(github, context, finalLabels, originalLabelCount, prFiles, totalAdditions, totalDeletions, MAX_LABELS, TOO_BIG_THRESHOLD);

  // Apply labels
  await applyLabels(github, context, finalLabels);

  // Remove old managed labels
  await removeOldLabels(github, context, managedLabels, finalLabels);
};
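Because the entry point takes a plain { github, context } pair and reads its four thresholds from the environment, it can also be driven outside of Actions. A minimal local harness sketch, assuming @octokit/rest (which provides both the .rest namespace and .paginate used above), with entirely made-up threshold values and PR number, none of which appear in this diff:

// Hypothetical test harness for the auto-label module
process.env.SMALL_PR_THRESHOLD = '30';
process.env.MAX_LABELS = '15';
process.env.TOO_BIG_THRESHOLD = '5000';
process.env.COMPONENT_LABEL_THRESHOLD = '10';

const { Octokit } = require('@octokit/rest');
const runAutoLabel = require('./.github/scripts/auto-label-pr/index.js');

const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN });
const context = {
  repo: { owner: 'esphome', repo: 'esphome' },
  issue: { number: 1234 }, // hypothetical PR number
  payload: {
    pull_request: {
      base: { ref: 'dev' },
      user: { login: 'someuser' },
      body: '',
    },
  },
};

runAutoLabel({ github: octokit, context }).catch(console.error);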
41  .github/scripts/auto-label-pr/labels.js (vendored, new file)
@@ -0,0 +1,41 @@
// Apply labels to PR
async function applyLabels(github, context, finalLabels) {
  const { owner, repo } = context.repo;
  const pr_number = context.issue.number;

  if (finalLabels.length > 0) {
    console.log(`Adding labels: ${finalLabels.join(', ')}`);
    await github.rest.issues.addLabels({
      owner,
      repo,
      issue_number: pr_number,
      labels: finalLabels
    });
  }
}

// Remove old managed labels
async function removeOldLabels(github, context, managedLabels, finalLabels) {
  const { owner, repo } = context.repo;
  const pr_number = context.issue.number;

  const labelsToRemove = managedLabels.filter(label => !finalLabels.includes(label));
  for (const label of labelsToRemove) {
    console.log(`Removing label: ${label}`);
    try {
      await github.rest.issues.removeLabel({
        owner,
        repo,
        issue_number: pr_number,
        name: label
      });
    } catch (error) {
      console.log(`Failed to remove label ${label}:`, error.message);
    }
  }
}

module.exports = {
  applyLabels,
  removeOldLabels
};
124  .github/scripts/auto-label-pr/reviews.js (vendored, new file)
@@ -0,0 +1,124 @@
const {
  BOT_COMMENT_MARKER,
  CODEOWNERS_MARKER,
  TOO_BIG_MARKER,
} = require('./constants');

// Generate review messages
function generateReviewMessages(finalLabels, originalLabelCount, prFiles, totalAdditions, totalDeletions, prAuthor, MAX_LABELS, TOO_BIG_THRESHOLD) {
  const messages = [];

  // Too big message
  if (finalLabels.includes('too-big')) {
    const testAdditions = prFiles
      .filter(file => file.filename.startsWith('tests/'))
      .reduce((sum, file) => sum + (file.additions || 0), 0);
    const testDeletions = prFiles
      .filter(file => file.filename.startsWith('tests/'))
      .reduce((sum, file) => sum + (file.deletions || 0), 0);
    const nonTestChanges = (totalAdditions - testAdditions) - (totalDeletions - testDeletions);

    const tooManyLabels = originalLabelCount > MAX_LABELS;
    const tooManyChanges = nonTestChanges > TOO_BIG_THRESHOLD;

    let message = `${TOO_BIG_MARKER}\n### 📦 Pull Request Size\n\n`;

    if (tooManyLabels && tooManyChanges) {
      message += `This PR is too large with ${nonTestChanges} line changes (excluding tests) and affects ${originalLabelCount} different components/areas.`;
    } else if (tooManyLabels) {
      message += `This PR affects ${originalLabelCount} different components/areas.`;
    } else {
      message += `This PR is too large with ${nonTestChanges} line changes (excluding tests).`;
    }

    message += ` Please consider breaking it down into smaller, focused PRs to make review easier and reduce the risk of conflicts.\n\n`;
    message += `For guidance on breaking down large PRs, see: https://developers.esphome.io/contributing/submitting-your-work/#how-to-approach-large-submissions`;

    messages.push(message);
  }

  // CODEOWNERS message
  if (finalLabels.includes('needs-codeowners')) {
    const message = `${CODEOWNERS_MARKER}\n### 👥 Code Ownership\n\n` +
      `Hey there @${prAuthor},\n` +
      `Thanks for submitting this pull request! Can you add yourself as a codeowner for this integration? ` +
      `This way we can notify you if a bug report for this integration is reported.\n\n` +
      `In \`__init__.py\` of the integration, please add:\n\n` +
      `\`\`\`python\nCODEOWNERS = ["@${prAuthor}"]\n\`\`\`\n\n` +
      `And run \`script/build_codeowners.py\``;

    messages.push(message);
  }

  return messages;
}

// Handle reviews
async function handleReviews(github, context, finalLabels, originalLabelCount, prFiles, totalAdditions, totalDeletions, MAX_LABELS, TOO_BIG_THRESHOLD) {
  const { owner, repo } = context.repo;
  const pr_number = context.issue.number;
  const prAuthor = context.payload.pull_request.user.login;

  const reviewMessages = generateReviewMessages(finalLabels, originalLabelCount, prFiles, totalAdditions, totalDeletions, prAuthor, MAX_LABELS, TOO_BIG_THRESHOLD);
  const hasReviewableLabels = finalLabels.some(label =>
    ['too-big', 'needs-codeowners'].includes(label)
  );

  const { data: reviews } = await github.rest.pulls.listReviews({
    owner,
    repo,
    pull_number: pr_number
  });

  const botReviews = reviews.filter(review =>
    review.user.type === 'Bot' &&
    review.state === 'CHANGES_REQUESTED' &&
    review.body && review.body.includes(BOT_COMMENT_MARKER)
  );

  if (hasReviewableLabels) {
    const reviewBody = `${BOT_COMMENT_MARKER}\n\n${reviewMessages.join('\n\n---\n\n')}`;

    if (botReviews.length > 0) {
      // Update existing review
      await github.rest.pulls.updateReview({
        owner,
        repo,
        pull_number: pr_number,
        review_id: botReviews[0].id,
        body: reviewBody
      });
      console.log('Updated existing bot review');
    } else {
      // Create new review
      await github.rest.pulls.createReview({
        owner,
        repo,
        pull_number: pr_number,
        body: reviewBody,
        event: 'REQUEST_CHANGES'
      });
      console.log('Created new bot review');
    }
  } else if (botReviews.length > 0) {
    // Dismiss existing reviews
    for (const review of botReviews) {
      try {
        await github.rest.pulls.dismissReview({
          owner,
          repo,
          pull_number: pr_number,
          review_id: review.id,
          message: 'Review dismissed: All requirements have been met'
        });
        console.log(`Dismissed bot review ${review.id}`);
      } catch (error) {
        console.log(`Failed to dismiss review ${review.id}:`, error.message);
      }
    }
  }
}

module.exports = {
  handleReviews
};
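The HTML-comment markers are what make this idempotent: the bot recognizes its own earlier review by finding BOT_COMMENT_MARKER in the body, so re-runs update or dismiss that review instead of stacking new change requests. For a too-big PR, the assembled reviewBody comes out roughly like the following (the line count is invented and the text truncated for illustration):

<!-- auto-label-pr-bot -->

<!-- too-big-request -->
### 📦 Pull Request Size

This PR is too large with 6200 line changes (excluding tests). Please consider breaking it down into smaller, focused PRs ...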
632  .github/workflows/auto-label-pr.yml (vendored)
@@ -36,633 +36,5 @@ jobs:
       with:
         github-token: ${{ steps.generate-token.outputs.token }}
         script: |
-          (632 removed lines: the inline constants, detector strategies, review handling, and main labeling flow, moved into the .github/scripts/auto-label-pr/ modules shown above)
+          const script = require('./.github/scripts/auto-label-pr/index.js');
+          await script({ github, context });
8  .github/workflows/release.yml (vendored)
@@ -102,12 +102,12 @@ jobs:
         uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0

       - name: Log in to docker hub
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
         with:
           username: ${{ secrets.DOCKER_USER }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       - name: Log in to the GitHub container registry
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -182,13 +182,13 @@ jobs:

       - name: Log in to docker hub
         if: matrix.registry == 'dockerhub'
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
         with:
          username: ${{ secrets.DOCKER_USER }}
          password: ${{ secrets.DOCKER_PASSWORD }}
       - name: Log in to the GitHub container registry
         if: matrix.registry == 'ghcr'
-        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
@@ -2,7 +2,7 @@ import logging

 import esphome.codegen as cg
 from esphome.components import sensor, voltage_sampler
-from esphome.components.esp32 import get_esp32_variant
+from esphome.components.esp32 import get_esp32_variant, include_idf_component
 from esphome.components.nrf52.const import AIN_TO_GPIO, EXTRA_ADC
 from esphome.components.zephyr import (
     zephyr_add_overlay,
@@ -118,6 +118,9 @@ async def to_code(config):
     cg.add(var.set_sampling_mode(config[CONF_SAMPLING_MODE]))

     if CORE.is_esp32:
+        # Re-enable ESP-IDF's ADC driver (excluded by default to save compile time)
+        include_idf_component("esp_adc")
+
         if attenuation := config.get(CONF_ATTENUATION):
             if attenuation == "auto":
                 cg.add(var.set_autorange(cg.global_ns.true))
@@ -53,6 +53,7 @@ from .const import (  # noqa
     KEY_BOARD,
     KEY_COMPONENTS,
     KEY_ESP32,
+    KEY_EXCLUDE_COMPONENTS,
     KEY_EXTRA_BUILD_FILES,
     KEY_FLASH_SIZE,
     KEY_FULL_CERT_BUNDLE,
@@ -114,6 +115,41 @@ COMPILER_OPTIMIZATIONS = {
     "SIZE": "CONFIG_COMPILER_OPTIMIZATION_SIZE",
 }

+# ESP-IDF components that ESPHome never uses.
+# Excluding these reduces compile time - the linker would discard them anyway.
+#
+# NOTE: It is fine to remove components from this list when adding new features,
+# but when you do, make sure the component is still excluded via exclude_idf_component()
+# when the new feature is not enabled, to keep compile times short for users who
+# don't use that feature.
+#
+# Cannot be excluded (dependencies of required components):
+# - "console": espressif/mdns unconditionally depends on it
+# - "sdmmc": driver -> esp_driver_sdmmc -> sdmmc dependency chain
+NEVER_USED_IDF_COMPONENTS = (
+    "cmock",  # Unit testing mock framework - ESPHome doesn't use IDF's testing
+    "esp_adc",  # ADC driver - only needed by adc component
+    "esp_driver_i2s",  # I2S driver - only needed by i2s_audio component
+    "esp_driver_rmt",  # RMT driver - only needed by remote_transmitter/receiver, neopixelbus
+    "esp_driver_touch_sens",  # Touch sensor driver - only needed by esp32_touch
+    "esp_eth",  # Ethernet driver - only needed by ethernet component
+    "esp_hid",  # HID host/device support - ESPHome doesn't implement HID functionality
+    "esp_http_client",  # HTTP client - only needed by http_request component
+    "esp_https_ota",  # ESP-IDF HTTPS OTA - ESPHome has its own OTA implementation
+    "esp_https_server",  # HTTPS server - ESPHome has its own web server
+    "esp_lcd",  # LCD controller drivers - ESPHome has its own display components
+    "esp_local_ctrl",  # Local control over HTTPS/BLE - ESPHome has native API
+    "espcoredump",  # Core dump support - ESPHome has its own debug component
+    "fatfs",  # FAT filesystem - ESPHome doesn't use filesystem storage
+    "mqtt",  # ESP-IDF MQTT library - ESPHome has its own MQTT implementation
+    "perfmon",  # Xtensa performance monitor - ESPHome has its own debug component
+    "protocomm",  # Protocol communication for provisioning - unused by ESPHome
+    "spiffs",  # SPIFFS filesystem - ESPHome doesn't use filesystem storage (IDF only)
+    "unity",  # Unit testing framework - ESPHome doesn't use IDF's testing
+    "wear_levelling",  # Flash wear levelling for fatfs - unused since fatfs unused
+    "wifi_provisioning",  # WiFi provisioning - ESPHome uses its own improv implementation
+)
+
 # ESP32 (original) chip revision options
 # Setting minimum revision to 3.0 or higher:
 # - Reduces flash size by excluding workaround code for older chip bugs
@@ -203,6 +239,9 @@ def set_core_data(config):
     )
     CORE.data[KEY_ESP32][KEY_SDKCONFIG_OPTIONS] = {}
     CORE.data[KEY_ESP32][KEY_COMPONENTS] = {}
+    # Initialize with default exclusions - components can call include_idf_component()
+    # to re-enable any they need
+    CORE.data[KEY_ESP32][KEY_EXCLUDE_COMPONENTS] = set(NEVER_USED_IDF_COMPONENTS)
     CORE.data[KEY_CORE][KEY_FRAMEWORK_VERSION] = cv.Version.parse(
         config[CONF_FRAMEWORK][CONF_VERSION]
     )
@@ -328,6 +367,28 @@ def add_idf_component(
     }


+def exclude_idf_component(name: str) -> None:
+    """Exclude an ESP-IDF component from the build.
+
+    This reduces compile time by skipping components that are not needed.
+    The component will be passed to ESP-IDF's EXCLUDE_COMPONENTS cmake variable.
+
+    Note: Components that are dependencies of other required components
+    cannot be excluded - ESP-IDF will still build them.
+    """
+    CORE.data[KEY_ESP32][KEY_EXCLUDE_COMPONENTS].add(name)
+
+
+def include_idf_component(name: str) -> None:
+    """Remove an ESP-IDF component from the exclusion list.
+
+    Call this from components that need an ESP-IDF component that is
+    excluded by default in NEVER_USED_IDF_COMPONENTS. This ensures the
+    component will be built when needed.
+    """
+    CORE.data[KEY_ESP32][KEY_EXCLUDE_COMPONENTS].discard(name)
+
+
 def add_extra_script(stage: str, filename: str, path: Path):
     """Add an extra script to the project."""
     key = f"{stage}:{filename}"
@@ -982,6 +1043,19 @@ def _configure_lwip_max_sockets(conf: dict) -> None:
     add_idf_sdkconfig_option("CONFIG_LWIP_MAX_SOCKETS", max_sockets)


+@coroutine_with_priority(CoroPriority.FINAL)
+async def _write_exclude_components() -> None:
+    """Write EXCLUDE_COMPONENTS cmake arg after all components have registered exclusions."""
+    if KEY_ESP32 not in CORE.data:
+        return
+    excluded = CORE.data[KEY_ESP32].get(KEY_EXCLUDE_COMPONENTS)
+    if excluded:
+        exclude_list = ";".join(sorted(excluded))
+        cg.add_platformio_option(
+            "board_build.cmake_extra_args", f"-DEXCLUDE_COMPONENTS={exclude_list}"
+        )
+
+
 @coroutine_with_priority(CoroPriority.FINAL)
 async def _add_yaml_idf_components(components: list[ConfigType]):
     """Add IDF components from YAML config with final priority to override code-added components."""
@@ -1324,6 +1398,11 @@
     if conf[CONF_COMPONENTS]:
         CORE.add_job(_add_yaml_idf_components, conf[CONF_COMPONENTS])

+    # Write EXCLUDE_COMPONENTS at FINAL priority after all components have had
+    # a chance to call include_idf_component() to re-enable components they need.
+    # Default exclusions are added in set_core_data() during config validation.
+    CORE.add_job(_write_exclude_components)
+

 APP_PARTITION_SIZES = {
     "2MB": 0x0C0000,  # 768 KB
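Putting the pieces together: set_core_data() seeds every ESP32 build with the full NEVER_USED_IDF_COMPONENTS set, individual components opt back in through include_idf_component(), and _write_exclude_components() runs at FINAL priority to join whatever remains into a single cmake argument. As an illustration (a hypothetical config where only the adc component opted back in, so esp_adc has been discarded from the set), the generated PlatformIO option would look roughly like:

board_build.cmake_extra_args = -DEXCLUDE_COMPONENTS=cmock;esp_driver_i2s;esp_driver_rmt;esp_driver_touch_sens;esp_eth;esp_hid;esp_http_client;esp_https_ota;esp_https_server;esp_lcd;esp_local_ctrl;espcoredump;fatfs;mqtt;perfmon;protocomm;spiffs;unity;wear_levelling;wifi_provisioning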
@@ -175,6 +175,32 @@ ESP32_BOARD_PINS = {
         "LED": 13,
         "LED_BUILTIN": 13,
     },
+    "adafruit_feather_esp32s3_reversetft": {
+        "BUTTON": 0,
+        "A0": 18,
+        "A1": 17,
+        "A2": 16,
+        "A3": 15,
+        "A4": 14,
+        "A5": 8,
+        "SCK": 36,
+        "MOSI": 35,
+        "MISO": 37,
+        "RX": 38,
+        "TX": 39,
+        "SCL": 4,
+        "SDA": 3,
+        "NEOPIXEL": 33,
+        "PIN_NEOPIXEL": 33,
+        "NEOPIXEL_POWER": 21,
+        "TFT_I2C_POWER": 7,
+        "TFT_CS": 42,
+        "TFT_DC": 40,
+        "TFT_RESET": 41,
+        "TFT_BACKLIGHT": 45,
+        "LED": 13,
+        "LED_BUILTIN": 13,
+    },
     "adafruit_feather_esp32s3_tft": {
         "BUTTON": 0,
         "A0": 18,
@@ -6,6 +6,7 @@ KEY_FLASH_SIZE = "flash_size"
 KEY_VARIANT = "variant"
 KEY_SDKCONFIG_OPTIONS = "sdkconfig_options"
 KEY_COMPONENTS = "components"
+KEY_EXCLUDE_COMPONENTS = "exclude_components"
 KEY_REPO = "repo"
 KEY_REF = "ref"
 KEY_REFRESH = "refresh"
@@ -5,6 +5,7 @@ from esphome import pins
 import esphome.codegen as cg
 from esphome.components import esp32, light
 from esphome.components.const import CONF_USE_PSRAM
+from esphome.components.esp32 import include_idf_component
 import esphome.config_validation as cv
 from esphome.const import (
     CONF_CHIPSET,
@@ -129,6 +130,9 @@ CONFIG_SCHEMA = cv.All(


 async def to_code(config):
+    # Re-enable ESP-IDF's RMT driver (excluded by default to save compile time)
+    include_idf_component("esp_driver_rmt")
+
     var = cg.new_Pvariable(config[CONF_OUTPUT_ID])
     await light.register_light(var, config)
     await cg.register_component(var, config)
@@ -6,6 +6,7 @@ from esphome.components.esp32 import (
     VARIANT_ESP32S3,
     get_esp32_variant,
     gpio,
+    include_idf_component,
 )
 import esphome.config_validation as cv
 from esphome.const import (
@@ -266,6 +267,9 @@ CONFIG_SCHEMA = cv.All(


 async def to_code(config):
+    # Re-enable ESP-IDF's touch sensor driver (excluded by default to save compile time)
+    include_idf_component("esp_driver_touch_sens")
+
     touch = cg.new_Pvariable(config[CONF_ID])
     await cg.register_component(touch, config)
@@ -14,6 +14,7 @@ from esphome.components.esp32 import (
     add_idf_component,
     add_idf_sdkconfig_option,
     get_esp32_variant,
+    include_idf_component,
 )
 from esphome.components.network import ip_address_literal
 from esphome.components.spi import CONF_INTERFACE_INDEX, get_spi_interface
@@ -419,6 +420,9 @@ async def to_code(config):
         # Also disable WiFi/BT coexistence since WiFi is disabled
         add_idf_sdkconfig_option("CONFIG_SW_COEXIST_ENABLE", False)

+    # Re-enable ESP-IDF's Ethernet driver (excluded by default to save compile time)
+    include_idf_component("esp_eth")
+
     if config[CONF_TYPE] == "LAN8670":
         # Add LAN867x 10BASE-T1S PHY support component
         add_idf_component(name="espressif/lan867x", ref="2.0.0")
@@ -9,30 +9,56 @@ from esphome.const import (
     CONF_VALUE,
 )
 from esphome.core import CoroPriority, coroutine_with_priority
+from esphome.types import ConfigType

 CODEOWNERS = ["@esphome/core"]
 globals_ns = cg.esphome_ns.namespace("globals")
 GlobalsComponent = globals_ns.class_("GlobalsComponent", cg.Component)
-RestoringGlobalsComponent = globals_ns.class_("RestoringGlobalsComponent", cg.Component)
+RestoringGlobalsComponent = globals_ns.class_(
+    "RestoringGlobalsComponent", cg.PollingComponent
+)
 RestoringGlobalStringComponent = globals_ns.class_(
-    "RestoringGlobalStringComponent", cg.Component
+    "RestoringGlobalStringComponent", cg.PollingComponent
 )
 GlobalVarSetAction = globals_ns.class_("GlobalVarSetAction", automation.Action)

 CONF_MAX_RESTORE_DATA_LENGTH = "max_restore_data_length"

+# Base schema fields shared by both variants
+_BASE_SCHEMA = {
+    cv.Required(CONF_ID): cv.declare_id(GlobalsComponent),
+    cv.Required(CONF_TYPE): cv.string_strict,
+    cv.Optional(CONF_INITIAL_VALUE): cv.string_strict,
+    cv.Optional(CONF_MAX_RESTORE_DATA_LENGTH): cv.int_range(0, 254),
+}
+
-MULTI_CONF = True
-CONFIG_SCHEMA = cv.Schema(
+# Non-restoring globals: regular Component (no polling needed)
+_NON_RESTORING_SCHEMA = cv.Schema(
     {
-        cv.Required(CONF_ID): cv.declare_id(GlobalsComponent),
-        cv.Required(CONF_TYPE): cv.string_strict,
-        cv.Optional(CONF_INITIAL_VALUE): cv.string_strict,
+        **_BASE_SCHEMA,
         cv.Optional(CONF_RESTORE_VALUE, default=False): cv.boolean,
-        cv.Optional(CONF_MAX_RESTORE_DATA_LENGTH): cv.int_range(0, 254),
     }
 ).extend(cv.COMPONENT_SCHEMA)

+# Restoring globals: PollingComponent with configurable update_interval
+_RESTORING_SCHEMA = cv.Schema(
+    {
+        **_BASE_SCHEMA,
+        cv.Optional(CONF_RESTORE_VALUE, default=True): cv.boolean,
+    }
+).extend(cv.polling_component_schema("1s"))
+
+
+def _globals_schema(config: ConfigType) -> ConfigType:
+    """Select schema based on restore_value setting."""
+    if config.get(CONF_RESTORE_VALUE, False):
+        return _RESTORING_SCHEMA(config)
+    return _NON_RESTORING_SCHEMA(config)
+
+
+MULTI_CONF = True
+CONFIG_SCHEMA = _globals_schema
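
`CONFIG_SCHEMA` is now a callable that picks a schema per entry rather than a single `cv.Schema`. A self-contained sketch of the same dispatch (plain functions stand in for the real schema objects):

```python
def restoring(config):
    # polling_component_schema("1s") contributes this default
    config.setdefault("update_interval", "1s")
    return config


def non_restoring(config):
    return config


def globals_schema(config):
    if config.get("restore_value", False):
        return restoring(config)
    return non_restoring(config)


print(globals_schema({"id": "boot_count", "type": "int", "restore_value": True}))
# {'id': 'boot_count', 'type': 'int', 'restore_value': True, 'update_interval': '1s'}
```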


 # Run with low priority so that namespaces are registered first
 @coroutine_with_priority(CoroPriority.LATE)
@@ -5,8 +5,7 @@
 #include "esphome/core/helpers.h"
 #include <cstring>

-namespace esphome {
-namespace globals {
+namespace esphome::globals {

 template<typename T> class GlobalsComponent : public Component {
  public:
@@ -24,13 +23,14 @@ template<typename T> class GlobalsComponent : public Component {
   T value_{};
 };

-template<typename T> class RestoringGlobalsComponent : public Component {
+template<typename T> class RestoringGlobalsComponent : public PollingComponent {
  public:
   using value_type = T;
-  explicit RestoringGlobalsComponent() = default;
-  explicit RestoringGlobalsComponent(T initial_value) : value_(initial_value) {}
+  explicit RestoringGlobalsComponent() : PollingComponent(1000) {}
+  explicit RestoringGlobalsComponent(T initial_value) : PollingComponent(1000), value_(initial_value) {}
   explicit RestoringGlobalsComponent(
-      std::array<typename std::remove_extent<T>::type, std::extent<T>::value> initial_value) {
+      std::array<typename std::remove_extent<T>::type, std::extent<T>::value> initial_value)
+      : PollingComponent(1000) {
     memcpy(this->value_, initial_value.data(), sizeof(T));
   }

@@ -44,7 +44,7 @@ template<typename T> class RestoringGlobalsComponent : public Component {

   float get_setup_priority() const override { return setup_priority::HARDWARE; }

-  void loop() override { store_value_(); }
+  void update() override { store_value_(); }

   void on_shutdown() override { store_value_(); }

@@ -66,13 +66,14 @@ template<typename T> class RestoringGlobalsComponent : public Component {
 };

 // Use with string or subclasses of strings
-template<typename T, uint8_t SZ> class RestoringGlobalStringComponent : public Component {
+template<typename T, uint8_t SZ> class RestoringGlobalStringComponent : public PollingComponent {
  public:
   using value_type = T;
-  explicit RestoringGlobalStringComponent() = default;
-  explicit RestoringGlobalStringComponent(T initial_value) { this->value_ = initial_value; }
+  explicit RestoringGlobalStringComponent() : PollingComponent(1000) {}
+  explicit RestoringGlobalStringComponent(T initial_value) : PollingComponent(1000) { this->value_ = initial_value; }
   explicit RestoringGlobalStringComponent(
-      std::array<typename std::remove_extent<T>::type, std::extent<T>::value> initial_value) {
+      std::array<typename std::remove_extent<T>::type, std::extent<T>::value> initial_value)
+      : PollingComponent(1000) {
     memcpy(this->value_, initial_value.data(), sizeof(T));
   }

@@ -90,7 +91,7 @@ template<typename T, uint8_t SZ> class RestoringGlobalStringComponent : public C

   float get_setup_priority() const override { return setup_priority::HARDWARE; }

-  void loop() override { store_value_(); }
+  void update() override { store_value_(); }

   void on_shutdown() override { store_value_(); }

@@ -144,5 +145,4 @@ template<typename T> T &id(GlobalsComponent<T> *value) { return value->value();
 template<typename T> T &id(RestoringGlobalsComponent<T> *value) { return value->value(); }
 template<typename T, uint8_t SZ> T &id(RestoringGlobalStringComponent<T, SZ> *value) { return value->value(); }

-}  // namespace globals
-}  // namespace esphome
+}  // namespace esphome::globals
@@ -155,6 +155,9 @@ async def to_code(config):
         cg.add(var.set_watchdog_timeout(timeout_ms))

     if CORE.is_esp32:
+        # Re-enable ESP-IDF's HTTP client (excluded by default to save compile time)
+        esp32.include_idf_component("esp_http_client")
+
     cg.add(var.set_buffer_size_rx(config[CONF_BUFFER_SIZE_RX]))
     cg.add(var.set_buffer_size_tx(config[CONF_BUFFER_SIZE_TX]))
     cg.add(var.set_verify_ssl(config[CONF_VERIFY_SSL]))
@@ -131,6 +131,10 @@ std::shared_ptr<HttpContainer> HttpRequestArduino::perform(const std::string &ur
     }
   }

+  // HTTPClient::getSize() returns -1 for chunked transfer encoding (no Content-Length).
+  // When cast to size_t, -1 becomes SIZE_MAX (4294967295 on 32-bit).
+  // The read() method handles this: bytes_read_ can never reach SIZE_MAX, so the
+  // early return check (bytes_read_ >= content_length) will never trigger.
   int content_length = container->client_.getSize();
   ESP_LOGD(TAG, "Content-Length: %d", content_length);
   container->content_length = (size_t) content_length;
@@ -167,17 +171,23 @@ int HttpContainerArduino::read(uint8_t *buf, size_t max_len) {
   }

   int available_data = stream_ptr->available();
-  int bufsize = std::min(max_len, std::min(this->content_length - this->bytes_read_, (size_t) available_data));
+  // For chunked transfer encoding, HTTPClient::getSize() returns -1, which becomes SIZE_MAX when
+  // cast to size_t. SIZE_MAX - bytes_read_ is still huge, so it won't limit the read.
+  size_t remaining = (this->content_length > 0) ? (this->content_length - this->bytes_read_) : max_len;
+  int bufsize = std::min(max_len, std::min(remaining, (size_t) available_data));

   if (bufsize == 0) {
     this->duration_ms += (millis() - start);
-    // Check if we've read all expected content
-    if (this->bytes_read_ >= this->content_length) {
+    // Check if we've read all expected content (only valid when content_length is known and not SIZE_MAX)
+    // For chunked encoding (content_length == SIZE_MAX), we can't use this check
+    if (this->content_length > 0 && this->bytes_read_ >= this->content_length) {
       return 0;  // All content read successfully
     }
+    // No data available - check if connection is still open
+    // For chunked encoding, !connected() after reading means EOF (all chunks received)
+    // For known content_length with bytes_read_ < content_length, it means connection dropped
     if (!stream_ptr->connected()) {
-      return HTTP_ERROR_CONNECTION_CLOSED;  // Connection closed prematurely
+      return HTTP_ERROR_CONNECTION_CLOSED;  // Connection closed or EOF for chunked
    }
     return 0;  // No data yet, caller should retry
   }
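
The SIZE_MAX sentinel these comments rely on can be checked in a few lines of Python (a sketch of the 32-bit size_t wrap-around, not ESPHome code):

```python
SIZE_MAX = 2**32 - 1         # size_t max on a 32-bit target
content_length = -1 % 2**32  # what the (size_t) cast of getSize() == -1 yields
assert content_length == SIZE_MAX

# bytes_read_ can never reach SIZE_MAX, so the "all content read" early
# return stays disabled for chunked responses:
bytes_read = 10_000_000
assert not (bytes_read >= content_length)
```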

@@ -157,6 +157,8 @@ std::shared_ptr<HttpContainer> HttpRequestIDF::perform(const std::string &url, c
   }

   container->feed_wdt();
+  // esp_http_client_fetch_headers() returns 0 for chunked transfer encoding (no Content-Length header).
+  // The read() method handles content_length == 0 specially to support chunked responses.
   container->content_length = esp_http_client_fetch_headers(client);
   container->feed_wdt();
   container->status_code = esp_http_client_get_status_code(client);
@@ -225,14 +227,22 @@ std::shared_ptr<HttpContainer> HttpRequestIDF::perform(const std::string &url, c
 //
 // We normalize to HttpContainer::read() contract:
 //   > 0: bytes read
-//   0: no data yet / all content read (caller should check bytes_read vs content_length)
+//   0: all content read (only returned when content_length is known and fully read)
 //   < 0: error/connection closed
+//
+// Note on chunked transfer encoding:
+// esp_http_client_fetch_headers() returns 0 for chunked responses (no Content-Length header).
+// We handle this by skipping the content_length check when content_length is 0,
+// allowing esp_http_client_read() to handle chunked decoding internally and signal EOF
+// by returning 0.
 int HttpContainerIDF::read(uint8_t *buf, size_t max_len) {
   const uint32_t start = millis();
   watchdog::WatchdogManager wdm(this->parent_->get_watchdog_timeout());

-  // Check if we've already read all expected content
-  if (this->bytes_read_ >= this->content_length) {
+  // Skip this check when content_length is 0 (chunked transfer encoding or unknown length)
+  // For chunked responses, esp_http_client_read() will return 0 when all data is received
+  if (this->content_length > 0 && this->bytes_read_ >= this->content_length) {
     return 0;  // All content read successfully
   }

@@ -247,7 +257,13 @@ int HttpContainerIDF::read(uint8_t *buf, size_t max_len) {
     return read_len_or_error;
   }

-  // Connection closed by server before all content received
+  // esp_http_client_read() returns 0 in two cases:
+  //   1. Known content_length: connection closed before all data received (error)
+  //   2. Chunked encoding (content_length == 0): end of stream reached (EOF)
+  // For case 1, returning HTTP_ERROR_CONNECTION_CLOSED is correct.
+  // For case 2, 0 indicates that all chunked data has already been delivered
+  // in previous successful read() calls, so treating this as a closed
+  // connection does not cause any loss of response data.
   if (read_len_or_error == 0) {
     return HTTP_ERROR_CONNECTION_CLOSED;
   }
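
A runnable Python mock of the normalized contract described above — illustrative only, but it exercises the return classes a caller has to handle:

```python
HTTP_ERROR_CONNECTION_CLOSED = -1


class FakeContainer:
    """Serves two chunks, then signals EOF the way a chunked read does."""

    def __init__(self):
        self.chunks = [b"hello ", b"world"]

    def read(self, max_len):
        if self.chunks:
            data = self.chunks.pop(0)[:max_len]
            return len(data), data  # > 0: bytes read
        return HTTP_ERROR_CONNECTION_CLOSED, b""  # < 0: closed / chunked EOF


body = bytearray()
container = FakeContainer()
while True:
    n, data = container.read(512)
    if n > 0:
        body += data
    else:
        break  # 0 (known length fully read) or < 0 (error / chunked EOF)
print(body.decode())  # hello world
```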

@@ -1,6 +1,11 @@
 from esphome import pins
 import esphome.codegen as cg
+from esphome.components.esp32 import (
+    add_idf_sdkconfig_option,
+    get_esp32_variant,
+    include_idf_component,
+)
 from esphome.components.esp32.const import (
     VARIANT_ESP32,
     VARIANT_ESP32C3,
     VARIANT_ESP32C5,
@@ -10,8 +15,6 @@ from esphome.components.esp32 import (
     VARIANT_ESP32P4,
     VARIANT_ESP32S2,
     VARIANT_ESP32S3,
-    add_idf_sdkconfig_option,
-    get_esp32_variant,
 )
 import esphome.config_validation as cv
 from esphome.const import CONF_BITS_PER_SAMPLE, CONF_CHANNEL, CONF_ID, CONF_SAMPLE_RATE
@@ -272,6 +275,10 @@ FINAL_VALIDATE_SCHEMA = _final_validate
 async def to_code(config):
     var = cg.new_Pvariable(config[CONF_ID])
     await cg.register_component(var, config)

+    # Re-enable ESP-IDF's I2S driver (excluded by default to save compile time)
+    include_idf_component("esp_driver_i2s")
+
     if use_legacy():
         cg.add_define("USE_I2S_LEGACY")
@@ -451,7 +451,7 @@ void LD2450Component::handle_periodic_data_() {
   int16_t ty = 0;
   int16_t td = 0;
   int16_t ts = 0;
-  int16_t angle = 0;
+  float angle = 0;
   uint8_t index = 0;
   Direction direction{DIRECTION_UNDEFINED};
   bool is_moving = false;

@@ -143,6 +143,7 @@ CONFIG_SCHEMA = CONFIG_SCHEMA.extend(
             ],
             icon=ICON_FORMAT_TEXT_ROTATION_ANGLE_UP,
             unit_of_measurement=UNIT_DEGREES,
+            accuracy_decimals=1,
         ),
         cv.Optional(CONF_DISTANCE): sensor.sensor_schema(
             device_class=DEVICE_CLASS_DISTANCE,
@@ -4,7 +4,7 @@ from esphome import automation
 from esphome.automation import Condition
 import esphome.codegen as cg
 from esphome.components import logger, socket
-from esphome.components.esp32 import add_idf_sdkconfig_option
+from esphome.components.esp32 import add_idf_sdkconfig_option, include_idf_component
 from esphome.config_helpers import filter_source_files_from_platform
 import esphome.config_validation as cv
 from esphome.const import (
@@ -360,6 +360,8 @@ async def to_code(config):
     # This enables low-latency MQTT event processing instead of waiting for select() timeout
     if CORE.is_esp32:
         socket.require_wake_loop_threadsafe()
+        # Re-enable ESP-IDF's mqtt component (excluded by default to save compile time)
+        include_idf_component("mqtt")

     cg.add_define("USE_MQTT")
     cg.add_global(mqtt_ns.using)
@@ -1,7 +1,12 @@
 from esphome import pins
 import esphome.codegen as cg
 from esphome.components import light
-from esphome.components.esp32 import VARIANT_ESP32C3, VARIANT_ESP32S3, get_esp32_variant
+from esphome.components.esp32 import (
+    VARIANT_ESP32C3,
+    VARIANT_ESP32S3,
+    get_esp32_variant,
+    include_idf_component,
+)
 import esphome.config_validation as cv
 from esphome.const import (
     CONF_CHANNEL,
@@ -205,6 +210,10 @@ async def to_code(config):
     has_white = "W" in config[CONF_TYPE]
     method = config[CONF_METHOD]

+    # Re-enable ESP-IDF's RMT driver if using RMT method (excluded by default)
+    if CORE.is_esp32 and method[CONF_TYPE] == METHOD_ESP32_RMT:
+        include_idf_component("esp_driver_rmt")
+
     method_template = METHODS[method[CONF_TYPE]].to_code(
         method, config[CONF_VARIANT], config[CONF_INVERT]
     )
@@ -170,6 +170,9 @@ CONFIG_SCHEMA = remote_base.validate_triggers(
 async def to_code(config):
     pin = await cg.gpio_pin_expression(config[CONF_PIN])
     if CORE.is_esp32:
+        # Re-enable ESP-IDF's RMT driver (excluded by default to save compile time)
+        esp32.include_idf_component("esp_driver_rmt")
+
     var = cg.new_Pvariable(config[CONF_ID], pin)
     cg.add(var.set_rmt_symbols(config[CONF_RMT_SYMBOLS]))
     cg.add(var.set_receive_symbols(config[CONF_RECEIVE_SYMBOLS]))
@@ -112,6 +112,9 @@ async def digital_write_action_to_code(config, action_id, template_arg, args):
 async def to_code(config):
     pin = await cg.gpio_pin_expression(config[CONF_PIN])
     if CORE.is_esp32:
+        # Re-enable ESP-IDF's RMT driver (excluded by default to save compile time)
+        esp32.include_idf_component("esp_driver_rmt")
+
     var = cg.new_Pvariable(config[CONF_ID], pin)
     cg.add(var.set_rmt_symbols(config[CONF_RMT_SYMBOLS]))
     cg.add(var.set_non_blocking(config[CONF_NON_BLOCKING]))
@@ -27,46 +27,61 @@ void RuntimeStatsCollector::record_component_time(Component *component, uint32_t
 }

 void RuntimeStatsCollector::log_stats_() {
+  // First pass: count active components
+  size_t count = 0;
+  for (const auto &it : this->component_stats_) {
+    if (it.second.get_period_count() > 0) {
+      count++;
+    }
+  }
+
   ESP_LOGI(TAG,
            "Component Runtime Statistics\n"
-           "  Period stats (last %" PRIu32 "ms):",
-           this->log_interval_);
+           "  Period stats (last %" PRIu32 "ms): %zu active components",
+           this->log_interval_, count);

-  // First collect stats we want to display
-  std::vector<ComponentStatPair> stats_to_display;
+  if (count == 0) {
+    return;
+  }
+
+  // Stack buffer sized to actual active count (up to 256 components), heap fallback for larger
+  SmallBufferWithHeapFallback<256, Component *> buffer(count);
+  Component **sorted = buffer.get();
+
+  // Second pass: fill buffer with active components
+  size_t idx = 0;
   for (const auto &it : this->component_stats_) {
-    Component *component = it.first;
-    const ComponentRuntimeStats &stats = it.second;
-    if (stats.get_period_count() > 0) {
-      ComponentStatPair pair = {component, &stats};
-      stats_to_display.push_back(pair);
+    if (it.second.get_period_count() > 0) {
+      sorted[idx++] = it.first;
     }
   }

   // Sort by period runtime (descending)
-  std::sort(stats_to_display.begin(), stats_to_display.end(), std::greater<ComponentStatPair>());
+  std::sort(sorted, sorted + count, [this](Component *a, Component *b) {
+    return this->component_stats_[a].get_period_time_ms() > this->component_stats_[b].get_period_time_ms();
+  });

   // Log top components by period runtime
-  for (const auto &it : stats_to_display) {
+  for (size_t i = 0; i < count; i++) {
+    const auto &stats = this->component_stats_[sorted[i]];
     ESP_LOGI(TAG, "  %s: count=%" PRIu32 ", avg=%.2fms, max=%" PRIu32 "ms, total=%" PRIu32 "ms",
-             LOG_STR_ARG(it.component->get_component_log_str()), it.stats->get_period_count(),
-             it.stats->get_period_avg_time_ms(), it.stats->get_period_max_time_ms(), it.stats->get_period_time_ms());
+             LOG_STR_ARG(sorted[i]->get_component_log_str()), stats.get_period_count(), stats.get_period_avg_time_ms(),
+             stats.get_period_max_time_ms(), stats.get_period_time_ms());
   }

-  // Log total stats since boot
-  ESP_LOGI(TAG, "  Total stats (since boot):");
+  // Log total stats since boot (only for active components - idle ones haven't changed)
+  ESP_LOGI(TAG, "  Total stats (since boot): %zu active components", count);

   // Re-sort by total runtime for all-time stats
-  std::sort(stats_to_display.begin(), stats_to_display.end(),
-            [](const ComponentStatPair &a, const ComponentStatPair &b) {
-              return a.stats->get_total_time_ms() > b.stats->get_total_time_ms();
-            });
+  std::sort(sorted, sorted + count, [this](Component *a, Component *b) {
+    return this->component_stats_[a].get_total_time_ms() > this->component_stats_[b].get_total_time_ms();
+  });

-  for (const auto &it : stats_to_display) {
+  for (size_t i = 0; i < count; i++) {
+    const auto &stats = this->component_stats_[sorted[i]];
     ESP_LOGI(TAG, "  %s: count=%" PRIu32 ", avg=%.2fms, max=%" PRIu32 "ms, total=%" PRIu32 "ms",
-             LOG_STR_ARG(it.component->get_component_log_str()), it.stats->get_total_count(),
-             it.stats->get_total_avg_time_ms(), it.stats->get_total_max_time_ms(), it.stats->get_total_time_ms());
+             LOG_STR_ARG(sorted[i]->get_component_log_str()), stats.get_total_count(), stats.get_total_avg_time_ms(),
+             stats.get_total_max_time_ms(), stats.get_total_time_ms());
   }
 }
@@ -5,7 +5,6 @@
 #ifdef USE_RUNTIME_STATS

 #include <map>
-#include <vector>
 #include <cstdint>
 #include <cstring>
 #include "esphome/core/helpers.h"
@@ -77,17 +76,6 @@ class ComponentRuntimeStats {
   uint32_t total_max_time_ms_;
 };

-// For sorting components by run time
-struct ComponentStatPair {
-  Component *component;
-  const ComponentRuntimeStats *stats;
-
-  bool operator>(const ComponentStatPair &other) const {
-    // Sort by period time as that's what we're displaying in the logs
-    return stats->get_period_time_ms() > other.stats->get_period_time_ms();
-  }
-};
-
 class RuntimeStatsCollector {
  public:
   RuntimeStatsCollector();
@@ -27,6 +27,9 @@ void RealTimeClock::dump_config() {
 #ifdef USE_TIME_TIMEZONE
   ESP_LOGCONFIG(TAG, "Timezone: '%s'", this->timezone_.c_str());
 #endif
+  auto time = this->now();
+  ESP_LOGCONFIG(TAG, "Current time: %04d-%02d-%02d %02d:%02d:%02d", time.year, time.month, time.day_of_month, time.hour,
+                time.minute, time.second);
 }

 void RealTimeClock::synchronize_epoch_(uint32_t epoch) {
@@ -1 +1,6 @@
 CODEOWNERS = ["@clydebarrow"]
+
+DEPRECATED_COMPONENT = """
+The 'waveshare_epaper' component is deprecated and no new models will be added to it.
+New model PRs should target the newer and more performant 'epaper_spi' component.
+"""
@@ -39,6 +39,10 @@
 #include "esphome/components/esp32_improv/esp32_improv_component.h"
 #endif

+#ifdef USE_IMPROV_SERIAL
+#include "esphome/components/improv_serial/improv_serial_component.h"
+#endif
+
 namespace esphome::wifi {

 static const char *const TAG = "wifi";
@@ -365,6 +369,75 @@ bool WiFiComponent::ssid_was_seen_in_scan_(const std::string &ssid) const {
   return false;
 }

+bool WiFiComponent::needs_full_scan_results_() const {
+  // Components that require full scan results (for example, scan result listeners)
+  // are expected to call request_wifi_scan_results(), which sets keep_scan_results_.
+  if (this->keep_scan_results_) {
+    return true;
+  }
+
+#ifdef USE_CAPTIVE_PORTAL
+  // Captive portal needs full results when active (showing network list to user)
+  if (captive_portal::global_captive_portal != nullptr && captive_portal::global_captive_portal->is_active()) {
+    return true;
+  }
+#endif
+
+#ifdef USE_IMPROV_SERIAL
+  // Improv serial needs results during provisioning (before connected)
+  if (improv_serial::global_improv_serial_component != nullptr && !this->is_connected()) {
+    return true;
+  }
+#endif
+
+#ifdef USE_IMPROV
+  // BLE improv also needs results during provisioning
+  if (esp32_improv::global_improv_component != nullptr && esp32_improv::global_improv_component->is_active()) {
+    return true;
+  }
+#endif
+
+  return false;
+}
+
+bool WiFiComponent::matches_configured_network_(const char *ssid, const uint8_t *bssid) const {
+  // Hidden networks in scan results have empty SSIDs - skip them
+  if (ssid[0] == '\0') {
+    return false;
+  }
+  for (const auto &sta : this->sta_) {
+    // Skip hidden network configs (they don't appear in normal scans)
+    if (sta.get_hidden()) {
+      continue;
+    }
+    // For BSSID-only configs (empty SSID), match by BSSID
+    if (sta.get_ssid().empty()) {
+      if (sta.has_bssid() && std::memcmp(sta.get_bssid().data(), bssid, 6) == 0) {
+        return true;
+      }
+      continue;
+    }
+    // Match by SSID
+    if (sta.get_ssid() == ssid) {
+      return true;
+    }
+  }
+  return false;
+}
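
The matching rules above, restated as a runnable Python predicate (hypothetical dict-based config entries stand in for the real `WiFiAP` objects):

```python
def matches_configured_network(scan_ssid: str, scan_bssid: bytes, stas: list[dict]) -> bool:
    if not scan_ssid:  # hidden networks in scan results: skip
        return False
    for sta in stas:
        if sta.get("hidden"):  # hidden configs don't appear in normal scans
            continue
        if not sta.get("ssid"):  # BSSID-only config
            if sta.get("bssid") == scan_bssid:
                return True
            continue
        if sta["ssid"] == scan_ssid:
            return True
    return False


stas = [{"ssid": "home"}, {"ssid": "", "bssid": b"\xaa\xbb\xcc\xdd\xee\xff"}]
assert matches_configured_network("cafe-ap", b"\xaa\xbb\xcc\xdd\xee\xff", stas)  # BSSID-only match
assert not matches_configured_network("", b"\xaa\xbb\xcc\xdd\xee\xff", stas)     # hidden result: skipped
```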

+void WiFiComponent::log_discarded_scan_result_(const char *ssid, const uint8_t *bssid, int8_t rssi, uint8_t channel) {
+#if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE
+  // Skip logging during roaming scans to avoid log buffer overflow
+  // (roaming scans typically find many networks but only care about same-SSID APs)
+  if (this->roaming_state_ == RoamingState::SCANNING) {
+    return;
+  }
+  char bssid_s[MAC_ADDRESS_PRETTY_BUFFER_SIZE];
+  format_mac_addr_upper(bssid, bssid_s);
+  ESP_LOGV(TAG, "- " LOG_SECRET("'%s'") " " LOG_SECRET("(%s)") " %ddB Ch:%u", ssid, bssid_s, rssi, channel);
+#endif
+}
+
 int8_t WiFiComponent::find_next_hidden_sta_(int8_t start_index) {
   // Find next SSID to try in RETRY_HIDDEN phase.
   //
@@ -656,8 +729,12 @@ void WiFiComponent::loop() {
       ESP_LOGI(TAG, "Starting fallback AP");
       this->setup_ap_config_();
 #ifdef USE_CAPTIVE_PORTAL
-      if (captive_portal::global_captive_portal != nullptr)
+      if (captive_portal::global_captive_portal != nullptr) {
+        // Reset so we force one full scan after captive portal starts
+        // (previous scans were filtered because captive portal wasn't active yet)
+        this->has_completed_scan_after_captive_portal_start_ = false;
         captive_portal::global_captive_portal->start();
+      }
 #endif
     }
   }
@@ -1195,7 +1272,7 @@ template<typename VectorType> static void insertion_sort_scan_results(VectorType
 // has overhead from UART transmission, so combining INFO+DEBUG into one line halves
 // the blocking time. Do NOT split this into separate ESP_LOGI/ESP_LOGD calls.
 __attribute__((noinline)) static void log_scan_result(const WiFiScanResult &res) {
-  char bssid_s[18];
+  char bssid_s[MAC_ADDRESS_PRETTY_BUFFER_SIZE];
   auto bssid = res.get_bssid();
   format_mac_addr_upper(bssid.data(), bssid_s);

@@ -1211,18 +1288,6 @@ __attribute__((noinline)) static void log_scan_result(const WiFiScanResult &res)
 #endif
 }

-#if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE
-// Helper function to log non-matching scan results at verbose level
-__attribute__((noinline)) static void log_scan_result_non_matching(const WiFiScanResult &res) {
-  char bssid_s[18];
-  auto bssid = res.get_bssid();
-  format_mac_addr_upper(bssid.data(), bssid_s);
-
-  ESP_LOGV(TAG, "- " LOG_SECRET("'%s'") " " LOG_SECRET("(%s) ") "%s", res.get_ssid().c_str(), bssid_s,
-           LOG_STR_ARG(get_signal_bars(res.get_rssi())));
-}
-#endif
-
 void WiFiComponent::check_scanning_finished() {
   if (!this->scan_done_) {
     if (millis() - this->action_started_ > WIFI_SCAN_TIMEOUT_MS) {
@@ -1232,6 +1297,8 @@ void WiFiComponent::check_scanning_finished() {
       return;
     }
   this->scan_done_ = false;
+  this->has_completed_scan_after_captive_portal_start_ =
+      true;  // Track that we've done a scan since captive portal started
   this->retry_hidden_mode_ = RetryHiddenMode::SCAN_BASED;

   if (this->scan_result_.empty()) {
@@ -1259,21 +1326,12 @@ void WiFiComponent::check_scanning_finished() {
   // Sort scan results using insertion sort for better memory efficiency
   insertion_sort_scan_results(this->scan_result_);

-  size_t non_matching_count = 0;
+  // Log matching networks (non-matching already logged at VERBOSE in scan callback)
   for (auto &res : this->scan_result_) {
     if (res.get_matches()) {
       log_scan_result(res);
-    } else {
-#if ESPHOME_LOG_LEVEL >= ESPHOME_LOG_LEVEL_VERBOSE
-      log_scan_result_non_matching(res);
-#else
-      non_matching_count++;
-#endif
     }
   }
-  if (non_matching_count > 0) {
-    ESP_LOGD(TAG, "- %zu non-matching (VERBOSE to show)", non_matching_count);
-  }

   // SYNCHRONIZATION POINT: Establish link between scan_result_[0] and selected_sta_index_
   // After sorting, scan_result_[0] contains the best network. Now find which sta_[i] config
@@ -1532,7 +1590,10 @@ WiFiRetryPhase WiFiComponent::determine_next_phase_() {
   if (this->went_through_explicit_hidden_phase_()) {
     return WiFiRetryPhase::EXPLICIT_HIDDEN;
   }
-  // Skip scanning when captive portal/improv is active to avoid disrupting AP.
+  // Skip scanning when captive portal/improv is active to avoid disrupting AP,
+  // BUT only if we've already completed at least one scan AFTER the portal started.
+  // When captive portal first starts, scan results may be filtered/stale, so we need
+  // to do one full scan to populate available networks for the captive portal UI.
   //
   // WHY SCANNING DISRUPTS AP MODE:
   // WiFi scanning requires the radio to leave the AP's channel and hop through
@@ -1549,7 +1610,16 @@ WiFiRetryPhase WiFiComponent::determine_next_phase_() {
   //
   // This allows users to configure WiFi via captive portal while the device keeps
   // attempting to connect to all configured networks in sequence.
-  if (this->is_captive_portal_active_() || this->is_esp32_improv_active_()) {
+  // Captive portal needs scan results to show available networks.
+  // If captive portal is active, only skip scanning if we've done a scan after it started.
+  // If only improv is active (no captive portal), skip scanning since improv doesn't need results.
+  if (this->is_captive_portal_active_()) {
+    if (this->has_completed_scan_after_captive_portal_start_) {
+      return WiFiRetryPhase::RETRY_HIDDEN;
+    }
+    // Need to scan for captive portal
+  } else if (this->is_esp32_improv_active_()) {
+    // Improv doesn't need scan results
     return WiFiRetryPhase::RETRY_HIDDEN;
   }
   return WiFiRetryPhase::SCAN_CONNECTING;
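
The resulting decision tree, condensed into a runnable Python sketch (function and flag names are illustrative):

```python
def next_phase(captive_portal_active, improv_active, scanned_since_portal_start):
    if captive_portal_active:
        if scanned_since_portal_start:
            return "RETRY_HIDDEN"  # keep the AP channel stable
        return "SCAN_CONNECTING"   # one full scan to populate the portal UI
    if improv_active:
        return "RETRY_HIDDEN"      # improv doesn't need scan results
    return "SCAN_CONNECTING"


assert next_phase(True, False, False) == "SCAN_CONNECTING"
assert next_phase(True, False, True) == "RETRY_HIDDEN"
assert next_phase(False, True, False) == "RETRY_HIDDEN"
```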

@@ -2096,7 +2166,7 @@ void WiFiComponent::clear_roaming_state_() {

 void WiFiComponent::release_scan_results_() {
   if (!this->keep_scan_results_) {
-#ifdef USE_RP2040
+#if defined(USE_RP2040) || defined(USE_ESP32)
     // std::vector - use swap trick since shrink_to_fit is non-binding
     decltype(this->scan_result_)().swap(this->scan_result_);
 #else
@@ -161,9 +161,12 @@ struct EAPAuth {

 using bssid_t = std::array<uint8_t, 6>;

-// Use std::vector for RP2040 since scan count is unknown (callback-based)
-// Use FixedVector for other platforms where count is queried first
-#ifdef USE_RP2040
+/// Initial reserve size for filtered scan results (typical: 1-3 matching networks per SSID)
+static constexpr size_t WIFI_SCAN_RESULT_FILTERED_RESERVE = 8;
+
+// Use std::vector for RP2040 (callback-based) and ESP32 (destructive scan API)
+// Use FixedVector for ESP8266 and LibreTiny where two-pass exact allocation is possible
+#if defined(USE_RP2040) || defined(USE_ESP32)
 template<typename T> using wifi_scan_vector_t = std::vector<T>;
 #else
 template<typename T> using wifi_scan_vector_t = FixedVector<T>;
@@ -539,6 +542,13 @@ class WiFiComponent : public Component {
   /// Check if an SSID was seen in the most recent scan results
   /// Used to skip hidden mode for SSIDs we know are visible
   bool ssid_was_seen_in_scan_(const std::string &ssid) const;
+  /// Check if full scan results are needed (captive portal active, improv, listeners)
+  bool needs_full_scan_results_() const;
+  /// Check if network matches any configured network (for scan result filtering)
+  /// Matches by SSID when configured, or by BSSID for BSSID-only configs
+  bool matches_configured_network_(const char *ssid, const uint8_t *bssid) const;
+  /// Log a discarded scan result at VERBOSE level (skipped during roaming scans to avoid log overflow)
+  void log_discarded_scan_result_(const char *ssid, const uint8_t *bssid, int8_t rssi, uint8_t channel);
   /// Find next SSID that wasn't in scan results (might be hidden)
   /// Returns index of next potentially hidden SSID, or -1 if none found
   /// @param start_index Start searching from index after this (-1 to start from beginning)
@@ -710,6 +720,8 @@ class WiFiComponent : public Component {
   bool enable_on_boot_{true};
   bool got_ipv4_address_{false};
   bool keep_scan_results_{false};
+  bool has_completed_scan_after_captive_portal_start_{
+      false};  // Tracks if we've completed a scan after captive portal started
   RetryHiddenMode retry_hidden_mode_{RetryHiddenMode::BLIND_RETRY};
   bool skip_cooldown_next_cycle_{false};
   bool post_connect_roaming_{true};  // Enabled by default
@@ -756,24 +756,42 @@ void WiFiComponent::wifi_scan_done_callback_(void *arg, STATUS status) {

   if (status != OK) {
     ESP_LOGV(TAG, "Scan failed: %d", status);
-    this->retry_connect();
+    // Don't call retry_connect() here - this callback runs in SDK system context
+    // where yield() cannot be called. Instead, just set scan_done_ and let
+    // check_scanning_finished() handle the empty scan_result_ from loop context.
+    this->scan_done_ = true;
     return;
   }

-  // Count the number of results first
   auto *head = reinterpret_cast<bss_info *>(arg);
+  bool needs_full = this->needs_full_scan_results_();
+
+  // First pass: count matching networks (linked list is non-destructive)
+  size_t total = 0;
   size_t count = 0;
   for (bss_info *it = head; it != nullptr; it = STAILQ_NEXT(it, next)) {
-    count++;
+    total++;
+    const char *ssid_cstr = reinterpret_cast<const char *>(it->ssid);
+    if (needs_full || this->matches_configured_network_(ssid_cstr, it->bssid)) {
+      count++;
+    }
   }

-  this->scan_result_.init(count);
+  this->scan_result_.init(count);  // Exact allocation

+  // Second pass: store matching networks
   for (bss_info *it = head; it != nullptr; it = STAILQ_NEXT(it, next)) {
-    this->scan_result_.emplace_back(
-        bssid_t{it->bssid[0], it->bssid[1], it->bssid[2], it->bssid[3], it->bssid[4], it->bssid[5]},
-        std::string(reinterpret_cast<char *>(it->ssid), it->ssid_len), it->channel, it->rssi, it->authmode != AUTH_OPEN,
-        it->is_hidden != 0);
+    const char *ssid_cstr = reinterpret_cast<const char *>(it->ssid);
+    if (needs_full || this->matches_configured_network_(ssid_cstr, it->bssid)) {
+      this->scan_result_.emplace_back(
+          bssid_t{it->bssid[0], it->bssid[1], it->bssid[2], it->bssid[3], it->bssid[4], it->bssid[5]},
+          std::string(ssid_cstr, it->ssid_len), it->channel, it->rssi, it->authmode != AUTH_OPEN, it->is_hidden != 0);
+    } else {
+      this->log_discarded_scan_result_(ssid_cstr, it->bssid, it->rssi, it->channel);
+    }
   }
+  ESP_LOGV(TAG, "Scan complete: %zu found, %zu stored%s", total, this->scan_result_.size(),
+           needs_full ? "" : " (filtered)");
   this->scan_done_ = true;
 #ifdef USE_WIFI_SCAN_RESULTS_LISTENERS
   for (auto *listener : global_wifi_component->scan_results_listeners_) {
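
The count-then-fill idiom used here (possible because the SDK's linked list can be walked twice without being consumed) is easy to see in isolation; a hedged Python rendering:

```python
networks = [("home", -40), ("", -70), ("guest", -55), ("other", -80)]
wanted = {"home", "guest"}

count = sum(1 for ssid, _ in networks if ssid in wanted)
result = [None] * count  # exact allocation, like scan_result_.init(count)

idx = 0
for ssid, rssi in networks:
    if ssid in wanted:
        result[idx] = (ssid, rssi)
        idx += 1
print(result)  # [('home', -40), ('guest', -55)]
```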

@@ -828,11 +828,21 @@ void WiFiComponent::wifi_process_event_(IDFWiFiEvent *data) {
   }

   uint16_t number = it.number;
-  scan_result_.init(number);
+  bool needs_full = this->needs_full_scan_results_();
+
+  // Smart reserve: full capacity if needed, small reserve otherwise
+  if (needs_full) {
+    this->scan_result_.reserve(number);
+  } else {
+    this->scan_result_.reserve(WIFI_SCAN_RESULT_FILTERED_RESERVE);
+  }

 #ifdef USE_ESP32_HOSTED
   // getting records one at a time fails on P4 with hosted esp32 WiFi coprocessor
   // Presumably an upstream bug, work-around by getting all records at once
-  auto records = std::make_unique<wifi_ap_record_t[]>(number);
+  // Use stack buffer (3904 bytes / ~80 bytes per record = ~48 records) with heap fallback
+  static constexpr size_t SCAN_RECORD_STACK_COUNT = 3904 / sizeof(wifi_ap_record_t);
+  SmallBufferWithHeapFallback<SCAN_RECORD_STACK_COUNT, wifi_ap_record_t> records(number);
   err = esp_wifi_scan_get_ap_records(&number, records.get());
   if (err != ESP_OK) {
     esp_wifi_clear_ap_list();
@@ -840,7 +850,7 @@ void WiFiComponent::wifi_process_event_(IDFWiFiEvent *data) {
     return;
   }
   for (uint16_t i = 0; i < number; i++) {
-    wifi_ap_record_t &record = records[i];
+    wifi_ap_record_t &record = records.get()[i];
 #else
   // Process one record at a time to avoid large buffer allocation
   for (uint16_t i = 0; i < number; i++) {
@@ -852,12 +862,23 @@ void WiFiComponent::wifi_process_event_(IDFWiFiEvent *data) {
       break;
     }
 #endif  // USE_ESP32_HOSTED
-    bssid_t bssid;
-    std::copy(record.bssid, record.bssid + 6, bssid.begin());
-    std::string ssid(reinterpret_cast<const char *>(record.ssid));
-    scan_result_.emplace_back(bssid, ssid, record.primary, record.rssi, record.authmode != WIFI_AUTH_OPEN,
-                              ssid.empty());
+
+    // Check C string first - avoid std::string construction for non-matching networks
+    const char *ssid_cstr = reinterpret_cast<const char *>(record.ssid);
+
+    // Only construct std::string and store if needed
+    if (needs_full || this->matches_configured_network_(ssid_cstr, record.bssid)) {
+      bssid_t bssid;
+      std::copy(record.bssid, record.bssid + 6, bssid.begin());
+      std::string ssid(ssid_cstr);
+      this->scan_result_.emplace_back(bssid, std::move(ssid), record.primary, record.rssi,
+                                      record.authmode != WIFI_AUTH_OPEN, ssid_cstr[0] == '\0');
+    } else {
+      this->log_discarded_scan_result_(ssid_cstr, record.bssid, record.rssi, record.primary);
+    }
   }
+  ESP_LOGV(TAG, "Scan complete: %u found, %zu stored%s", number, this->scan_result_.size(),
+           needs_full ? "" : " (filtered)");
 #ifdef USE_WIFI_SCAN_RESULTS_LISTENERS
   for (auto *listener : this->scan_results_listeners_) {
     listener->on_wifi_scan_results(this->scan_result_);
@@ -670,18 +670,39 @@ void WiFiComponent::wifi_scan_done_callback_() {
   if (num < 0)
     return;

-  this->scan_result_.init(static_cast<unsigned int>(num));
-  for (int i = 0; i < num; i++) {
-    String ssid = WiFi.SSID(i);
-    wifi_auth_mode_t authmode = WiFi.encryptionType(i);
-    int32_t rssi = WiFi.RSSI(i);
-    uint8_t *bssid = WiFi.BSSID(i);
-    int32_t channel = WiFi.channel(i);
+  bool needs_full = this->needs_full_scan_results_();

-    this->scan_result_.emplace_back(bssid_t{bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]},
-                                    std::string(ssid.c_str()), channel, rssi, authmode != WIFI_AUTH_OPEN,
-                                    ssid.length() == 0);
+  // Access scan results directly via WiFi.scan struct to avoid Arduino String allocations
+  // WiFi.scan is public in LibreTiny for WiFiEvents & WiFiScan static handlers
+  auto *scan = WiFi.scan;
+
+  // First pass: count matching networks
+  size_t count = 0;
+  for (int i = 0; i < num; i++) {
+    const char *ssid_cstr = scan->ap[i].ssid;
+    if (needs_full || this->matches_configured_network_(ssid_cstr, scan->ap[i].bssid.addr)) {
+      count++;
+    }
+  }
+
+  this->scan_result_.init(count);  // Exact allocation
+
+  // Second pass: store matching networks
+  for (int i = 0; i < num; i++) {
+    const char *ssid_cstr = scan->ap[i].ssid;
+    if (needs_full || this->matches_configured_network_(ssid_cstr, scan->ap[i].bssid.addr)) {
+      auto &ap = scan->ap[i];
+      this->scan_result_.emplace_back(bssid_t{ap.bssid.addr[0], ap.bssid.addr[1], ap.bssid.addr[2], ap.bssid.addr[3],
+                                              ap.bssid.addr[4], ap.bssid.addr[5]},
+                                      std::string(ssid_cstr), ap.channel, ap.rssi, ap.auth != WIFI_AUTH_OPEN,
+                                      ssid_cstr[0] == '\0');
+    } else {
+      auto &ap = scan->ap[i];
+      this->log_discarded_scan_result_(ssid_cstr, ap.bssid.addr, ap.rssi, ap.channel);
+    }
   }
+  ESP_LOGV(TAG, "Scan complete: %d found, %zu stored%s", num, this->scan_result_.size(),
+           needs_full ? "" : " (filtered)");
   WiFi.scanDelete();
 #ifdef USE_WIFI_SCAN_RESULTS_LISTENERS
   for (auto *listener : this->scan_results_listeners_) {
@@ -21,6 +21,7 @@ static const char *const TAG = "wifi_pico_w";
 // Track previous state for detecting changes
 static bool s_sta_was_connected = false;  // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
 static bool s_sta_had_ip = false;         // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
+static size_t s_scan_result_count = 0;    // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)

 bool WiFiComponent::wifi_mode_(optional<bool> sta, optional<bool> ap) {
   if (sta.has_value()) {
@@ -137,10 +138,20 @@ int WiFiComponent::s_wifi_scan_result(void *env, const cyw43_ev_scan_result_t *r
 }

 void WiFiComponent::wifi_scan_result(void *env, const cyw43_ev_scan_result_t *result) {
+  s_scan_result_count++;
+  const char *ssid_cstr = reinterpret_cast<const char *>(result->ssid);
+
+  // Skip networks that don't match any configured network (unless full results needed)
+  if (!this->needs_full_scan_results_() && !this->matches_configured_network_(ssid_cstr, result->bssid)) {
+    this->log_discarded_scan_result_(ssid_cstr, result->bssid, result->rssi, result->channel);
+    return;
+  }
+
   bssid_t bssid;
   std::copy(result->bssid, result->bssid + 6, bssid.begin());
-  std::string ssid(reinterpret_cast<const char *>(result->ssid));
-  WiFiScanResult res(bssid, ssid, result->channel, result->rssi, result->auth_mode != CYW43_AUTH_OPEN, ssid.empty());
+  std::string ssid(ssid_cstr);
+  WiFiScanResult res(bssid, std::move(ssid), result->channel, result->rssi, result->auth_mode != CYW43_AUTH_OPEN,
+                     ssid_cstr[0] == '\0');
   if (std::find(this->scan_result_.begin(), this->scan_result_.end(), res) == this->scan_result_.end()) {
     this->scan_result_.push_back(res);
   }
@@ -149,6 +160,7 @@ void WiFiComponent::wifi_scan_result(void *env, const cyw43_ev_scan_result_t *re
 bool WiFiComponent::wifi_scan_start_(bool passive) {
   this->scan_result_.clear();
   this->scan_done_ = false;
+  s_scan_result_count = 0;
   cyw43_wifi_scan_options_t scan_options = {0};
   scan_options.scan_type = passive ? 1 : 0;
   int err = cyw43_wifi_scan(&cyw43_state, &scan_options, nullptr, &s_wifi_scan_result);
@@ -244,7 +256,9 @@ void WiFiComponent::wifi_loop_() {
   // Handle scan completion
   if (this->state_ == WIFI_COMPONENT_STATE_STA_SCANNING && !cyw43_wifi_scan_active(&cyw43_state)) {
     this->scan_done_ = true;
-    ESP_LOGV(TAG, "Scan done");
+    bool needs_full = this->needs_full_scan_results_();
+    ESP_LOGV(TAG, "Scan complete: %zu found, %zu stored%s", s_scan_result_count, this->scan_result_.size(),
+             needs_full ? "" : " (filtered)");
 #ifdef USE_WIFI_SCAN_RESULTS_LISTENERS
     for (auto *listener : this->scan_results_listeners_) {
       listener->on_wifi_scan_results(this->scan_result_);
@@ -12,6 +12,7 @@ from esphome.core import CORE
 from esphome.types import ConfigType

 from .const_zephyr import (
+    CONF_IEEE802154_VENDOR_OUI,
     CONF_MAX_EP_NUMBER,
     CONF_ON_JOIN,
     CONF_POWER_SOURCE,
@@ -58,6 +59,13 @@ CONFIG_SCHEMA = cv.All(
             cv.Optional(CONF_POWER_SOURCE, default="DC_SOURCE"): cv.enum(
                 POWER_SOURCE, upper=True
             ),
+            cv.Optional(CONF_IEEE802154_VENDOR_OUI): cv.All(
+                cv.Any(
+                    cv.int_range(min=0x000000, max=0xFFFFFF),
+                    cv.one_of(*["random"], lower=True),
+                ),
+                cv.requires_component("nrf52"),
+            ),
         }
     ).extend(cv.COMPONENT_SCHEMA),
     zigbee_set_core_data,
@@ -120,7 +128,7 @@ async def setup_switch(entity: cg.MockObj, config: ConfigType) -> None:
 def consume_endpoint(config: ConfigType) -> ConfigType:
     if not config.get(CONF_ZIGBEE_ID) or config.get(CONF_INTERNAL):
         return config
-    if " " in config[CONF_NAME]:
+    if CONF_NAME in config and " " in config[CONF_NAME]:
         _LOGGER.warning(
             "Spaces in '%s' work with ZHA but not Zigbee2MQTT. For Zigbee2MQTT use '%s'",
             config[CONF_NAME],

@@ -22,6 +22,7 @@ POWER_SOURCE = {
     "EMERGENCY_MAINS_CONST": "ZB_ZCL_BASIC_POWER_SOURCE_EMERGENCY_MAINS_CONST",
     "EMERGENCY_MAINS_TRANSF": "ZB_ZCL_BASIC_POWER_SOURCE_EMERGENCY_MAINS_TRANSF",
 }
+CONF_IEEE802154_VENDOR_OUI = "ieee802154_vendor_oui"

 # Keys for CORE.data storage
 KEY_ZIGBEE = "zigbee"
esphome/components/zigbee/time/__init__.py (new file, 86 lines)
@@ -0,0 +1,86 @@
+import esphome.codegen as cg
+from esphome.components import time as time_
+import esphome.config_validation as cv
+from esphome.const import CONF_ID
+from esphome.core import CORE
+from esphome.types import ConfigType
+
+from .. import consume_endpoint
+from ..const_zephyr import CONF_ZIGBEE_ID, zigbee_ns
+from ..zigbee_zephyr import (
+    ZigbeeClusterDesc,
+    ZigbeeComponent,
+    get_slot_index,
+    zigbee_new_attr_list,
+    zigbee_new_cluster_list,
+    zigbee_new_variable,
+    zigbee_register_ep,
+)
+
+DEPENDENCIES = ["zigbee"]
+
+ZigbeeTime = zigbee_ns.class_("ZigbeeTime", time_.RealTimeClock)
+
+CONFIG_SCHEMA = cv.All(
+    time_.TIME_SCHEMA.extend(
+        {
+            cv.GenerateID(): cv.declare_id(ZigbeeTime),
+            cv.OnlyWith(CONF_ZIGBEE_ID, ["nrf52", "zigbee"]): cv.use_id(
+                ZigbeeComponent
+            ),
+        }
+    )
+    .extend(cv.COMPONENT_SCHEMA)
+    .extend(cv.polling_component_schema("1s")),
+    consume_endpoint,
+)
+
+
+async def to_code(config: ConfigType) -> None:
+    CORE.add_job(_add_time, config)
+
+
+async def _add_time(config: ConfigType) -> None:
+    slot_index = get_slot_index()
+
+    # Create unique names for this component's variables based on slot index
+    prefix = f"zigbee_ep{slot_index + 1}"
+    attrs_name = f"{prefix}_time_attrs"
+    attr_list_name = f"{prefix}_time_attrib_list"
+    cluster_list_name = f"{prefix}_cluster_list"
+    ep_name = f"{prefix}_ep"
+
+    # Create the time attributes structure
+    time_attrs = zigbee_new_variable(attrs_name, "zb_zcl_time_attrs_t")
+    attr_list = zigbee_new_attr_list(
+        attr_list_name,
+        "ZB_ZCL_DECLARE_TIME_ATTR_LIST",
+        str(time_attrs),
+    )
+
+    # Create cluster list and register endpoint
+    cluster_list_name, clusters = zigbee_new_cluster_list(
+        cluster_list_name,
+        [
+            ZigbeeClusterDesc("ZB_ZCL_CLUSTER_ID_TIME", attr_list),
+            ZigbeeClusterDesc("ZB_ZCL_CLUSTER_ID_TIME"),
+        ],
+    )
+    zigbee_register_ep(
+        ep_name,
+        cluster_list_name,
+        0,
+        clusters,
+        slot_index,
+        "ZB_HA_CUSTOM_ATTR_DEVICE_ID",
+    )
+
+    # Create the ZigbeeTime component
+    var = cg.new_Pvariable(config[CONF_ID])
+    await time_.register_time(var, config)
+    await cg.register_component(var, config)
+
+    cg.add(var.set_endpoint(slot_index + 1))
+    cg.add(var.set_cluster_attributes(time_attrs))
+    hub = await cg.get_variable(config[CONF_ZIGBEE_ID])
+    cg.add(var.set_parent(hub))
esphome/components/zigbee/time/zigbee_time_zephyr.cpp (new file, 87 lines)
@@ -0,0 +1,87 @@
+#include "zigbee_time_zephyr.h"
+#if defined(USE_ZIGBEE) && defined(USE_NRF52) && defined(USE_TIME)
+#include "esphome/core/log.h"
+
+namespace esphome::zigbee {
+
+static const char *const TAG = "zigbee.time";
+
+// This time standard is the number of seconds since
+// 0 hrs 0 mins 0 sec on 1st January 2000 UTC (Universal Coordinated Time).
+constexpr time_t EPOCH_2000 = 946684800;
+
+ZigbeeTime *global_time = nullptr;  // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
+
+void ZigbeeTime::sync_time(zb_ret_t status, zb_uint32_t auth_level, zb_uint16_t short_addr, zb_uint8_t endpoint,
+                           zb_uint32_t nw_time) {
+  if (status == RET_OK && auth_level >= ZB_ZCL_TIME_HAS_SYNCHRONIZED_BIT) {
+    global_time->set_epoch_time(nw_time + EPOCH_2000);
+  } else if (status != RET_TIMEOUT || !global_time->has_time_) {
+    ESP_LOGE(TAG, "Status: %d, auth_level: %u, short_addr: %d, endpoint: %d, nw_time: %u", status, auth_level,
+             short_addr, endpoint, nw_time);
+  }
+}
+
+void ZigbeeTime::setup() {
+  global_time = this;
+  this->parent_->add_callback(this->endpoint_, [this](zb_bufid_t bufid) { this->zcl_device_cb_(bufid); });
+  synchronize_epoch_(EPOCH_2000);
+  this->parent_->add_join_callback([this]() { zb_zcl_time_server_synchronize(this->endpoint_, sync_time); });
+}
+
+void ZigbeeTime::dump_config() {
+  ESP_LOGCONFIG(TAG,
+                "Zigbee Time\n"
+                "  Endpoint: %d",
+                this->endpoint_);
+  RealTimeClock::dump_config();
+}
+
+void ZigbeeTime::update() {
+  time_t time = timestamp_now();
+  this->cluster_attributes_->time = time - EPOCH_2000;
+}
+
+void ZigbeeTime::set_epoch_time(uint32_t epoch) {
+  this->defer([this, epoch]() {
+    this->synchronize_epoch_(epoch);
+    this->has_time_ = true;
+  });
+}
+
+void ZigbeeTime::zcl_device_cb_(zb_bufid_t bufid) {
+  zb_zcl_device_callback_param_t *p_device_cb_param = ZB_BUF_GET_PARAM(bufid, zb_zcl_device_callback_param_t);
+  zb_zcl_device_callback_id_t device_cb_id = p_device_cb_param->device_cb_id;
+  zb_uint16_t cluster_id = p_device_cb_param->cb_param.set_attr_value_param.cluster_id;
+  zb_uint16_t attr_id = p_device_cb_param->cb_param.set_attr_value_param.attr_id;
+
+  switch (device_cb_id) {
+    /* ZCL set attribute value */
+    case ZB_ZCL_SET_ATTR_VALUE_CB_ID:
+      if (cluster_id == ZB_ZCL_CLUSTER_ID_TIME) {
+        if (attr_id == ZB_ZCL_ATTR_TIME_TIME_ID) {
+          zb_uint32_t value = p_device_cb_param->cb_param.set_attr_value_param.values.data32;
+          ESP_LOGI(TAG, "Synchronize time to %u", value);
+          this->defer([this, value]() { synchronize_epoch_(value + EPOCH_2000); });
+        } else if (attr_id == ZB_ZCL_ATTR_TIME_TIME_STATUS_ID) {
+          zb_uint8_t value = p_device_cb_param->cb_param.set_attr_value_param.values.data8;
+          ESP_LOGI(TAG, "Time status %hd", value);
+          this->defer([this, value]() { this->has_time_ = ZB_ZCL_TIME_TIME_STATUS_SYNCHRONIZED_BIT_IS_SET(value); });
+        }
+      } else {
+        /* other clusters attribute handled here */
+        ESP_LOGI(TAG, "Unhandled cluster attribute id: %d", cluster_id);
+        p_device_cb_param->status = RET_NOT_IMPLEMENTED;
+      }
+      break;
+    default:
+      p_device_cb_param->status = RET_NOT_IMPLEMENTED;
+      break;
+  }
+
+  ESP_LOGD(TAG, "Zcl_device_cb_ status: %hd", p_device_cb_param->status);
+}
+
+}  // namespace esphome::zigbee
+
+#endif
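
The ZCL Time cluster counts seconds from 2000-01-01T00:00:00Z rather than from the Unix epoch; a quick Python check of the hard-coded offset and the two conversions the file performs:

```python
from datetime import datetime, timezone

EPOCH_2000 = int(datetime(2000, 1, 1, tzinfo=timezone.utc).timestamp())
assert EPOCH_2000 == 946684800  # matches the constant above


def zcl_to_unix(nw_time: int) -> int:  # as in sync_time()
    return nw_time + EPOCH_2000


def unix_to_zcl(unix_time: int) -> int:  # as in update()
    return unix_time - EPOCH_2000
```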

esphome/components/zigbee/time/zigbee_time_zephyr.h (new file, 38 lines)
@@ -0,0 +1,38 @@
+#pragma once
+#include "esphome/core/defines.h"
+#if defined(USE_ZIGBEE) && defined(USE_NRF52) && defined(USE_TIME)
+#include "esphome/core/component.h"
+#include "esphome/components/time/real_time_clock.h"
+#include "esphome/components/zigbee/zigbee_zephyr.h"
+
+extern "C" {
+#include <zboss_api.h>
+#include <zboss_api_addons.h>
+}
+
+namespace esphome::zigbee {
+
+class ZigbeeTime : public time::RealTimeClock, public ZigbeeEntity {
+ public:
+  void setup() override;
+  void dump_config() override;
+  void update() override;
+
+  void set_cluster_attributes(zb_zcl_time_attrs_t &cluster_attributes) {
+    this->cluster_attributes_ = &cluster_attributes;
+  }
+
+  void set_epoch_time(uint32_t epoch);
+
+ protected:
+  static void sync_time(zb_ret_t status, zb_uint32_t auth_level, zb_uint16_t short_addr, zb_uint8_t endpoint,
+                        zb_uint32_t nw_time);
+  void zcl_device_cb_(zb_bufid_t bufid);
+  zb_zcl_time_attrs_t *cluster_attributes_{nullptr};
+
+  bool has_time_{false};
+};
+
+}  // namespace esphome::zigbee
+
+#endif
@@ -22,7 +22,7 @@ void ZigbeeBinarySensor::setup() {
     ZB_ZCL_SET_ATTRIBUTE(this->endpoint_, ZB_ZCL_CLUSTER_ID_BINARY_INPUT, ZB_ZCL_CLUSTER_SERVER_ROLE,
                          ZB_ZCL_ATTR_BINARY_INPUT_PRESENT_VALUE_ID, &this->cluster_attributes_->present_value,
                          ZB_FALSE);
-    this->parent_->flush();
+    this->parent_->force_report();
   });
 }

@@ -21,7 +21,7 @@ void ZigbeeSensor::setup() {
     ZB_ZCL_SET_ATTRIBUTE(this->endpoint_, ZB_ZCL_CLUSTER_ID_ANALOG_INPUT, ZB_ZCL_CLUSTER_SERVER_ROLE,
                          ZB_ZCL_ATTR_ANALOG_INPUT_PRESENT_VALUE_ID,
                          (zb_uint8_t *) &this->cluster_attributes_->present_value, ZB_FALSE);
-    this->parent_->flush();
+    this->parent_->force_report();
   });
 }

@@ -31,7 +31,7 @@ void ZigbeeSwitch::setup() {
     ZB_ZCL_SET_ATTRIBUTE(this->endpoint_, ZB_ZCL_CLUSTER_ID_BINARY_OUTPUT, ZB_ZCL_CLUSTER_SERVER_ROLE,
                          ZB_ZCL_ATTR_BINARY_OUTPUT_PRESENT_VALUE_ID, &this->cluster_attributes_->present_value,
                          ZB_FALSE);
-    this->parent_->flush();
+    this->parent_->force_report();
   });
 }

@@ -41,8 +41,6 @@ void ZigbeeSwitch::zcl_device_cb_(zb_bufid_t bufid) {
   zb_uint16_t cluster_id = p_device_cb_param->cb_param.set_attr_value_param.cluster_id;
   zb_uint16_t attr_id = p_device_cb_param->cb_param.set_attr_value_param.attr_id;

-  p_device_cb_param->status = RET_OK;
-
   switch (device_cb_id) {
     /* ZCL set attribute value */
     case ZB_ZCL_SET_ATTR_VALUE_CB_ID:
@@ -58,10 +56,11 @@ void ZigbeeSwitch::zcl_device_cb_(zb_bufid_t bufid) {
       } else {
         /* other clusters attribute handled here */
         ESP_LOGI(TAG, "Unhandled cluster attribute id: %d", cluster_id);
+        p_device_cb_param->status = RET_NOT_IMPLEMENTED;
       }
       break;
     default:
-      p_device_cb_param->status = RET_ERROR;
+      p_device_cb_param->status = RET_NOT_IMPLEMENTED;
       break;
   }
@@ -112,10 +112,10 @@ void ZigbeeComponent::zcl_device_cb(zb_bufid_t bufid) {
    const auto &cb = global_zigbee->callbacks_[endpoint - 1];
    if (cb) {
      cb(bufid);
      return;
    }
    return;
  }
-  p_device_cb_param->status = RET_ERROR;
+  p_device_cb_param->status = RET_NOT_IMPLEMENTED;
}
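The dispatch above indexes a fixed-size callback table by endpoint; Zigbee endpoints are numbered from 1 on the wire, hence the `endpoint - 1`. A self-contained sketch of the same lookup, with table size and handler signature as illustrative stand-ins:

#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>

constexpr std::size_t ZIGBEE_ENDPOINTS_COUNT = 4;
// One optional handler per endpoint; a default-constructed std::function is empty.
std::array<std::function<void(int)>, ZIGBEE_ENDPOINTS_COUNT> callbacks{};

void dispatch(uint8_t endpoint, int bufid) {
  if (endpoint >= 1 && endpoint <= ZIGBEE_ENDPOINTS_COUNT) {
    if (auto &cb = callbacks[endpoint - 1]) {  // endpoints are 1-based on the wire
      cb(bufid);
      return;
    }
  }
  puts("unhandled endpoint");  // the diff reports RET_NOT_IMPLEMENTED here
}

int main() {
  callbacks[0] = [](int bufid) { printf("endpoint 1 handled buf %d\n", bufid); };
  dispatch(1, 42);  // handled by the registered callback
  dispatch(3, 7);   // no handler registered -> unhandled
  return 0;
}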
void ZigbeeComponent::on_join_() {

@@ -230,11 +230,11 @@ static void send_attribute_report(zb_bufid_t bufid, zb_uint16_t cmd_id) {
  zb_buf_free(bufid);
}

void ZigbeeComponent::flush() { this->need_flush_ = true; }
void ZigbeeComponent::force_report() { this->force_report_ = true; }

void ZigbeeComponent::loop() {
  if (this->need_flush_) {
    this->need_flush_ = false;
    if (this->force_report_) {
      this->force_report_ = false;
      zb_buf_get_out_delayed_ext(send_attribute_report, 0, 0);
    }
  }
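flush() and force_report() only set flags; the actual radio work happens once per loop() pass, so several attribute writes in the same pass collapse into at most one report. A minimal model of that coalescing, with the send path stubbed out (not the committed implementation):

#include <cstdio>

class Coalescer {
 public:
  void flush() { need_flush_ = true; }
  void force_report() { force_report_ = true; }
  void loop() {
    if (!need_flush_)
      return;
    need_flush_ = false;
    if (force_report_) {
      force_report_ = false;
      send_report_();  // one report, no matter how many writes requested it
    }
  }

 private:
  void send_report_() { puts("report sent"); }
  bool need_flush_{false};
  bool force_report_{false};
};

int main() {
  Coalescer c;
  c.flush();
  c.force_report();
  c.flush();
  c.force_report();  // second request coalesces with the first
  c.loop();          // prints "report sent" exactly once
  return 0;
}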
@@ -72,7 +72,7 @@ class ZigbeeComponent : public Component {
  void zboss_signal_handler_esphome(zb_bufid_t bufid);
  void factory_reset();
  Trigger<> *get_join_trigger() { return &this->join_trigger_; };
  void flush();
  void force_report();
  void loop() override;

 protected:
@@ -84,7 +84,7 @@ class ZigbeeComponent : public Component {
  std::array<std::function<void(zb_bufid_t bufid)>, ZIGBEE_ENDPOINTS_COUNT> callbacks_{};
  CallbackManager<void()> join_cb_;
  Trigger<> join_trigger_;
  bool need_flush_{false};
  bool force_report_{false};
};

class ZigbeeEntity {
@@ -49,6 +49,7 @@ from esphome.cpp_generator import (
from esphome.types import ConfigType

from .const_zephyr import (
    CONF_IEEE802154_VENDOR_OUI,
    CONF_ON_JOIN,
    CONF_POWER_SOURCE,
    CONF_WIPE_ON_BOOT,
@@ -152,6 +153,13 @@ async def zephyr_to_code(config: ConfigType) -> None:
    zephyr_add_prj_conf("NET_IP_ADDR_CHECK", False)
    zephyr_add_prj_conf("NET_UDP", False)

    if CONF_IEEE802154_VENDOR_OUI in config:
        zephyr_add_prj_conf("IEEE802154_VENDOR_OUI_ENABLE", True)
        random_number = config[CONF_IEEE802154_VENDOR_OUI]
        if random_number == "random":
            random_number = random.randint(0x000000, 0xFFFFFF)
        zephyr_add_prj_conf("IEEE802154_VENDOR_OUI", random_number)

    if config[CONF_WIPE_ON_BOOT]:
        if config[CONF_WIPE_ON_BOOT] == "once":
            cg.add_define(
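An IEEE OUI is a 24-bit value (the vendor half of the device's EUI-64), which is why the "random" option above draws from [0x000000, 0xFFFFFF]. The same draw in C++, purely for illustration:

#include <cstdint>
#include <cstdio>
#include <random>

// Draw a random 24-bit OUI, mirroring random.randint(0x000000, 0xFFFFFF) above.
uint32_t random_oui() {
  std::mt19937 rng{std::random_device{}()};
  std::uniform_int_distribution<uint32_t> dist(0x000000u, 0xFFFFFFu);
  return dist(rng);
}

int main() {
  printf("OUI: 0x%06X\n", (unsigned) random_oui());
  return 0;
}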
@@ -336,14 +344,14 @@ async def zephyr_setup_switch(entity: cg.MockObj, config: ConfigType) -> None:
    CORE.add_job(_add_switch, entity, config)


-def _slot_index() -> int:
-    """Find the next available endpoint slot"""
+def get_slot_index() -> int:
+    """Find the next available endpoint slot."""
    slot = next(
        (i for i, v in enumerate(CORE.data[KEY_ZIGBEE][KEY_EP_NUMBER]) if v == ""), None
    )
    if slot is None:
        raise cv.Invalid(
-            f"Not found empty slot, size ({len(CORE.data[KEY_ZIGBEE][KEY_EP_NUMBER])})"
+            f"No available Zigbee endpoint slots ({len(CORE.data[KEY_ZIGBEE][KEY_EP_NUMBER])} in use)"
        )
    return slot
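get_slot_index() scans the endpoint table for the first entry still marked empty (""). The equivalent search sketched in C++, with the table stubbed in (container size and slot names here are illustrative, not the real registry):

#include <array>
#include <cstddef>
#include <cstdio>
#include <optional>
#include <string>

// Find the first unused endpoint slot, or nullopt when every slot is taken.
std::optional<std::size_t> first_free_slot(const std::array<std::string, 4> &slots) {
  for (std::size_t i = 0; i < slots.size(); i++) {
    if (slots[i].empty())
      return i;  // "" marks an unclaimed endpoint, as in the Python table
  }
  return std::nullopt;  // caller raises a config error, like cv.Invalid above
}

int main() {
  std::array<std::string, 4> slots{"binary_sensor", "", "", ""};
  if (auto slot = first_free_slot(slots))
    printf("next free slot: %zu\n", *slot);
  return 0;
}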
@@ -358,7 +366,7 @@ async def _add_zigbee_ep(
    app_device_id: str,
    extra_field_values: dict[str, int] | None = None,
) -> None:
-    slot_index = _slot_index()
+    slot_index = get_slot_index()

    prefix = f"zigbee_ep{slot_index + 1}"
    attrs_name = f"{prefix}_attrs"
@@ -16,7 +16,6 @@
#include <type_traits>
#include <vector>
#include <concepts>

#include <strings.h>

#include "esphome/core/optional.h"
@@ -10,6 +10,7 @@ globals:
    type: int
    restore_value: true
    initial_value: "0"
    update_interval: 5s
  - id: glob_float
    type: float
    restore_value: true
@@ -8,8 +8,6 @@ binary_sensor:
    name: "Garage Door Open 3"
  - platform: template
    name: "Garage Door Open 4"
  - platform: template
    name: "Garage Door Open 5"
  - platform: template
    name: "Garage Door Internal"
    internal: True
@@ -21,6 +19,10 @@ sensor:
  - platform: template
    name: "Analog 2"
    lambda: return 11.0;
  - platform: template
    name: "Analog 3"
    lambda: return 12.0;
    internal: True

zigbee:
  wipe_on_boot: true
@@ -35,6 +37,9 @@ output:
  write_action:
    - zigbee.factory_reset

time:
  - platform: zigbee

switch:
  - platform: template
    name: "Template Switch"
@@ -3,3 +3,4 @@
zigbee:
  wipe_on_boot: once
  power_source: battery
  ieee802154_vendor_oui: 0x231