Mirror of https://github.com/esphome/esphome.git (synced 2026-02-10 11:37:37 -07:00)

Compare commits: api-server...api-dedup- (8 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 39bde3eb73 |  |
|  | c53baf70c7 |  |
|  | 041c43fb32 |  |
|  | b4741ade0d |  |
|  | 2c3a92db97 |  |
|  | fc91a4d7a3 |  |
|  | 9bf90eff01 |  |
|  | dcbb020479 |  |
@@ -219,35 +219,8 @@ void APIConnection::loop() {
     this->process_batch_();
   }

-  switch (this->active_iterator_) {
-    case ActiveIterator::LIST_ENTITIES:
-      if (this->iterator_storage_.list_entities.completed()) {
-        this->destroy_active_iterator_();
-        if (this->flags_.state_subscription) {
-          this->begin_iterator_(ActiveIterator::INITIAL_STATE);
-        }
-      } else {
-        this->process_iterator_batch_(this->iterator_storage_.list_entities);
-      }
-      break;
-    case ActiveIterator::INITIAL_STATE:
-      if (this->iterator_storage_.initial_state.completed()) {
-        this->destroy_active_iterator_();
-        // Process any remaining batched messages immediately
-        if (!this->deferred_batch_.empty()) {
-          this->process_batch_();
-        }
-        // Now that everything is sent, enable immediate sending for future state changes
-        this->flags_.should_try_send_immediately = true;
-        // Release excess memory from buffers that grew during initial sync
-        this->deferred_batch_.release_buffer();
-        this->helper_->release_buffers();
-      } else {
-        this->process_iterator_batch_(this->iterator_storage_.initial_state);
-      }
-      break;
-    case ActiveIterator::NONE:
-      break;
-  }
+  if (this->active_iterator_ != ActiveIterator::NONE) {
+    this->process_active_iterator_();
+  }

   if (this->flags_.sent_ping) {
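The hunk above replaces the inline switch with a single guarded call: the per-state handling moves into `process_active_iterator_()`, so the steady-state `loop()` only pays for one enum comparison once the handshake is over. Below is a minimal sketch of that hot/cold split; the `Connection`/`Phase` names are hypothetical illustrations, not the esphome API, and `noinline` is the GCC/Clang attribute used in the header change further down.

```cpp
// Hypothetical sketch of the hot/cold split: the frequently-run loop() only
// pays for a cheap enum check, while the handshake-only work lives in a
// separate function marked noinline so the compiler keeps it out of loop().
#include <cstdio>

enum class Phase { NONE, HANDSHAKE };

class Connection {
 public:
  void loop() {
    // Hot path: in steady state phase_ == Phase::NONE, so this is one compare.
    if (this->phase_ != Phase::NONE) {
      this->run_handshake_step_();
    }
    // ... per-iteration work that runs on every call ...
  }

 private:
  // Cold path: only runs a handful of times right after a client connects.
  void __attribute__((noinline)) run_handshake_step_() {
    std::printf("handshake step\n");
    this->phase_ = Phase::NONE;  // pretend the handshake finished
  }

  Phase phase_{Phase::HANDSHAKE};
};

int main() {
  Connection conn;
  conn.loop();  // runs the cold handshake step once
  conn.loop();  // steady state: only the enum check
}
```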
@@ -283,6 +256,49 @@ void APIConnection::loop() {
 #endif
 }

+void APIConnection::process_active_iterator_() {
+  // Caller ensures active_iterator_ != NONE
+  if (this->active_iterator_ == ActiveIterator::LIST_ENTITIES) {
+    if (this->iterator_storage_.list_entities.completed()) {
+      this->destroy_active_iterator_();
+      if (this->flags_.state_subscription) {
+        this->begin_iterator_(ActiveIterator::INITIAL_STATE);
+      }
+    } else {
+      this->process_iterator_batch_(this->iterator_storage_.list_entities);
+    }
+  } else {  // INITIAL_STATE
+    if (this->iterator_storage_.initial_state.completed()) {
+      this->destroy_active_iterator_();
+      // Process any remaining batched messages immediately
+      if (!this->deferred_batch_.empty()) {
+        this->process_batch_();
+      }
+      // Now that everything is sent, enable immediate sending for future state changes
+      this->flags_.should_try_send_immediately = true;
+      // Release excess memory from buffers that grew during initial sync
+      this->deferred_batch_.release_buffer();
+      this->helper_->release_buffers();
+    } else {
+      this->process_iterator_batch_(this->iterator_storage_.initial_state);
+    }
+  }
+}
+
+void APIConnection::process_iterator_batch_(ComponentIterator &iterator) {
+  size_t initial_size = this->deferred_batch_.size();
+  size_t max_batch = this->get_max_batch_size_();
+  while (!iterator.completed() && (this->deferred_batch_.size() - initial_size) < max_batch) {
+    iterator.advance();
+  }
+
+  // If the batch is full, process it immediately
+  // Note: iterator.advance() already calls schedule_batch_() via schedule_message_()
+  if (this->deferred_batch_.size() >= max_batch) {
+    this->process_batch_();
+  }
+}
+
 bool APIConnection::send_disconnect_response_() {
   // remote initiated disconnect_client
   // don't close yet, we still need to send the disconnect response
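The new `process_iterator_batch_()` advances the iterator until the deferred batch has grown by at most `get_max_batch_size_()` entries, then flushes if the cap was reached. A toy version of that cap-and-flush loop, with made-up types standing in for `ComponentIterator` and the deferred batch:

```cpp
// Toy illustration (not the esphome types) of cap-and-flush batching: advance
// a producer until the pending batch has grown by at most `max_batch` items,
// then flush if the cap was reached.
#include <cstdio>
#include <vector>

struct CountingIterator {
  int remaining;
  bool completed() const { return remaining == 0; }
  void advance(std::vector<int> &batch) {
    batch.push_back(remaining);
    --remaining;
  }
};

void process_iterator_batch(CountingIterator &it, std::vector<int> &batch, size_t max_batch) {
  size_t initial_size = batch.size();
  while (!it.completed() && (batch.size() - initial_size) < max_batch) {
    it.advance(batch);
  }
  if (batch.size() >= max_batch) {
    std::printf("flushing %zu queued items\n", batch.size());
    batch.clear();  // stand-in for process_batch_()
  }
}

int main() {
  CountingIterator it{10};
  std::vector<int> batch;
  while (!it.completed()) {
    process_iterator_batch(it, batch, 4);  // at most 4 new items per call
  }
}
```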
@@ -15,6 +15,10 @@
 #include <limits>
 #include <vector>

+namespace esphome {
+class ComponentIterator;
+}  // namespace esphome
+
 namespace esphome::api {

 // Keepalive timeout in milliseconds
@@ -364,20 +368,13 @@ class APIConnection final : public APIServerConnectionBase {
     return this->client_supports_api_version(1, 14) ? MAX_INITIAL_PER_BATCH : MAX_INITIAL_PER_BATCH_LEGACY;
   }

-  // Helper method to process multiple entities from an iterator in a batch
-  template<typename Iterator> void process_iterator_batch_(Iterator &iterator) {
-    size_t initial_size = this->deferred_batch_.size();
-    size_t max_batch = this->get_max_batch_size_();
-    while (!iterator.completed() && (this->deferred_batch_.size() - initial_size) < max_batch) {
-      iterator.advance();
-    }
+  // Process active iterator (list_entities/initial_state) during connection setup.
+  // Extracted from loop() — only runs during initial handshake, NONE in steady state.
+  void __attribute__((noinline)) process_active_iterator_();

-    // If the batch is full, process it immediately
-    // Note: iterator.advance() already calls schedule_batch_() via schedule_message_()
-    if (this->deferred_batch_.size() >= max_batch) {
-      this->process_batch_();
-    }
-  }
+  // Helper method to process multiple entities from an iterator in a batch.
+  // Takes ComponentIterator base class reference to avoid duplicate template instantiations.
+  void process_iterator_batch_(ComponentIterator &iterator);

 #ifdef USE_BINARY_SENSOR
   static uint16_t try_send_binary_sensor_state(EntityBase *entity, APIConnection *conn, uint32_t remaining_size);
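The header comment above spells out the motivation for dropping the template: a template member is instantiated once per iterator type, while a single function taking the `ComponentIterator` base by reference is emitted once and shared by every call site. A hypothetical sketch of the same deduplication idea, with names invented for illustration:

```cpp
// Hypothetical before/after sketch: a template member would be stamped out
// once per iterator type, while a function taking the common base class by
// reference is compiled exactly once and reused.
#include <cstdio>

class IteratorBase {
 public:
  virtual ~IteratorBase() = default;
  virtual bool completed() const = 0;
  virtual void advance() = 0;
};

class ListEntitiesLike final : public IteratorBase {
 public:
  bool completed() const override { return done_; }
  void advance() override { std::puts("list entity"); done_ = true; }

 private:
  bool done_ = false;
};

class InitialStateLike final : public IteratorBase {
 public:
  bool completed() const override { return done_; }
  void advance() override { std::puts("initial state"); done_ = true; }

 private:
  bool done_ = false;
};

// Single non-template definition serves both derived iterators.
void drain(IteratorBase &it) {
  while (!it.completed()) {
    it.advance();
  }
}

int main() {
  ListEntitiesLike a;
  InitialStateLike b;
  drain(a);
  drain(b);
}
```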
@@ -94,7 +94,6 @@ class ListEntitiesIterator : public ComponentIterator {
   bool on_update(update::UpdateEntity *entity) override;
 #endif
   bool on_end() override;
-  bool completed() { return this->state_ == IteratorState::NONE; }

  protected:
   APIConnection *client_;
@@ -88,7 +88,6 @@ class InitialStateIterator : public ComponentIterator {
 #ifdef USE_UPDATE
   bool on_update(update::UpdateEntity *entity) override;
 #endif
-  bool completed() { return this->state_ == IteratorState::NONE; }

  protected:
   APIConnection *client_;
@@ -16,8 +16,8 @@ void CSE7766Component::loop() {
   }

   // Early return prevents updating last_transmission_ when no data is available.
-  int avail = this->available();
-  if (avail <= 0) {
+  size_t avail = this->available();
+  if (avail == 0) {
     return;
   }

@@ -27,7 +27,7 @@ void CSE7766Component::loop() {
   // At 4800 baud (~480 bytes/sec) with ~122 Hz loop rate, typically ~4 bytes per call.
   uint8_t buf[CSE7766_RAW_DATA_SIZE];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
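The CSE7766 hunks are the first of a long series of identical changes: the local `avail` becomes `size_t` and the `static_cast<size_t>` inside `std::min()` disappears, which suggests `available()` now returns an unsigned type (a change inferred from this diff, not shown in it). The general shape of the batched read loop, sketched against a stand-in `FakeUart` rather than the real `UARTDevice` API:

```cpp
// Generic shape of the batched UART read used across these components, shown
// with a stand-in FakeUart. The real code uses the esphome UARTDevice API;
// available() returning size_t is an assumption inferred from this diff.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>

class FakeUart {
 public:
  size_t available() const { return pending_; }
  bool read_array(uint8_t *data, size_t len) {
    if (len > pending_) return false;
    std::memset(data, 0xAB, len);  // pretend bytes arrived
    pending_ -= len;
    return true;
  }

 private:
  size_t pending_ = 150;
};

int main() {
  FakeUart uart;
  // Snapshot how much is waiting, then drain it in buffer-sized chunks so a
  // steady stream of incoming bytes cannot keep the loop running forever.
  size_t avail = uart.available();
  uint8_t buf[64];
  while (avail > 0) {
    size_t to_read = std::min(avail, sizeof(buf));
    if (!uart.read_array(buf, to_read)) {
      break;
    }
    std::printf("processed %zu bytes\n", to_read);
    avail -= to_read;
  }
}
```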
@@ -133,10 +133,10 @@ void DFPlayer::send_cmd_(uint8_t cmd, uint16_t argument) {

 void DFPlayer::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -120,9 +120,9 @@ void Dsmr::stop_requesting_data_() {

 void Dsmr::drain_rx_buffer_() {
   uint8_t buf[64];
-  int avail;
+  size_t avail;
   while ((avail = this->available()) > 0) {
-    if (!this->read_array(buf, std::min(static_cast<size_t>(avail), sizeof(buf)))) {
+    if (!this->read_array(buf, std::min(avail, sizeof(buf)))) {
       break;
     }
   }
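`drain_rx_buffer_()` (and Pipsolar's `empty_uart_buffer_()` later in this diff) uses a different loop shape: `available()` is re-polled on every pass because the goal is to throw everything away, including bytes that arrive while draining. A small sketch of that variant; it can be exercised with the `FakeUart` stand-in from the previous example:

```cpp
// Sketch of the drain variant: re-poll available() each pass so bytes that
// arrive while draining are discarded too. Works with any UART-like type that
// exposes available() and read_array(), such as the FakeUart stand-in above.
#include <algorithm>
#include <cstddef>
#include <cstdint>

template <typename Uart>
void drain_rx(Uart &uart) {
  uint8_t buf[64];
  size_t avail;
  while ((avail = uart.available()) > 0) {
    if (!uart.read_array(buf, std::min(avail, sizeof(buf)))) {
      break;  // short read: give up instead of spinning
    }
  }
}
```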
@@ -140,9 +140,9 @@ void Dsmr::receive_telegram_() {
   while (this->available_within_timeout_()) {
     // Read all available bytes in batches to reduce UART call overhead.
     uint8_t buf[64];
-    int avail = this->available();
+    size_t avail = this->available();
     while (avail > 0) {
-      size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+      size_t to_read = std::min(avail, sizeof(buf));
       if (!this->read_array(buf, to_read))
         return;
       avail -= to_read;
@@ -206,9 +206,9 @@ void Dsmr::receive_encrypted_telegram_() {
   while (this->available_within_timeout_()) {
     // Read all available bytes in batches to reduce UART call overhead.
     uint8_t buf[64];
-    int avail = this->available();
+    size_t avail = this->available();
     while (avail > 0) {
-      size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+      size_t to_read = std::min(avail, sizeof(buf));
       if (!this->read_array(buf, to_read))
         return;
       avail -= to_read;
@@ -276,10 +276,10 @@ void LD2410Component::restart_and_read_all_info() {

 void LD2410Component::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[MAX_LINE_LENGTH];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -311,10 +311,10 @@ void LD2412Component::restart_and_read_all_info() {

 void LD2412Component::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[MAX_LINE_LENGTH];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -277,10 +277,10 @@ void LD2450Component::dump_config() {

 void LD2450Component::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[MAX_LINE_LENGTH];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -20,10 +20,10 @@ void Modbus::loop() {
   const uint32_t now = App.get_loop_component_start_time();

   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -398,10 +398,10 @@ bool Nextion::remove_from_q_(bool report_empty) {

 void Nextion::process_serial_() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -14,9 +14,9 @@ void Pipsolar::setup() {

 void Pipsolar::empty_uart_buffer_() {
   uint8_t buf[64];
-  int avail;
+  size_t avail;
   while ((avail = this->available()) > 0) {
-    if (!this->read_array(buf, std::min(static_cast<size_t>(avail), sizeof(buf)))) {
+    if (!this->read_array(buf, std::min(avail, sizeof(buf)))) {
       break;
     }
   }
@@ -97,10 +97,10 @@ void Pipsolar::loop() {
   }

   if (this->state_ == STATE_COMMAND || this->state_ == STATE_POLL) {
-    int avail = this->available();
+    size_t avail = this->available();
     while (avail > 0) {
       uint8_t buf[64];
-      size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+      size_t to_read = std::min(avail, sizeof(buf));
       if (!this->read_array(buf, to_read)) {
         break;
       }
@@ -56,14 +56,14 @@ void PylontechComponent::setup() {
 void PylontechComponent::update() { this->write_str("pwr\n"); }

 void PylontechComponent::loop() {
-  int avail = this->available();
+  size_t avail = this->available();
   if (avail > 0) {
     // pylontech sends a lot of data very suddenly
     // we need to quickly put it all into our own buffer, otherwise the uart's buffer will overflow
     int recv = 0;
     uint8_t buf[64];
     while (avail > 0) {
-      size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+      size_t to_read = std::min(avail, sizeof(buf));
       if (!this->read_array(buf, to_read)) {
         break;
       }
@@ -82,10 +82,10 @@ void RD03DComponent::dump_config() {

 void RD03DComponent::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -136,10 +136,10 @@ void RFBridgeComponent::loop() {
     this->last_bridge_byte_ = now;
   }

-  int avail = this->available();
+  size_t avail = this->available();
   while (avail > 0) {
     uint8_t buf[64];
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -107,10 +107,10 @@ void MR24HPC1Component::update_() {
 // main loop
 void MR24HPC1Component::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -31,10 +31,10 @@ void MR60BHA2Component::dump_config() {
 // main loop
 void MR60BHA2Component::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -50,10 +50,10 @@ void MR60FDA2Component::setup() {
 // main loop
 void MR60FDA2Component::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -32,10 +32,10 @@ void Tuya::setup() {

 void Tuya::loop() {
   // Read all available bytes in batches to reduce UART call overhead.
-  int avail = this->available();
+  size_t avail = this->available();
   uint8_t buf[64];
   while (avail > 0) {
-    size_t to_read = std::min(static_cast<size_t>(avail), sizeof(buf));
+    size_t to_read = std::min(avail, sizeof(buf));
     if (!this->read_array(buf, to_read)) {
       break;
     }
@@ -26,6 +26,7 @@ class ComponentIterator {
  public:
   void begin(bool include_internal = false);
   void advance();
+  bool completed() const { return this->state_ == IteratorState::NONE; }
   virtual bool on_begin();
 #ifdef USE_BINARY_SENSOR
   virtual bool on_binary_sensor(binary_sensor::BinarySensor *binary_sensor) = 0;
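With `completed()` now defined once on the base class (and const-qualified), the duplicate definitions removed from `ListEntitiesIterator` and `InitialStateIterator` earlier in this diff are no longer needed. A minimal sketch of that hoisting, with hypothetical names mirroring only the structure:

```cpp
// Minimal sketch of hoisting a duplicated completed() check into a shared
// base class, now const-qualified; names are invented for illustration.
enum class IteratorState { NONE, RUNNING };

class IteratorCore {
 public:
  bool completed() const { return this->state_ == IteratorState::NONE; }
  void finish() { this->state_ = IteratorState::NONE; }

 protected:
  IteratorState state_{IteratorState::RUNNING};
};

class ListEntitiesLike : public IteratorCore {};
class InitialStateLike : public IteratorCore {};

int main() {
  ListEntitiesLike it;
  it.finish();
  return it.completed() ? 0 : 1;
}
```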