diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1c4aaac2013a..4f575affe919 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -37,6 +37,24 @@ config SPI_RTIO
 	  This option enables the RTIO API calls. RTIO support is
 	  experimental as the API itself is unstable.
 
+if SPI_RTIO
+
+config SPI_RTIO_FALLBACK_MSGS
+	int "Number of available spi_buf structs for the default handler to use"
+	default 4
+	help
+	  When RTIO is used with a driver that does not yet implement the submit API
+	  natively, the submissions are converted back to struct spi_buf values that
+	  are given to spi_transceive. This requires that some number of msgs be
+	  available on the stack to convert the submissions into. MISRA rules dictate
+	  that we must know this number in advance.
+
+	  In all likelihood 4 is going to work for everyone, but if you are using RTIO
+	  with a driver that does not implement submit natively and you get an error
+	  about not having enough spi msgs, this is the Kconfig option to adjust.
+
+endif # SPI_RTIO
+
 config SPI_SLAVE
 	bool "Slave support [EXPERIMENTAL]"
 	select EXPERIMENTAL
diff --git a/drivers/spi/spi_rtio.c b/drivers/spi/spi_rtio.c
index 975636a098ab..2526183e78a5 100644
--- a/drivers/spi/spi_rtio.c
+++ b/drivers/spi/spi_rtio.c
@@ -22,6 +22,7 @@ static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
 {
 	struct spi_dt_spec *dt_spec = iodev_sqe->sqe.iodev->data;
 	const struct device *dev = dt_spec->bus;
+	uint8_t num_msgs = 0;
 	int err = 0;
 
 	LOG_DBG("Sync RTIO work item for: %p", (void *)dev);
@@ -33,67 +34,103 @@ static void spi_rtio_iodev_default_submit_sync(struct rtio_iodev_sqe *iodev_sqe)
 	struct rtio_iodev_sqe *txn_head = iodev_sqe;
 	struct rtio_iodev_sqe *txn_curr = iodev_sqe;
 
+	/* We allocate the spi_buf's on the stack; to do so
+	 * the count of messages needs to be determined to
+	 * ensure we don't go over the statically sized array.
+	 */
 	do {
-		struct rtio_sqe *sqe = &txn_curr->sqe;
-		struct spi_buf tx_buf = {0};
-		struct spi_buf_set tx_buf_set = {
-			.buffers = &tx_buf,
-		};
+		switch (txn_curr->sqe.op) {
+		case RTIO_OP_RX:
+		case RTIO_OP_TX:
+		case RTIO_OP_TINY_TX:
+		case RTIO_OP_TXRX:
+			num_msgs++;
+			break;
+		default:
+			LOG_ERR("Invalid op code %d for submission %p", txn_curr->sqe.op,
+				(void *)&txn_curr->sqe);
+			err = -EIO;
+			break;
+		}
+		txn_curr = rtio_txn_next(txn_curr);
+	} while (err == 0 && txn_curr != NULL);
 
-		struct spi_buf rx_buf = {0};
-		struct spi_buf_set rx_buf_set = {
-			.buffers = &rx_buf,
-		};
+	if (err != 0) {
+		rtio_iodev_sqe_err(txn_head, err);
+		return;
+	}
+
+	/* Allocate msgs on the stack; MISRA doesn't like VLAs so we need a statically
+	 * sized array here. It's pretty unlikely we have more than 4 spi messages
+	 * in a transaction, as we typically only have 2: one to write a register
+	 * address and another to read/write the register data into an array.
+	 */
+	if (num_msgs > CONFIG_SPI_RTIO_FALLBACK_MSGS) {
+		LOG_ERR("At most CONFIG_SPI_RTIO_FALLBACK_MSGS"
+			" submissions in a transaction are"
+			" allowed in the default handler");
+		rtio_iodev_sqe_err(txn_head, -ENOMEM);
+		return;
+	}
+
+	struct spi_buf tx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
+	struct spi_buf rx_bufs[CONFIG_SPI_RTIO_FALLBACK_MSGS];
+	struct spi_buf_set tx_buf_set = {
+		.buffers = tx_bufs,
+		.count = num_msgs,
+	};
+	struct spi_buf_set rx_buf_set = {
+		.buffers = rx_bufs,
+		.count = num_msgs,
+	};
 
-		LOG_DBG("Preparing transfer: %p", txn_curr);
+	txn_curr = txn_head;
+
+	for (size_t i = 0 ; i < num_msgs ; i++) {
+		struct rtio_sqe *sqe = &txn_curr->sqe;
 
 		switch (sqe->op) {
 		case RTIO_OP_RX:
-			rx_buf.buf = sqe->rx.buf;
-			rx_buf.len = sqe->rx.buf_len;
-			rx_buf_set.count = 1;
+			rx_bufs[i].buf = sqe->rx.buf;
+			rx_bufs[i].len = sqe->rx.buf_len;
+			tx_bufs[i].buf = NULL;
+			tx_bufs[i].len = sqe->rx.buf_len;
 			break;
 		case RTIO_OP_TX:
-			tx_buf.buf = (uint8_t *)sqe->tx.buf;
-			tx_buf.len = sqe->tx.buf_len;
-			tx_buf_set.count = 1;
+			rx_bufs[i].buf = NULL;
+			rx_bufs[i].len = sqe->tx.buf_len;
+			tx_bufs[i].buf = (uint8_t *)sqe->tx.buf;
+			tx_bufs[i].len = sqe->tx.buf_len;
 			break;
 		case RTIO_OP_TINY_TX:
-			tx_buf.buf = (uint8_t *)sqe->tiny_tx.buf;
-			tx_buf.len = sqe->tiny_tx.buf_len;
-			tx_buf_set.count = 1;
+			rx_bufs[i].buf = NULL;
+			rx_bufs[i].len = sqe->tiny_tx.buf_len;
+			tx_bufs[i].buf = (uint8_t *)sqe->tiny_tx.buf;
+			tx_bufs[i].len = sqe->tiny_tx.buf_len;
 			break;
 		case RTIO_OP_TXRX:
-			rx_buf.buf = sqe->txrx.rx_buf;
-			rx_buf.len = sqe->txrx.buf_len;
-			tx_buf.buf = (uint8_t *)sqe->txrx.tx_buf;
-			tx_buf.len = sqe->txrx.buf_len;
-			rx_buf_set.count = 1;
-			tx_buf_set.count = 1;
+			rx_bufs[i].buf = sqe->txrx.rx_buf;
+			rx_bufs[i].len = sqe->txrx.buf_len;
+			tx_bufs[i].buf = (uint8_t *)sqe->txrx.tx_buf;
+			tx_bufs[i].len = sqe->txrx.buf_len;
 			break;
 		default:
-			LOG_ERR("Invalid op code %d for submission %p\n", sqe->op, (void *)sqe);
 			err = -EIO;
 			break;
 		}
 
-		if (!err) {
-			struct spi_buf_set *tx_buf_ptr = tx_buf_set.count > 0 ? &tx_buf_set : NULL;
-			struct spi_buf_set *rx_buf_ptr = rx_buf_set.count > 0 ? &rx_buf_set : NULL;
-
-			err = spi_transceive_dt(dt_spec, tx_buf_ptr, rx_buf_ptr);
+		txn_curr = rtio_txn_next(txn_curr);
+	}
 
-			/* NULL if this submission is not a transaction */
-			txn_curr = rtio_txn_next(txn_curr);
-		}
-	} while (err >= 0 && txn_curr != NULL);
+	if (err == 0) {
+		__ASSERT_NO_MSG(num_msgs > 0);
+		err = spi_transceive_dt(dt_spec, &tx_buf_set, &rx_buf_set);
+	}
 
-	if (err < 0) {
-		LOG_ERR("Transfer failed: %d", err);
+	if (err != 0) {
 		rtio_iodev_sqe_err(txn_head, err);
 	} else {
-		LOG_DBG("Transfer OK: %d", err);
-		rtio_iodev_sqe_ok(txn_head, err);
+		rtio_iodev_sqe_ok(txn_head, 0);
 	}
 }
 
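
For reference, below is a minimal sketch (not part of the patch) of the two-message register read the comments above describe, submitted through this fallback path. The node label sensor0 and the names sensor_iodev, sensor_rtio and read_reg are hypothetical; the calls themselves are the standard <zephyr/rtio/rtio.h> and <zephyr/drivers/spi.h> APIs. Against a driver without a native submit hook, the handler above counts the two submissions, packs them into tx_bufs/rx_bufs, and issues a single spi_transceive_dt() call for the whole transaction.

#include <errno.h>
#include <stdint.h>
#include <zephyr/device.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/rtio/rtio.h>

/* Placeholder iodev and RTIO context; "sensor0" is a hypothetical devicetree node */
SPI_DT_IODEV_DEFINE(sensor_iodev, DT_NODELABEL(sensor0),
		    SPI_OP_MODE_MASTER | SPI_WORD_SET(8), 0);
RTIO_DEFINE(sensor_rtio, 4, 4);

/* Read one register: a tiny write of the address chained to a read.
 * This is the "2 messages" case the fallback comment describes.
 */
static int read_reg(uint8_t reg, uint8_t *val)
{
	struct rtio_sqe *wr = rtio_sqe_acquire(&sensor_rtio);
	struct rtio_sqe *rd = rtio_sqe_acquire(&sensor_rtio);
	int res = 0;

	if (wr == NULL || rd == NULL) {
		rtio_sqe_drop_all(&sensor_rtio);
		return -ENOMEM;
	}

	/* Submission 1: write the register address (becomes tx_bufs[0]) */
	rtio_sqe_prep_tiny_write(wr, &sensor_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
	wr->flags |= RTIO_SQE_TRANSACTION;

	/* Submission 2: read the register value back (becomes rx_bufs[1]) */
	rtio_sqe_prep_read(rd, &sensor_iodev, RTIO_PRIO_NORM, val, 1, NULL);

	rtio_submit(&sensor_rtio, 2);

	/* Each submission in the transaction produces a completion */
	for (int i = 0; i < 2; i++) {
		struct rtio_cqe *cqe = rtio_cqe_consume_block(&sensor_rtio);

		if (cqe->result < 0) {
			res = cqe->result;
		}
		rtio_cqe_release(&sensor_rtio, cqe);
	}

	return res;
}

Two submissions per transaction is the common case, which is why the default of 4 for SPI_RTIO_FALLBACK_MSGS leaves some headroom before the -ENOMEM path in the default handler is hit.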