From c2aee1a485394cab07198a276f2b444e1cb06168 Mon Sep 17 00:00:00 2001
From: Hubert Badocha
Date: Tue, 25 Jul 2023 16:16:37 +0200
Subject: [PATCH] stm32: change names in libdma to camelCase

JIRA: DEND-42
---
 .../libmulti/include/libmulti/libdma.h        |  14 +-
 multi/stm32l4-multi/libmulti/libdma.c         | 131 +++++++++---------
 multi/stm32l4-multi/libmulti/libuart.c        |   4 +-
 3 files changed, 74 insertions(+), 75 deletions(-)

diff --git a/multi/stm32l4-multi/libmulti/include/libmulti/libdma.h b/multi/stm32l4-multi/libmulti/include/libmulti/libdma.h
index fabdcc10c..d15f59f0d 100644
--- a/multi/stm32l4-multi/libmulti/include/libmulti/libdma.h
+++ b/multi/stm32l4-multi/libmulti/include/libmulti/libdma.h
@@ -34,7 +34,7 @@ enum { dma_ht = 0, dma_tc };
 
 enum {dma_priorityLow = 0, dma_priorityMedium, dma_priorityHigh, dma_priorityVeryHigh };
 
-enum { dma_mode_normal = 0, dma_mode_noblock };
+enum { dma_modeNormal = 0, dma_modeNoBlock };
 
 /* clang-format on */
 
@@ -48,28 +48,28 @@ int libdma_configurePeripheral(const struct libdma_per *per, int dir, int priori
 
 /* SYNC FUNCTIONS */
 
-int libdma_transfer(const struct libdma_per *per, void *rx_maddr, const void *tx_maddr, size_t len);
+int libdma_transfer(const struct libdma_per *per, void *rxMAddr, const void *txMAddr, size_t len);
 
-int libdma_tx(const struct libdma_per *per, const void *tx_maddr, size_t len, int mode, time_t timeout);
+int libdma_tx(const struct libdma_per *per, const void *txMAddr, size_t len, int mode, time_t timeout);
 
-int libdma_rx(const struct libdma_per *per, void *rx_maddr, size_t len, int mode, time_t timeout);
+int libdma_rx(const struct libdma_per *per, void *rxMAddr, size_t len, int mode, time_t timeout);
 
 /* ASYNC FUNCTIONS */
 
 /* Only one tx request per channel may be issued at a time. */
-int libdma_txAsync(const struct libdma_per *per, const void *tx_maddr, size_t len, volatile int *done_flag);
+int libdma_txAsync(const struct libdma_per *per, const void *txMAddr, size_t len, volatile int *doneFlag);
 
 /* Only one rx request per channel may be issued at a time. */
-int libdma_rxAsync(const struct libdma_per *per, void *rx_maddr, size_t len, volatile int *done_flag);
+int libdma_rxAsync(const struct libdma_per *per, void *rxMAddr, size_t len, volatile int *doneFlag);
 
 /* Receive infinite rx into circular buffer.
 Fn is called at each Transfer Complete and Half Transfer interrupt.
 */
-int libdma_infiniteRxAsync(const struct libdma_per *per, void *rx_maddr, size_t len, void fn(void *arg, int type), void *arg);
+int libdma_infiniteRxAsync(const struct libdma_per *per, void *rxMAddr, size_t len, void fn(void *arg, int type), void *arg);
 
 /* UTILITY FUNCTIONS */
 
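For reference, the renamed public API maps onto caller code as in the following minimal sketch. It assumes a struct libdma_per *per that has already been set up through libdma_configurePeripheral() (whose trailing parameters are truncated in the hunk heading above); the function name, buffer size and timeout value are illustrative, not part of the library:

	#include <libmulti/libdma.h>

	static volatile int txDone;

	void exchange(const struct libdma_per *per, unsigned char *buf, size_t len)
	{
		/* Blocking receive: dma_modeNormal waits for completion, subject to the timeout. */
		int rxed = libdma_rx(per, buf, len, dma_modeNormal, 1000);

		/* Asynchronous transmit: the DMA interrupt handler sets txDone to 1 on
		 * Transfer Complete; only one tx request per channel may be pending. */
		txDone = 0;
		if (libdma_txAsync(per, buf, len, &txDone) == 0) {
			while (txDone == 0) {
				/* spin, or wait on a condition variable signalled elsewhere */
			}
		}
		(void)rxed;
	}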
diff --git a/multi/stm32l4-multi/libmulti/libdma.c b/multi/stm32l4-multi/libmulti/libdma.c
index 96dce5fca..5bd2aaebd 100644
--- a/multi/stm32l4-multi/libmulti/libdma.c
+++ b/multi/stm32l4-multi/libmulti/libdma.c
@@ -74,7 +74,7 @@ static const struct libdma_per libdma_persUart[] = {
 
 static const struct {
 	uintptr_t base;
-	int irq_base;
+	int irqBase;
 } dmainfo[2] = { { 0x40020000, 11 }, { 0x40020400, 56 } };
 
 
@@ -90,28 +90,28 @@ static struct {
 		void *arg;
 	} inf;
 	struct {
-		volatile int *done_flag;
-		volatile unsigned int *channel_base;
+		volatile int *doneFlag;
+		volatile unsigned int *channelBase;
 	} once;
 	};
 } dma_transfers[2][8];
 
 
-static void libdma_prepareTransfer(volatile unsigned int *channel_base, void *maddr, size_t len, int flags)
+static void libdma_prepareTransfer(volatile unsigned int *channelBase, void *maddr, size_t len, int flags)
 {
-	*(channel_base + cmar) = (unsigned int)maddr;
-	*(channel_base + cndtr) = len;
+	*(channelBase + cmar) = (unsigned int)maddr;
+	*(channelBase + cndtr) = len;
 	dataBarier();
-	*(channel_base + ccr) |= flags | 0x1;
+	*(channelBase + ccr) |= flags | 0x1;
 	dataBarier();
 }
 
 
-static void libdma_unprepareTransfer(volatile unsigned int *channel_base)
+static void libdma_unprepareTransfer(volatile unsigned int *channelBase)
 {
 	dataBarier();
 	/* Disable interrupts, disable channel */
-	*(channel_base + ccr) &= ~(DMA_TCIE_FLAG | DMA_HTIE_FLAG | 0x1);
+	*(channelBase + ccr) &= ~(DMA_TCIE_FLAG | DMA_HTIE_FLAG | 0x1);
 	dataBarier();
 }
 
 
@@ -122,7 +122,7 @@ static unsigned int libdma_irqnum(int dma, int channel)
 	if ((dma == dma2) && (channel >= 5)) {
 		return 16 + 68 - 5 + channel;
 	}
-	return 16 + dmainfo[dma].irq_base + channel;
+	return 16 + dmainfo[dma].irqBase + channel;
 }
 
 
@@ -132,7 +132,7 @@ static int libdma_irqnumToChannel(int dma, int irqnum)
 	if ((dma == dma2) && ((irqnum == 16 + 68) || (irqnum == 16 + 68 + 1))) {
 		return irqnum - (16 + 68 - 5);
 	}
-	return irqnum - dmainfo[dma].irq_base - 16;
+	return irqnum - dmainfo[dma].irqBase - 16;
 }
 
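A worked example helps decode the magic numbers in the two functions above. On STM32L4 parts, DMA1 channel interrupts occupy NVIC positions 11..17 and DMA2's start at position 56, which are the irqBase values in dmainfo; DMA2 channels 6 and 7 (indices 5 and 6) sit apart at positions 68 and 69, and the constant 16 converts an NVIC position into a Cortex-M exception number. So libdma_irqnum(dma1, 2) yields 16 + 11 + 2 = 29, while libdma_irqnum(dma2, 5) takes the special branch: 16 + 68 - 5 + 5 = 84. libdma_irqnumToChannel() is simply the inverse mapping.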
@@ -151,9 +151,9 @@ static int libdma_irqHandler(unsigned int n, void *arg)
 
 	switch (dma_transfers[dma][channel].type) {
 		case dma_transferOnce:
-			libdma_unprepareTransfer(dma_transfers[dma][channel].once.channel_base);
+			libdma_unprepareTransfer(dma_transfers[dma][channel].once.channelBase);
 
-			*dma_transfers[dma][channel].once.done_flag = 1;
+			*dma_transfers[dma][channel].once.doneFlag = 1;
 
 			dma_transfers[dma][channel].type = dma_transferNull;
 			break;
@@ -178,14 +178,14 @@ static void libdma_configureChannel(int dma, int channel, int dir, int priority, void *paddr, int msize, int psize, int minc, int pinc, unsigned char reqmap, handle_t *cond)
 {
 	unsigned int tmp, irqnum = libdma_irqnum(dma, channel);
-	volatile unsigned int *channel_base = dma_common[dma].base + 2 + (5 * channel);
+	volatile unsigned int *channelBase = dma_common[dma].base + 2 + (5 * channel);
 	handle_t interruptCond = (cond == NULL) ?
 			dma_common[dma].channels[channel].cond : *cond;
 
 	interrupt(irqnum, libdma_irqHandler, (void *)dma, interruptCond, NULL);
 
-	*(channel_base + ccr) = ((priority & 0x3) << 12) | ((msize & 0x3) << 10) | ((psize & 0x3) << 8) |
+	*(channelBase + ccr) = ((priority & 0x3) << 12) | ((msize & 0x3) << 10) | ((psize & 0x3) << 8) |
 		((minc & 0x1) << 7) | ((pinc & 0x1) << 6) | ((dir & 0x1) << 4);
-	*(channel_base + cpar) = (unsigned int)paddr;
+	*(channelBase + cpar) = (unsigned int)paddr;
 	tmp = *(dma_common[dma].base + cselr) & ~(0xF << channel * 4);
 	*(dma_common[dma].base + cselr) = tmp | ((unsigned int)reqmap << channel * 4);
 }
 
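The bit shifts in libdma_configureChannel() mirror the DMA_CCRx register layout from the STM32L4 reference manual: PL (priority) sits in bits 13:12, MSIZE in 11:10, PSIZE in 9:8, MINC at bit 7, PINC at bit 6 and DIR at bit 4; bit 0 is EN, which libdma_prepareTransfer() ORs in to start the channel and libdma_unprepareTransfer() clears. The DMA_CSELR write routes the peripheral request line: each channel owns a 4-bit field, hence the 0xF << channel * 4 mask with reqmap shifted into place.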
@@ -202,102 +202,101 @@ int libdma_configurePeripheral(const struct libdma_per *per, int dir, int priori
 }
 
 
-static int libdma_hasChannelFinished(volatile unsigned int *channel_base)
+static int libdma_hasChannelFinished(volatile unsigned int *channelBase)
 {
-	return (*(channel_base + cndtr) == 0) ? 1 : 0;
+	return (*(channelBase + cndtr) == 0) ? 1 : 0;
 }
 
 
-static int libdma_transferHelper(int dma, int rx_channel, int tx_channel, void *rx_maddr, const void *tx_maddr, size_t len)
+static int libdma_transferHelper(int dma, int rxChannel, int txChannel, void *rxMAddr, const void *txMAddr, size_t len)
 {
 	/* Empirically chosen value to avoid mutex+cond overhead for short transactions */
-	int use_interrupts = len > 24;
-	int interrupts_flags = (use_interrupts == 0) ? 0 : DMA_TCIE_FLAG;
-	volatile int rx_done = 0;
+	int useInterrupts = len > 24;
+	int interruptsFlags = (useInterrupts == 0) ? 0 : DMA_TCIE_FLAG;
+	volatile int rxDone = 0;
 
-	volatile unsigned int *rx_channel_base = dma_common[dma].base + 2 + (5 * rx_channel);
-	volatile unsigned int *tx_channel_base = dma_common[dma].base + 2 + (5 * tx_channel);
+	volatile unsigned int *rxChannelBase = dma_common[dma].base + 2 + (5 * rxChannel);
+	volatile unsigned int *txChannelBase = dma_common[dma].base + 2 + (5 * txChannel);
 
 	/* Only one request may be issued on one channel at one time.
 	*/
-	if ((dma_transfers[dma][rx_channel].type != dma_transferNull) ||
-			(dma_transfers[dma][tx_channel].type != dma_transferNull)) {
+	if ((dma_transfers[dma][rxChannel].type != dma_transferNull) ||
+			(dma_transfers[dma][txChannel].type != dma_transferNull)) {
 		return -EINVAL;
 	}
 
-	dma_transfers[dma][rx_channel].type = dma_transferOnce;
-	dma_transfers[dma][rx_channel].once.done_flag = &rx_done;
-	dma_transfers[dma][rx_channel].once.channel_base = rx_channel_base;
-	libdma_prepareTransfer(rx_channel_base, rx_maddr, len, interrupts_flags);
+	dma_transfers[dma][rxChannel].type = dma_transferOnce;
+	dma_transfers[dma][rxChannel].once.doneFlag = &rxDone;
+	dma_transfers[dma][rxChannel].once.channelBase = rxChannelBase;
+	libdma_prepareTransfer(rxChannelBase, rxMAddr, len, interruptsFlags);
 
-	dma_transfers[dma][tx_channel].type = dma_transferOnce;
+	dma_transfers[dma][txChannel].type = dma_transferOnce;
 	/* When doing rw transfer, avoid unnecessary interrupt handling and condSignal()
 	by waiting only for RX transfer completion, ignoring TX */
-	libdma_prepareTransfer(tx_channel_base, (void *)tx_maddr, len, 0);
+	libdma_prepareTransfer(txChannelBase, (void *)txMAddr, len, 0);
 
-	if (use_interrupts != 0) {
-		mutexLock(dma_common[dma].channels[rx_channel].irqLock);
-		while ((rx_done == 0) || (libdma_hasChannelFinished(tx_channel_base) == 0)) {
-			condWait(dma_common[dma].channels[rx_channel].cond, dma_common[dma].channels[rx_channel].irqLock, DMA_TIMEOUT_1S);
+	if (useInterrupts != 0) {
+		mutexLock(dma_common[dma].channels[rxChannel].irqLock);
+		while ((rxDone == 0) || (libdma_hasChannelFinished(txChannelBase) == 0)) {
+			condWait(dma_common[dma].channels[rxChannel].cond, dma_common[dma].channels[rxChannel].irqLock, DMA_TIMEOUT_1S);
 		}
-		mutexUnlock(dma_common[dma].channels[rx_channel].irqLock);
+		mutexUnlock(dma_common[dma].channels[rxChannel].irqLock);
 	}
 	else {
-		while ((libdma_hasChannelFinished(rx_channel_base) == 0) || (libdma_hasChannelFinished(tx_channel_base) == 0)) {
+		while ((libdma_hasChannelFinished(rxChannelBase) == 0) || (libdma_hasChannelFinished(txChannelBase) == 0)) {
 			;
 		}
-		libdma_unprepareTransfer(rx_channel_base);
-		dma_transfers[dma][rx_channel].type = dma_transferNull;
+		libdma_unprepareTransfer(rxChannelBase);
+		dma_transfers[dma][rxChannel].type = dma_transferNull;
 	}
 
-	libdma_unprepareTransfer(tx_channel_base);
-	dma_transfers[dma][tx_channel].type = dma_transferNull;
-
+	libdma_unprepareTransfer(txChannelBase);
+	dma_transfers[dma][txChannel].type = dma_transferNull;
 	return len;
 }
 
 
-static int libdma_transferAsync(const struct libdma_per *per, void *maddr, int dir, size_t len, volatile int *done_flag)
+static int libdma_transferAsync(const struct libdma_per *per, void *maddr, int dir, size_t len, volatile int *doneFlag)
 {
 	int dma = per->dma;
 	int channel = per->channel[dir];
-	volatile unsigned int *channel_base = dma_common[dma].base + 2 + (5 * channel);
+	volatile unsigned int *channelBase = dma_common[dma].base + 2 + (5 * channel);
 
 	/* Only one request may be issued on one channel at one time.
 	*/
 	if ((dma_transfers[dma][channel].type != dma_transferNull) || (DMA_MAX_LEN < len)) {
 		return -EINVAL;
 	}
 
-	*done_flag = 0;
+	*doneFlag = 0;
 	dma_transfers[dma][channel].type = dma_transferOnce;
-	dma_transfers[dma][channel].once.done_flag = done_flag;
-	dma_transfers[dma][channel].once.channel_base = channel_base;
+	dma_transfers[dma][channel].once.doneFlag = doneFlag;
+	dma_transfers[dma][channel].once.channelBase = channelBase;
 
-	libdma_prepareTransfer(channel_base, maddr, len, DMA_TCIE_FLAG);
+	libdma_prepareTransfer(channelBase, maddr, len, DMA_TCIE_FLAG);
 
 	return 0;
 }
 
 
-int libdma_txAsync(const struct libdma_per *per, const void *tx_maddr, size_t len, volatile int *done_flag)
+int libdma_txAsync(const struct libdma_per *per, const void *txMAddr, size_t len, volatile int *doneFlag)
 {
-	return libdma_transferAsync(per, (void *)tx_maddr, dma_mem2per, len, done_flag);
+	return libdma_transferAsync(per, (void *)txMAddr, dma_mem2per, len, doneFlag);
 }
 
 
-int libdma_rxAsync(const struct libdma_per *per, void *rx_maddr, size_t len, volatile int *done_flag)
+int libdma_rxAsync(const struct libdma_per *per, void *rxMAddr, size_t len, volatile int *doneFlag)
 {
-	return libdma_transferAsync(per, rx_maddr, dma_per2mem, len, done_flag);
+	return libdma_transferAsync(per, rxMAddr, dma_per2mem, len, doneFlag);
 }
 
 
-int libdma_infiniteRxAsync(const struct libdma_per *per, void *rx_maddr, size_t len, void fn(void *arg, int type), void *arg)
+int libdma_infiniteRxAsync(const struct libdma_per *per, void *rxMAddr, size_t len, void fn(void *arg, int type), void *arg)
 {
 	int dma = per->dma;
 	int channel = per->channel[dma_per2mem];
-	volatile unsigned int *channel_base = dma_common[dma].base + 2 + (5 * channel);
+	volatile unsigned int *channelBase = dma_common[dma].base + 2 + (5 * channel);
 
 	/* Only one request may be issued on one channel at one time. */
 	if ((dma_transfers[dma][channel].type != dma_transferNull) || (DMA_MAX_LEN < len)) {
@@ -308,35 +307,35 @@ int libdma_infiniteRxAsync(const struct libdma_per *per, void *rx_maddr, size_t
 	dma_transfers[dma][channel].inf.fn = fn;
 	dma_transfers[dma][channel].inf.arg = arg;
 
-	libdma_prepareTransfer(channel_base, rx_maddr, len, DMA_TCIE_FLAG | DMA_HTIE_FLAG | DMA_CIRCULAR_FLAG);
+	libdma_prepareTransfer(channelBase, rxMAddr, len, DMA_TCIE_FLAG | DMA_HTIE_FLAG | DMA_CIRCULAR_FLAG);
 
 	return 0;
 }
 
 
-int libdma_transfer(const struct libdma_per *per, void *rx_maddr, const void *tx_maddr, size_t len)
+int libdma_transfer(const struct libdma_per *per, void *rxMAddr, const void *txMAddr, size_t len)
 {
 	int res;
-	volatile unsigned int *tx_channel_base;
+	volatile unsigned int *txChannelBase;
 	int dma = per->dma;
-	int rx_channel = per->channel[dma_per2mem];
-	int tx_channel = per->channel[dma_mem2per];
+	int rxChannel = per->channel[dma_per2mem];
+	int txChannel = per->channel[dma_mem2per];
 	unsigned char txbuf = 0;
 
 	if (DMA_MAX_LEN < len) {
 		return -EINVAL;
 	}
 
-	if (tx_maddr == NULL) {
+	if (txMAddr == NULL) {
 		/* In case no tx buffer is provided, use a 1-byte dummy
 		and configure DMA not to increment the memory address.
 		*/
-		tx_channel_base = dma_common[dma].base + 2 + (5 * tx_channel);
-		*(tx_channel_base + ccr) &= ~(1 << 7);
-		res = libdma_transferHelper(dma, rx_channel, tx_channel, rx_maddr, &txbuf, len);
-		*(tx_channel_base + ccr) |= (1 << 7);
+		txChannelBase = dma_common[dma].base + 2 + (5 * txChannel);
+		*(txChannelBase + ccr) &= ~(1 << 7);
+		res = libdma_transferHelper(dma, rxChannel, txChannel, rxMAddr, &txbuf, len);
+		*(txChannelBase + ccr) |= (1 << 7);
 	}
 	else {
-		res = libdma_transferHelper(dma, rx_channel, tx_channel, rx_maddr, tx_maddr, len);
+		res = libdma_transferHelper(dma, rxChannel, txChannel, rxMAddr, txMAddr, len);
 	}
 
 	return res;
 }
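The NULL-tx path above is what makes read-only exchanges on a full-duplex link cheap. A minimal caller sketch (a hypothetical helper; error handling elided):

	#include <libmulti/libdma.h>

	/* Receive len bytes while the tx channel repeatedly clocks out one dummy
	 * byte: libdma_transfer() clears the tx channel's MINC bit, runs the
	 * transfer against a 1-byte stack buffer, then restores MINC. */
	int readOnly(const struct libdma_per *per, unsigned char *rxBuf, size_t len)
	{
		/* Returns -EINVAL if len exceeds DMA_MAX_LEN or a request is pending. */
		return libdma_transfer(per, rxBuf, NULL, len);
	}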
@@ -423,10 +422,10 @@ uint16_t libdma_leftToRx(const struct libdma_per *per)
 {
 	int dma = per->dma;
 	int channel = per->channel[dma_per2mem];
-	volatile unsigned int *channel_base = dma_common[dma].base + 2 + (5 * channel);
+	volatile unsigned int *channelBase = dma_common[dma].base + 2 + (5 * channel);
 
 	/* Only bottom 16 bits contain data. */
-	return (uint16_t)(*(channel_base + cndtr));
+	return (uint16_t)(*(channelBase + cndtr));
 }
diff --git a/multi/stm32l4-multi/libmulti/libuart.c b/multi/stm32l4-multi/libmulti/libuart.c
index 6b17fa149..acd9382a9 100644
--- a/multi/stm32l4-multi/libmulti/libuart.c
+++ b/multi/stm32l4-multi/libmulti/libuart.c
@@ -242,7 +242,7 @@ static int libuart_dmaWrite(libuart_ctx *ctx, const void *buff, unsigned int buf
 
 	for (written = 0; written < bufflen; written += writesz) {
 		*(ctx->base + icr) |= (1 << 6);
-		writesz = libdma_tx(ctx->data.dma.per, ((char *)buff) + written, min(DMA_MAX_LEN, bufflen - written), dma_mode_normal, 0);
+		writesz = libdma_tx(ctx->data.dma.per, ((char *)buff) + written, min(DMA_MAX_LEN, bufflen - written), dma_modeNormal, 0);
 		if (writesz == 0) {
 			break;
 		}
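The min(DMA_MAX_LEN, bufflen - written) bound in the loop above reflects that a single transfer cannot exceed the channel's 16-bit CNDTR counter (compare libdma_leftToRx(), which masks the count to 16 bits), so large UART writes are issued as successive libdma_tx() calls until the buffer is drained or a transfer returns 0.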
@@ -293,7 +293,7 @@ static int libuart_dmaRead(libuart_ctx *ctx, void *buff, unsigned int count, cha
 	int read, readsz;
 	int timedout = 0;
 	time_t now, end, rxtimeout;
-	int dmamode = (mode == uart_mnblock) ? dma_mode_noblock : dma_mode_normal;
+	int dmamode = (mode == uart_mnblock) ? dma_modeNoBlock : dma_modeNormal;
 
 	mutexLock(ctx->data.dma.rxlock);
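Finally, the circular-buffer API reads naturally with the new names. A sketch of a half-buffer consumer (buffer name and size are illustrative; fn runs from the DMA interrupt, so it should only hand data off):

	#include <libmulti/libdma.h>

	static unsigned char circ[64];

	/* type is dma_ht once the first half of circ has been filled and dma_tc
	 * once the second half has; each call hands over one half. */
	static void rxHalf(void *arg, int type)
	{
		const unsigned char *half = (type == dma_ht) ? circ : circ + (sizeof(circ) / 2);

		/* hand off sizeof(circ) / 2 bytes starting at half, e.g. into a FIFO */
		(void)arg;
		(void)half;
	}

	static int startRx(const struct libdma_per *per)
	{
		return libdma_infiniteRxAsync(per, circ, sizeof(circ), rxHalf, NULL);
	}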