/* nwl_engine.c */

#define _BSD_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#include "pci.h"
#include "pcilib.h"
#include "error.h"
#include "tools.h"
#include "nwl_private.h"
#include "nwl_defines.h"
#include "nwl_engine_buffers.h"
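
/*
 * Read the capability register of a DMA engine and fill in its
 * description: engine address, transfer direction (C2S maps to
 * PCILIB_DMA_FROM_DEVICE, everything else to PCILIB_DMA_TO_DEVICE),
 * engine type (block/packet), and the maximum byte-count field of the
 * buffer descriptors. Returns PCILIB_ERROR_NOTAVAILABLE if no engine
 * is present at the given base address. Note that nwl_read_register()
 * is a macro that stores the register value in its first argument.
 */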
int dma_nwl_read_engine_config(nwl_dma_t *ctx, pcilib_dma_engine_description_t *info, const char *base) {
    uint32_t val;

    nwl_read_register(val, ctx, base, REG_DMA_ENG_CAP);

    if ((val & DMA_ENG_PRESENT_MASK) == 0) return PCILIB_ERROR_NOTAVAILABLE;

    info->addr = (val & DMA_ENG_NUMBER) >> DMA_ENG_NUMBER_SHIFT;
    if ((info->addr > PCILIB_MAX_DMA_ENGINES)||(info->addr < 0)) return PCILIB_ERROR_INVALID_DATA;

    switch (val & DMA_ENG_DIRECTION_MASK) {
    case DMA_ENG_C2S:
        info->direction = PCILIB_DMA_FROM_DEVICE;
        break;
    default:
        info->direction = PCILIB_DMA_TO_DEVICE;
    }

    switch (val & DMA_ENG_TYPE_MASK) {
    case DMA_ENG_BLOCK:
        info->type = PCILIB_DMA_TYPE_BLOCK;
        break;
    case DMA_ENG_PACKET:
        info->type = PCILIB_DMA_TYPE_PACKET;
        break;
    default:
        info->type = PCILIB_DMA_TYPE_UNKNOWN;
    }

    info->addr_bits = (val & DMA_ENG_BD_MAX_BC) >> DMA_ENG_BD_MAX_BC_SHIFT;

    return 0;
}
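
/*
 * Bring a DMA engine into the running state. For a freshly allocated
 * buffer ring the engine is reset in two steps (DMA_ENG_USER_RESET,
 * then DMA_ENG_RESET), each polled until the bits clear or
 * PCILIB_REGISTER_TIMEOUT microseconds pass; the ring base address is
 * then written to REG_DMA_ENG_NEXT_BD and REG_SW_NEXT_BD and the
 * engine is enabled. If the kernel buffers were reused from a previous
 * session, the hardware is assumed to be configured already and only
 * the pending IRQs are acknowledged. On any failure the engine is
 * marked as started and immediately stopped, so that
 * dma_nwl_stop_engine() releases whatever was allocated so far.
 */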
int dma_nwl_start_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
    int err;
    uint32_t val;
    uint32_t ring_pa;
    struct timeval start, cur;

    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;
    char *base = ctx->engines[dma].base_addr;

    if (ectx->started) return 0;

    // This will only succeed if there is no parallel access to the DMA engine
    err = dma_nwl_allocate_engine_buffers(ctx, ectx);
    if (err) {
        ectx->started = 1;
        dma_nwl_stop_engine(ctx, dma);
        return err;
    }

    if (ectx->reused) {
        ectx->preserve = 1;

        dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

#ifdef NWL_GENERATE_DMA_IRQ
        dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */
    } else {
        // Disable IRQs
        err = dma_nwl_disable_engine_irq(ctx, dma);
        if (err) {
            ectx->started = 1;
            dma_nwl_stop_engine(ctx, dma);
            return err;
        }

        // Disable the engine and reset it
        val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET;
        nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
            gettimeofday(&cur, NULL);
        } while ((val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET)) && (((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

        if (val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET)) {
            pcilib_error("Timeout during reset of DMA engine %i", ectx->desc->addr);
            ectx->started = 1;
            dma_nwl_stop_engine(ctx, dma);
            return PCILIB_ERROR_TIMEOUT;
        }

        val = DMA_ENG_RESET;
        nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
            gettimeofday(&cur, NULL);
        } while ((val & DMA_ENG_RESET) && (((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

        if (val & DMA_ENG_RESET) {
            pcilib_error("Timeout during reset of DMA engine %i", ectx->desc->addr);
            ectx->started = 1;
            dma_nwl_stop_engine(ctx, dma);
            return PCILIB_ERROR_TIMEOUT;
        }

        dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

        ring_pa = pcilib_kmem_get_pa(ctx->dmactx.pcilib, ectx->ring);
        nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_DMA_ENG_NEXT_BD);
        nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_SW_NEXT_BD);

        __sync_synchronize();

        nwl_read_register(val, ctx, ectx->base_addr, REG_DMA_ENG_CTRL_STATUS);
        val |= (DMA_ENG_ENABLE);
        nwl_write_register(val, ctx, ectx->base_addr, REG_DMA_ENG_CTRL_STATUS);

        __sync_synchronize();

#ifdef NWL_GENERATE_DMA_IRQ
        dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */

        if (ectx->desc->direction == PCILIB_DMA_FROM_DEVICE) {
            ring_pa += (ectx->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
            nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_SW_NEXT_BD);

            ectx->tail = 0;
            ectx->head = (ectx->ring_size - 1);
        } else {
            ectx->tail = 0;
            ectx->head = 0;
        }
    }

    ectx->started = 1;

    return 0;
}
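
/*
 * Stop a DMA engine and release its buffers. Unless the context is to
 * be preserved for reuse, the engine is disabled and reset (stopping
 * alone is not sufficient) and the buffer-descriptor registers are
 * rewound to the ring base. The kmem flags then decide whether the
 * kernel buffers are kept around for reuse or freed for good.
 */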
int dma_nwl_stop_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
    int err;
    uint32_t val;
    uint32_t ring_pa;
    struct timeval start, cur;
    pcilib_kmem_flags_t flags;

    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;
    char *base = ctx->engines[dma].base_addr;

    if (!ectx->started) return 0;
    ectx->started = 0;

    err = dma_nwl_disable_engine_irq(ctx, dma);
    if (err) return err;

    if (!ectx->preserve) {
        // Stopping the DMA engine is not enough, a reset is required
        val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET|DMA_ENG_RESET;
        nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
            gettimeofday(&cur, NULL);
        } while ((val & (DMA_ENG_RUNNING)) && (((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_REGISTER_TIMEOUT));

        if (ectx->ring) {
            ring_pa = pcilib_kmem_get_pa(ctx->dmactx.pcilib, ectx->ring);
            nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_DMA_ENG_NEXT_BD);
            nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_SW_NEXT_BD);
        }
    }

    dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

    if (ectx->preserve) {
        flags = PCILIB_KMEM_FLAG_REUSE;
    } else {
        flags = PCILIB_KMEM_FLAG_HARDWARE|PCILIB_KMEM_FLAG_PERSISTENT;
    }

    // Clean buffers
    if (ectx->ring) {
        pcilib_free_kernel_memory(ctx->dmactx.pcilib, ectx->ring, flags);
        ectx->ring = NULL;
    }

    if (ectx->pages) {
        pcilib_free_kernel_memory(ctx->dmactx.pcilib, ectx->pages, flags);
        ectx->pages = NULL;
    }

    return 0;
}
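
/*
 * Write a data fragment to the device, splitting it into blocks of at
 * most page_size bytes. Each block is copied into the next free ring
 * buffer, synced, and pushed to the engine; the EOP flag is set on the
 * last block only if the caller requested PCILIB_DMA_FLAG_EOP. With
 * PCILIB_DMA_FLAG_WAIT the call additionally blocks until all but one
 * of the PCILIB_NWL_DMA_PAGES ring buffers are free again, i.e. until
 * the hardware has drained the ring.
 */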
int dma_nwl_write_fragment(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, void *data, size_t *written) {
    int err;
    size_t pos;
    size_t bufnum;

    nwl_dma_t *ctx = (nwl_dma_t*)vctx;
    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;

    err = dma_nwl_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    if (data) {
        for (pos = 0; pos < size; pos += ectx->page_size) {
            int block_size = min2(size - pos, ectx->page_size);

            bufnum = dma_nwl_get_next_buffer(ctx, ectx, 1, timeout);
            if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
                if (written) *written = pos;
                return PCILIB_ERROR_TIMEOUT;
            }

            void *buf = pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ectx->pages, bufnum);

            pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, bufnum);
            memcpy(buf, (char*)data + pos, block_size);        // copy the current block, not the start of the fragment
            pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_TODEVICE, bufnum);

            err = dma_nwl_push_buffer(ctx, ectx, block_size, (flags&PCILIB_DMA_FLAG_EOP)&&((pos + block_size) == size), timeout);
            if (err) {
                if (written) *written = pos;
                return err;
            }
        }
    }

    if (written) *written = size;

    if (flags&PCILIB_DMA_FLAG_WAIT) {
        bufnum = dma_nwl_get_next_buffer(ctx, ectx, PCILIB_NWL_DMA_PAGES - 1, timeout);
        if (bufnum == PCILIB_DMA_BUFFER_INVALID) return PCILIB_ERROR_TIMEOUT;
    }

    return 0;
}
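
/*
 * Stream data from the device, passing each completed buffer to the
 * callback. The callback's return value steers the loop: it selects
 * the timeout for the next buffer (PCILIB_STREAMING_CONTINUE uses the
 * default PCILIB_DMA_TIMEOUT, PCILIB_STREAMING_WAIT the caller-supplied
 * timeout), decides whether a missing buffer is an error
 * (PCILIB_STREAMING_FAIL), aborts the transfer when negative (the
 * negated value is returned as the error code), and ends the loop
 * normally when zero.
 */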
int dma_nwl_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr) {
    int err, ret = PCILIB_STREAMING_REQ_PACKET;
    pcilib_timeout_t wait = 0;
    size_t res = 0;
    size_t bufnum;
    size_t bufsize;
    int eop;

    nwl_dma_t *ctx = (nwl_dma_t*)vctx;
    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;

    err = dma_nwl_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    do {
        switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
        case PCILIB_STREAMING_CONTINUE: wait = PCILIB_DMA_TIMEOUT; break;
        case PCILIB_STREAMING_WAIT: wait = timeout; break;
//      case PCILIB_STREAMING_CHECK: wait = 0; break;
        }

        bufnum = dma_nwl_wait_buffer(ctx, ectx, &bufsize, &eop, wait);
        if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
            return (ret&PCILIB_STREAMING_FAIL)?PCILIB_ERROR_TIMEOUT:0;
        }

        // EOP is not respected in IPE Camera
        if (ctx->ignore_eop) eop = 1;

        pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, bufnum);
        void *buf = pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ectx->pages, bufnum);
        ret = cb(cbattr, (eop?PCILIB_DMA_FLAG_EOP:0), bufsize, buf);
        if (ret < 0) return -ret;

        // DS: Fixme, it looks like we can avoid calling this for the sake of performance
//      pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_TODEVICE, bufnum);

        dma_nwl_return_buffer(ctx, ectx);

        res += bufsize;
    } while (ret);

    return 0;
}
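
/*
 * Wait until the hardware has processed all ring buffers. Note that
 * the caller-supplied timeout is currently ignored and the default
 * PCILIB_DMA_TIMEOUT is used instead.
 */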
int dma_nwl_wait_completion(nwl_dma_t *ctx, pcilib_dma_engine_t dma, pcilib_timeout_t timeout) {
    if (dma_nwl_get_next_buffer(ctx, ctx->engines + dma, PCILIB_NWL_DMA_PAGES - 1, PCILIB_DMA_TIMEOUT) == (PCILIB_NWL_DMA_PAGES - 1)) return 0;
    else return PCILIB_ERROR_TIMEOUT;
}
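
/*
 * Typical call sequence (a sketch only; the engine index and buffer
 * names are illustrative, and in pcilib these entry points are
 * normally reached through the public DMA API rather than called
 * directly):
 *
 *   dma_nwl_start_engine(ctx, dma);                          // allocate ring, enable engine
 *   dma_nwl_write_fragment(vctx, dma, 0, size,
 *           PCILIB_DMA_FLAG_EOP|PCILIB_DMA_FLAG_WAIT,
 *           PCILIB_DMA_TIMEOUT, data, &written);             // push one packet
 *   dma_nwl_stream_read(vctx, dma, 0, 0, PCILIB_DMA_FLAGS_DEFAULT,
 *           PCILIB_DMA_TIMEOUT, callback, callback_arg);     // drain responses
 *   dma_nwl_stop_engine(ctx, dma);                           // reset engine, free buffers
 */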