/* nwl_engine.c */
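/*
 * NWL DMA engine handling for pcilib: engine discovery and configuration,
 * engine start/stop with optional state preservation, and the write and
 * streaming-read paths built on top of the buffer helpers declared in
 * nwl_engine_buffers.h.
 */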
#define _BSD_SOURCE
#define _DEFAULT_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>

#include "pci.h"
#include "pcilib.h"
#include "error.h"
#include "tools.h"
#include "debug.h"

#include "nwl_private.h"
#include "nwl_defines.h"
#include "nwl_engine_buffers.h"
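
/*
 * Reads the capability register of a DMA engine and fills in its description:
 * engine number, transfer direction (C2S, card-to-system, maps to reads from
 * the device), engine type, and the width of the maximum byte count supported
 * by a buffer descriptor.
 */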
int dma_nwl_read_engine_config(nwl_dma_t *ctx, pcilib_dma_engine_description_t *info, const char *base) {
    uint32_t val;

    nwl_read_register(val, ctx, base, REG_DMA_ENG_CAP);

    if ((val & DMA_ENG_PRESENT_MASK) == 0) return PCILIB_ERROR_NOTAVAILABLE;

    info->addr = (val & DMA_ENG_NUMBER) >> DMA_ENG_NUMBER_SHIFT;
    if ((info->addr > PCILIB_MAX_DMA_ENGINES)||(info->addr < 0)) return PCILIB_ERROR_INVALID_DATA;

    switch (val & DMA_ENG_DIRECTION_MASK) {
        case DMA_ENG_C2S:
            info->direction = PCILIB_DMA_FROM_DEVICE;
            break;
        default:
            info->direction = PCILIB_DMA_TO_DEVICE;
    }

    switch (val & DMA_ENG_TYPE_MASK) {
        case DMA_ENG_BLOCK:
            info->type = PCILIB_DMA_TYPE_BLOCK;
            break;
        case DMA_ENG_PACKET:
            info->type = PCILIB_DMA_TYPE_PACKET;
            break;
        default:
            info->type = PCILIB_DMA_TYPE_UNKNOWN;
    }

    info->addr_bits = (val & DMA_ENG_BD_MAX_BC) >> DMA_ENG_BD_MAX_BC_SHIFT;

    return 0;
}
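
/*
 * Starts a DMA engine: allocates the descriptor ring and data buffers and,
 * unless the buffers are reused from a previous session, resets the engine,
 * programs the ring address, and enables it. On failure the engine is marked
 * as started first, so that dma_nwl_stop_engine() performs the cleanup
 * (it returns immediately for engines that are not started).
 */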
int dma_nwl_start_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
    int err;
    uint32_t val;
    uint32_t ring_pa;
    struct timeval start, cur;

    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;
    char *base = ctx->engines[dma].base_addr;

    if (ectx->started) return 0;

        // This will only succeed if there is no parallel access to the DMA engine
    err = dma_nwl_allocate_engine_buffers(ctx, ectx);
    if (err) {
            // Mark as started so that dma_nwl_stop_engine() releases the resources
        ectx->started = 1;
        dma_nwl_stop_engine(ctx, dma);
        return err;
    }

    if (ectx->reused) {
        ectx->preserve = 1;

        dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

#ifdef NWL_GENERATE_DMA_IRQ
        dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */
    } else {
            // Disable IRQs
        err = dma_nwl_disable_engine_irq(ctx, dma);
        if (err) {
            ectx->started = 1;
            dma_nwl_stop_engine(ctx, dma);
            return err;
        }

            // Disable the engine and reset it
        val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET;
        nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
            gettimeofday(&cur, NULL);
        } while ((val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_NWL_REGISTER_TIMEOUT));

        if (val & (DMA_ENG_STATE_MASK|DMA_ENG_USER_RESET)) {
            pcilib_error("Timeout during reset of DMA engine %i", ectx->desc->addr);
            ectx->started = 1;
            dma_nwl_stop_engine(ctx, dma);
            return PCILIB_ERROR_TIMEOUT;
        }

        val = DMA_ENG_RESET;
        nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
            gettimeofday(&cur, NULL);
        } while ((val & DMA_ENG_RESET)&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_NWL_REGISTER_TIMEOUT));

        if (val & DMA_ENG_RESET) {
            pcilib_error("Timeout during reset of DMA engine %i", ectx->desc->addr);
            ectx->started = 1;
            dma_nwl_stop_engine(ctx, dma);
            return PCILIB_ERROR_TIMEOUT;
        }

        dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

            // Point both the hardware and software pointers at the start of the descriptor ring
        ring_pa = pcilib_kmem_get_ba(ctx->dmactx.pcilib, ectx->ring);
        nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_DMA_ENG_NEXT_BD);
        nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_SW_NEXT_BD);

        __sync_synchronize();

        nwl_read_register(val, ctx, ectx->base_addr, REG_DMA_ENG_CTRL_STATUS);
        val |= (DMA_ENG_ENABLE);
        nwl_write_register(val, ctx, ectx->base_addr, REG_DMA_ENG_CTRL_STATUS);

        __sync_synchronize();

#ifdef NWL_GENERATE_DMA_IRQ
        dma_nwl_enable_engine_irq(ctx, dma);
#endif /* NWL_GENERATE_DMA_IRQ */

        if (ectx->desc->direction == PCILIB_DMA_FROM_DEVICE) {
                // Hand all but one descriptor to the hardware for receiving
            ring_pa += (ectx->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
            nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_SW_NEXT_BD);

            ectx->tail = 0;
            ectx->head = (ectx->ring_size - 1);
        } else {
            ectx->tail = 0;
            ectx->head = 0;
        }
    }

    ectx->started = 1;

    return 0;
}
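
/*
 * Stops a DMA engine: disables its interrupts, resets it unless its state
 * should be preserved for later reuse, and releases the descriptor ring and
 * page buffers with flags matching the preserve mode.
 */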
int dma_nwl_stop_engine(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
    int err;
    uint32_t val;
    uint32_t ring_pa;
    struct timeval start, cur;
    pcilib_kmem_flags_t flags;

    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;
    char *base = ctx->engines[dma].base_addr;

    if (!ectx->started) return 0;
    ectx->started = 0;

    err = dma_nwl_disable_engine_irq(ctx, dma);
    if (err) return err;

    if (!ectx->preserve) {
            // Stopping the DMA engine is not enough; a reset is required
        val = DMA_ENG_DISABLE|DMA_ENG_USER_RESET|DMA_ENG_RESET;
        nwl_write_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);

        gettimeofday(&start, NULL);
        do {
            nwl_read_register(val, ctx, base, REG_DMA_ENG_CTRL_STATUS);
            gettimeofday(&cur, NULL);
        } while ((val & (DMA_ENG_RUNNING))&&(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < PCILIB_NWL_REGISTER_TIMEOUT));

        if (ectx->ring) {
            ring_pa = pcilib_kmem_get_ba(ctx->dmactx.pcilib, ectx->ring);
            nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_DMA_ENG_NEXT_BD);
            nwl_write_register(ring_pa, ctx, ectx->base_addr, REG_SW_NEXT_BD);
        }
    }

    dma_nwl_acknowledge_irq((pcilib_dma_context_t*)ctx, PCILIB_DMA_IRQ, dma);

        // Keep the buffers around for reuse if the engine state is preserved
    if (ectx->preserve) {
        flags = PCILIB_KMEM_FLAG_REUSE;
    } else {
        flags = PCILIB_KMEM_FLAG_HARDWARE|PCILIB_KMEM_FLAG_PERSISTENT;
    }

        // Clean buffers
    if (ectx->ring) {
        pcilib_free_kernel_memory(ctx->dmactx.pcilib, ectx->ring, flags);
        ectx->ring = NULL;
    }

    if (ectx->pages) {
        pcilib_free_kernel_memory(ctx->dmactx.pcilib, ectx->pages, flags);
        ectx->pages = NULL;
    }

    return 0;
}
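
/*
 * Writes a data fragment to the device. The data is split into page-sized
 * blocks; each block is copied into the next free DMA buffer and pushed to
 * the engine, with EOP signalled on the last block only. With
 * PCILIB_DMA_FLAG_WAIT the call additionally waits until the engine has
 * drained the queued buffers.
 */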
int dma_nwl_write_fragment(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, void *data, size_t *written) {
    int err;
    size_t pos;
    size_t bufnum;

    nwl_dma_t *ctx = (nwl_dma_t*)vctx;
    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;

    err = dma_nwl_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    if (data) {
        for (pos = 0; pos < size; pos += ectx->page_size) {
            int block_size = min2(size - pos, ectx->page_size);

            bufnum = dma_nwl_get_next_buffer(ctx, ectx, 1, timeout);
            if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
                if (written) *written = pos;
                return PCILIB_ERROR_TIMEOUT;
            }

            void *buf = (void*)pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ectx->pages, bufnum);

                // Copy the current block (offset by pos) into the DMA buffer,
                // syncing the cache before and after the CPU write
            pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, bufnum);
            memcpy(buf, ((char*)data) + pos, block_size);
            pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_TODEVICE, bufnum);

                // EOP is set only on the last block of the fragment
            err = dma_nwl_push_buffer(ctx, ectx, block_size, (flags&PCILIB_DMA_FLAG_EOP)&&((pos + block_size) == size), timeout);
            if (err) {
                if (written) *written = pos;
                return err;
            }
        }
    }

    if (written) *written = size;

    if (flags&PCILIB_DMA_FLAG_WAIT) {
            // Wait until all but one of the buffers are free again, i.e. the engine is idle
        bufnum = dma_nwl_get_next_buffer(ctx, ectx, PCILIB_NWL_DMA_PAGES - 1, timeout);
        if (bufnum == PCILIB_DMA_BUFFER_INVALID) return PCILIB_ERROR_TIMEOUT;
    }

    return 0;
}
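
/*
 * Streaming read: waits for filled buffers and hands each one to the caller's
 * callback. The callback's return value controls whether streaming continues,
 * how long to wait for the next buffer, and whether a timeout is treated as
 * an error.
 */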
int dma_nwl_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr) {
    int err, ret = PCILIB_STREAMING_REQ_PACKET;
    pcilib_timeout_t wait = 0;
    size_t res = 0;
    size_t bufnum;
    size_t bufsize;
    nwl_dma_t *ctx = (nwl_dma_t*)vctx;
    int eop;

    pcilib_nwl_engine_context_t *ectx = ctx->engines + dma;

    err = dma_nwl_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    do {
            // The callback's last return value selects how long to wait for the next buffer
        switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
            case PCILIB_STREAMING_CONTINUE: wait = PCILIB_DMA_TIMEOUT; break;
            case PCILIB_STREAMING_WAIT: wait = timeout; break;
//          case PCILIB_STREAMING_CHECK: wait = 0; break;
        }

        bufnum = dma_nwl_wait_buffer(ctx, ectx, &bufsize, &eop, wait);
        if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
            return (ret&PCILIB_STREAMING_FAIL)?PCILIB_ERROR_TIMEOUT:0;
        }

            // EOP is not respected by the IPE Camera
        if (ctx->ignore_eop) eop = 1;

        pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, bufnum);
        void *buf = (void*)pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ectx->pages, bufnum);
        ret = cb(cbattr, (eop?PCILIB_DMA_FLAG_EOP:0), bufsize, buf);
        if (ret < 0) return -ret;

            // DS: Fixme, it looks like we can avoid calling this for the sake of performance
//      pcilib_kmem_sync_block(ctx->dmactx.pcilib, ectx->pages, PCILIB_KMEM_SYNC_TODEVICE, bufnum);

        dma_nwl_return_buffer(ctx, ectx);

        res += bufsize;
    } while (ret);

    return 0;
}
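
/*
 * Waits for the engine to complete its outstanding transfers by trying to
 * acquire all but one of the PCILIB_NWL_DMA_PAGES ring buffers.
 */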
int dma_nwl_wait_completion(nwl_dma_t * ctx, pcilib_dma_engine_t dma, pcilib_timeout_t timeout) {
        // NOTE: the timeout argument is currently ignored; PCILIB_DMA_TIMEOUT is used instead
    if (dma_nwl_get_next_buffer(ctx, ctx->engines + dma, PCILIB_NWL_DMA_PAGES - 1, PCILIB_DMA_TIMEOUT) == (PCILIB_NWL_DMA_PAGES - 1)) return 0;
    else return PCILIB_ERROR_TIMEOUT;
}
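
/*
 * Usage sketch (illustrative only; these functions are normally reached
 * through the pcilib DMA API rather than called directly). Assuming an
 * initialized nwl_dma_t context and a valid engine index, the lifecycle is
 * roughly:
 *
 *   dma_nwl_start_engine(ctx, dma);               // allocate ring, reset, enable
 *   dma_nwl_write_fragment(vctx, dma, 0, size,    // queue outgoing data
 *                          PCILIB_DMA_FLAG_EOP|PCILIB_DMA_FLAG_WAIT,
 *                          PCILIB_DMA_TIMEOUT, data, &written);
 *   dma_nwl_stream_read(vctx, dma, 0, 0,          // deliver incoming buffers to cb
 *                       PCILIB_DMA_FLAGS_DEFAULT, PCILIB_DMA_TIMEOUT, cb, cbattr);
 *   dma_nwl_stop_engine(ctx, dma);                // reset and free buffers
 */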