/* nwl_engine_buffers.h — NWL DMA engine descriptor-ring and buffer management */
// Accessors for 32-bit fields inside a DMA buffer descriptor (BD).
// 'data' is a byte pointer to the descriptor, 'offset' a byte offset of the field.
// Read the 32-bit field at 'offset'.
#define NWL_RING_GET(data, offset) *(uint32_t*)(((char*)(data)) + (offset))
// Store 'val' into the 32-bit field at 'offset'.
#define NWL_RING_SET(data, offset, val) *(uint32_t*)(((char*)(data)) + (offset)) = (val)
// Read-modify-write: keep only the bits selected by 'mask', then OR in 'val'.
#define NWL_RING_UPDATE(data, offset, mask, val) *(uint32_t*)(((char*)(data)) + (offset)) = ((*(uint32_t*)(((char*)(data)) + (offset)))&(mask))|(val)
  4. static int dma_nwl_compute_read_s2c_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
  5. uint32_t val;
  6. char *base = info->base_addr;
  7. nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
  8. if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
  9. if (val < ring_pa) pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_SW_NEXT_BD register value (%lx) is below start of ring [%lx,%lx])", val, ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
  10. else pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu / %u) is fractal)", val - ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
  11. return PCILIB_ERROR_INVALID_STATE;
  12. }
  13. info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  14. if (info->head >= PCILIB_NWL_DMA_PAGES) {
  15. pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu) out of range)", info->head);
  16. return PCILIB_ERROR_INVALID_STATE;
  17. }
  18. nwl_read_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
  19. if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
  20. if (val < ring_pa) pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%lx) is below start of ring [%lx,%lx])", val, ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
  21. else pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%zu / %u) is fractal)", val - ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
  22. return PCILIB_ERROR_INVALID_STATE;
  23. }
  24. info->tail = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  25. if (info->tail >= PCILIB_NWL_DMA_PAGES) {
  26. pcilib_warning("Inconsistent S2C DMA Ring buffer is found (REG_DMA_ENG_NEXT_BD register value (%zu) out of range)", info->tail);
  27. return PCILIB_ERROR_INVALID_STATE;
  28. }
  29. #ifdef DEBUG_NWL
  30. printf("S2C: %lu %lu\n", info->tail, info->head);
  31. #endif /* DEBUG_NWL */
  32. return 0;
  33. }
  34. static int dma_nwl_compute_read_c2s_pointers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, unsigned char *ring, uint32_t ring_pa) {
  35. uint32_t val;
  36. char *base = info->base_addr;
  37. nwl_read_register(val, ctx, base, REG_SW_NEXT_BD);
  38. if ((val < ring_pa)||((val - ring_pa) % PCILIB_NWL_DMA_DESCRIPTOR_SIZE)) {
  39. if (val < ring_pa) pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%lx) is below start of the ring [%lx,%lx])", val, ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
  40. else pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu / %u) is fractal)", val - ring_pa, PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
  41. return PCILIB_ERROR_INVALID_STATE;
  42. }
  43. info->head = (val - ring_pa) / PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  44. if (info->head >= PCILIB_NWL_DMA_PAGES) {
  45. pcilib_warning("Inconsistent C2S DMA Ring buffer is found (REG_SW_NEXT_BD register value (%zu) out of range)", info->head);
  46. return PCILIB_ERROR_INVALID_STATE;
  47. }
  48. info->tail = info->head + 1;
  49. if (info->tail == PCILIB_NWL_DMA_PAGES) info->tail = 0;
  50. #ifdef DEBUG_NWL
  51. printf("C2S: %lu %lu\n", info->tail, info->head);
  52. #endif /* DEBUG_NWL */
  53. return 0;
  54. }
/**
 * Allocate (or re-attach to) the descriptor ring and page buffers of a DMA engine.
 *
 * Tries to reuse persistent kernel buffers left by a previous session. The
 * existing state is preserved only if both ring and pages were reused in
 * persistent/hardware mode, the engine is still running, and the ring
 * pointers recovered from the registers are consistent. Otherwise the ring
 * is reinitialized from scratch and the engine pointers are reset.
 *
 * Returns 0 on success, PCILIB_ERROR_MEMORY if kernel memory allocation fails,
 * or an error propagated into a reinitialization (never returned directly).
 */
static int dma_nwl_allocate_engine_buffers(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
    int err = 0;

    int i;
    int preserve = 0;
    uint16_t sub_use;
    uint32_t val;
    uint32_t buf_sz;
    uint64_t buf_pa;
    pcilib_kmem_reuse_state_t reuse_ring, reuse_pages;
    pcilib_kmem_flags_t flags;
    pcilib_kmem_type_t type;

    char *base = info->base_addr;

	// Already allocated: nothing to do.
    if (info->pages) return 0;

	// Or bidirectional specified by 0x0|addr, or read 0x0|addr and write 0x80|addr
    type = (info->desc.direction == PCILIB_DMA_TO_DEVICE)?PCILIB_KMEM_TYPE_DMA_S2C_PAGE:PCILIB_KMEM_TYPE_DMA_C2S_PAGE;
    sub_use = info->desc.addr|((info->desc.direction == PCILIB_DMA_TO_DEVICE)?0x80:0x00);
    flags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(info->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);

	// One consistent block for the whole descriptor ring, plus one page buffer per descriptor.
    pcilib_kmem_handle_t *ring = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, PCILIB_NWL_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, sub_use), flags);
    pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, type, PCILIB_NWL_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, sub_use), flags);

    if (!ring||!pages) {
	if (pages) pcilib_free_kernel_memory(ctx->pcilib, pages, 0);
	if (ring) pcilib_free_kernel_memory(ctx->pcilib, ring, 0);
	return PCILIB_ERROR_MEMORY;
    }

    reuse_ring = pcilib_kmem_is_reused(ctx->pcilib, ring);
    reuse_pages = pcilib_kmem_is_reused(ctx->pcilib, pages);

	// I guess idea here was that we not need to check all that stuff during the second iteration
	// which is basicaly true (shall we expect any driver-triggered changes or parallel accesses?)
	// but still we need to set preserve flag (and that if we enforcing preservation --start-dma).
	// Probably having checks anyway is not harming...
//    if (!info->preserve) {
	// Preserve only if both buffers were reused in persistent HW-referenced
	// mode and the engine is still running; otherwise warn and reinitialize.
    if (reuse_ring == reuse_pages) {
	if (reuse_ring & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers are found (only part of required buffers is available), reinitializing...");
	else if (reuse_ring & PCILIB_KMEM_REUSE_REUSED) {
	    if ((reuse_ring & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
	    else if ((reuse_ring & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
	    else {
		nwl_read_register(val, ctx, info->base_addr, REG_DMA_ENG_CTRL_STATUS);
		if ((val&DMA_ENG_RUNNING) == 0) pcilib_warning("Lost DMA buffers are found (DMA engine is stopped), reinitializing...");
		else preserve = 1;
	    }
	}
    } else pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers does not match), reinitializing....");
//    }

    unsigned char *data = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, ring);
    uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, ring);

	// Recover head/tail from the engine registers; fall back to
	// reinitialization if the register contents are inconsistent.
    if (preserve) {
	if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) err = dma_nwl_compute_read_c2s_pointers(ctx, info, data, ring_pa);
	else err = dma_nwl_compute_read_s2c_pointers(ctx, info, data, ring_pa);
	if (err) preserve = 0;
    }

    if (preserve) {
	info->reused = 1;
	buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, 0);
    } else {
	info->reused = 0;

		// Build a fresh circular descriptor list: each BD points at the
		// next one (wrapping at the end) and at its own page buffer.
	memset(data, 0, PCILIB_NWL_DMA_PAGES * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);

	for (i = 0; i < PCILIB_NWL_DMA_PAGES; i++, data += PCILIB_NWL_DMA_DESCRIPTOR_SIZE) {
	    buf_pa = pcilib_kmem_get_block_pa(ctx->pcilib, pages, i);
	    buf_sz = pcilib_kmem_get_block_size(ctx->pcilib, pages, i);

	    NWL_RING_SET(data, DMA_BD_NDESC_OFFSET, ring_pa + ((i + 1) % PCILIB_NWL_DMA_PAGES) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE);
	    NWL_RING_SET(data, DMA_BD_BUFAL_OFFSET, buf_pa&0xFFFFFFFF);
	    NWL_RING_SET(data, DMA_BD_BUFAH_OFFSET, buf_pa>>32);
#ifdef NWL_GENERATE_DMA_IRQ
	    NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
#else /* NWL_GENERATE_DMA_IRQ */
	    NWL_RING_SET(data, DMA_BD_BUFL_CTRL_OFFSET, buf_sz);
#endif /* NWL_GENERATE_DMA_IRQ */
	}

		// Point both HW and SW descriptor pointers at the ring start.
	val = ring_pa;
	nwl_write_register(val, ctx, base, REG_DMA_ENG_NEXT_BD);
	nwl_write_register(val, ctx, base, REG_SW_NEXT_BD);

	info->head = 0;
	info->tail = 0;
    }

    info->ring = ring;
    info->pages = pages;
	// NOTE(review): in the non-preserve path this is the size of the LAST
	// block; presumably all page blocks share one size — verify in kmem API.
    info->page_size = buf_sz;
    info->ring_size = PCILIB_NWL_DMA_PAGES;

    return 0;
}
  136. static size_t dma_nwl_clean_buffers(nwl_dma_t * ctx, pcilib_nwl_engine_description_t *info) {
  137. size_t res = 0;
  138. uint32_t status;
  139. unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
  140. ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  141. next_buffer:
  142. status = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET)&DMA_BD_STATUS_MASK;
  143. // control = NWL_RING_GET(ring, DMA_BD_BUFL_CTRL_OFFSET)&DMA_BD_CTRL_MASK;
  144. if (status & DMA_BD_ERROR_MASK) {
  145. pcilib_error("NWL DMA Engine reported error in ring descriptor");
  146. return (size_t)-1;
  147. }
  148. if (status & DMA_BD_SHORT_MASK) {
  149. pcilib_error("NWL DMA Engine reported short error");
  150. return (size_t)-1;
  151. }
  152. if (status & DMA_BD_COMP_MASK) {
  153. info->tail++;
  154. if (info->tail == info->ring_size) {
  155. ring -= (info->tail - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  156. info->tail = 0;
  157. } else {
  158. ring += PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  159. }
  160. res++;
  161. if (info->tail != info->head) goto next_buffer;
  162. }
  163. // printf("====> Cleaned: %i\n", res);
  164. return res;
  165. }
  166. static size_t dma_nwl_get_next_buffer(nwl_dma_t * ctx, pcilib_nwl_engine_description_t *info, size_t n_buffers, pcilib_timeout_t timeout) {
  167. struct timeval start, cur;
  168. size_t res, n = 0;
  169. size_t head;
  170. for (head = info->head; (((head + 1)%info->ring_size) != info->tail)&&(n < n_buffers); head++, n++);
  171. if (n == n_buffers) return info->head;
  172. gettimeofday(&start, NULL);
  173. res = dma_nwl_clean_buffers(ctx, info);
  174. if (res == (size_t)-1) return PCILIB_DMA_BUFFER_INVALID;
  175. else n += res;
  176. while (n < n_buffers) {
  177. if (timeout != PCILIB_TIMEOUT_INFINITE) {
  178. gettimeofday(&cur, NULL);
  179. if (((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) > timeout) break;
  180. }
  181. usleep (10);
  182. res = dma_nwl_clean_buffers(ctx, info);
  183. if (res == (size_t)-1) return PCILIB_DMA_BUFFER_INVALID;
  184. else if (res > 0) {
  185. gettimeofday(&start, NULL);
  186. n += res;
  187. }
  188. }
  189. if (n < n_buffers) return PCILIB_DMA_BUFFER_INVALID;
  190. return info->head;
  191. }
  192. static int dma_nwl_push_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, size_t size, int eop, pcilib_timeout_t timeout) {
  193. int flags = 0;
  194. uint32_t val;
  195. unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
  196. uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
  197. ring += info->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  198. if (!info->writting) {
  199. flags |= DMA_BD_SOP_MASK;
  200. info->writting = 1;
  201. }
  202. if (eop) {
  203. flags |= DMA_BD_EOP_MASK;
  204. info->writting = 0;
  205. }
  206. NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, size|flags);
  207. NWL_RING_SET(ring, DMA_BD_BUFL_STATUS_OFFSET, size);
  208. info->head++;
  209. if (info->head == info->ring_size) info->head = 0;
  210. val = ring_pa + info->head * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  211. nwl_write_register(val, ctx, info->base_addr, REG_SW_NEXT_BD);
  212. return 0;
  213. }
  214. static size_t dma_nwl_wait_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info, size_t *size, int *eop, pcilib_timeout_t timeout) {
  215. struct timeval start, cur;
  216. uint32_t status_size, status;
  217. unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
  218. ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  219. gettimeofday(&start, NULL);
  220. do {
  221. status_size = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
  222. status = status_size & DMA_BD_STATUS_MASK;
  223. if (status & DMA_BD_ERROR_MASK) {
  224. pcilib_error("NWL DMA Engine reported error in ring descriptor");
  225. return (size_t)-1;
  226. }
  227. if (status & DMA_BD_COMP_MASK) {
  228. if (status & DMA_BD_EOP_MASK) *eop = 1;
  229. else *eop = 0;
  230. *size = status_size & DMA_BD_BUFL_MASK;
  231. /*
  232. if (mrd) {
  233. if ((info->tail + 1) == info->ring_size) ring -= info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  234. else ring += PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  235. *mrd = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET)&DMA_BD_COMP_MASK;
  236. }
  237. */
  238. return info->tail;
  239. }
  240. usleep(10);
  241. gettimeofday(&cur, NULL);
  242. } while ((timeout == PCILIB_TIMEOUT_INFINITE)||(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < timeout));
  243. return (size_t)-1;
  244. }
  245. /*
  246. // This function is not used now, but we may need it in the future
  247. static int dma_nwl_is_overflown(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
  248. uint32_t status;
  249. unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
  250. if (info->tail > 0) ring += (info->tail - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  251. else ring += (info->ring_size - 1) * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  252. status = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
  253. return status&DMA_BD_COMP_MASK?1:0;
  254. }
  255. */
  256. static int dma_nwl_return_buffer(nwl_dma_t *ctx, pcilib_nwl_engine_description_t *info) {
  257. uint32_t val;
  258. unsigned char *ring = pcilib_kmem_get_ua(ctx->pcilib, info->ring);
  259. uint32_t ring_pa = pcilib_kmem_get_pa(ctx->pcilib, info->ring);
  260. size_t bufsz = pcilib_kmem_get_block_size(ctx->pcilib, info->pages, info->tail);
  261. ring += info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  262. #ifdef NWL_GENERATE_DMA_IRQ
  263. NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, bufsz | DMA_BD_INT_ERROR_MASK | DMA_BD_INT_COMP_MASK);
  264. #else /* NWL_GENERATE_DMA_IRQ */
  265. NWL_RING_SET(ring, DMA_BD_BUFL_CTRL_OFFSET, bufsz);
  266. #endif /* NWL_GENERATE_DMA_IRQ */
  267. NWL_RING_SET(ring, DMA_BD_BUFL_STATUS_OFFSET, 0);
  268. val = ring_pa + info->tail * PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  269. nwl_write_register(val, ctx, info->base_addr, REG_SW_NEXT_BD);
  270. info->tail++;
  271. if (info->tail == info->ring_size) info->tail = 0;
  272. return 0;
  273. }
  274. int dma_nwl_get_status(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_engine_status_t *status, size_t n_buffers, pcilib_dma_buffer_status_t *buffers) {
  275. size_t i;
  276. uint32_t bstatus;
  277. nwl_dma_t *ctx = (nwl_dma_t*)vctx;
  278. pcilib_nwl_engine_description_t *info = ctx->engines + dma;
  279. unsigned char *ring = (unsigned char*)pcilib_kmem_get_ua(ctx->pcilib, info->ring);
  280. if (!status) return -1;
  281. status->started = info->started;
  282. status->ring_size = info->ring_size;
  283. status->buffer_size = info->page_size;
  284. status->ring_tail = info->tail;
  285. if (info->desc.direction == PCILIB_DMA_FROM_DEVICE) {
  286. size_t pos = 0;
  287. for (i = 0; i < info->ring_size; i++) {
  288. pos = status->ring_tail + i;
  289. if (pos >= info->ring_size) pos -= info->ring_size;
  290. bstatus = NWL_RING_GET(ring + pos * PCILIB_NWL_DMA_DESCRIPTOR_SIZE, DMA_BD_BUFL_STATUS_OFFSET);
  291. if ((bstatus&(DMA_BD_ERROR_MASK|DMA_BD_COMP_MASK)) == 0) break;
  292. }
  293. status->ring_head = pos;
  294. } else {
  295. status->ring_head = info->head;
  296. }
  297. if (buffers) {
  298. for (i = 0; (i < info->ring_size)&&(i < n_buffers); i++) {
  299. bstatus = NWL_RING_GET(ring, DMA_BD_BUFL_STATUS_OFFSET);
  300. buffers[i].error = bstatus & (DMA_BD_ERROR_MASK/*|DMA_BD_SHORT_MASK*/);
  301. buffers[i].used = bstatus & DMA_BD_COMP_MASK;
  302. buffers[i].size = bstatus & DMA_BD_BUFL_MASK;
  303. buffers[i].first = bstatus & DMA_BD_SOP_MASK;
  304. buffers[i].last = bstatus & DMA_BD_EOP_MASK;
  305. ring += PCILIB_NWL_DMA_DESCRIPTOR_SIZE;
  306. }
  307. }
  308. return 0;
  309. }