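/*
 * ipe.c - IPE DMA engine implementation for pcilib.
 *
 * Provides init/free, start/stop, status reporting, a streaming read loop
 * and a simple read benchmark, built on top of the pcilib kernel-memory
 * (kmem) allocator and a memory-mapped DMA register bank.
 */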
#define _PCILIB_DMA_IPE_C
#define _BSD_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <arpa/inet.h>

#include "pci.h"
#include "pcilib.h"
#include "error.h"
#include "tools.h"
#include "ipe.h"
#include "ipe_private.h"
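/* Register access helpers: the DMA register bank is mapped at ctx->base_addr,
   so register reads and writes are plain 32-bit accesses at fixed offsets. */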
#define WR(addr, value) { *(uint32_t*)(ctx->base_addr + addr) = value; }
#define RD(addr, value) { value = *(uint32_t*)(ctx->base_addr + addr); }
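/* Allocate and initialize the DMA context: locate the DMA register bank in
   the model description and resolve its base address for the WR/RD macros. */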
pcilib_dma_context_t *dma_ipe_init(pcilib_t *pcilib, const char *model, const void *arg) {
//    int err = 0;
    const pcilib_model_description_t *model_info = pcilib_get_model_description(pcilib);

    ipe_dma_t *ctx = malloc(sizeof(ipe_dma_t));
    if (ctx) {
        memset(ctx, 0, sizeof(ipe_dma_t));
        ctx->dmactx.pcilib = pcilib;
//        ctx->mode64 = 1;
/*
        memset(ctx->engine, 0, 2 * sizeof(pcilib_dma_engine_description_t));
        ctx->engine[0].addr = 0;
        ctx->engine[0].type = PCILIB_DMA_TYPE_PACKET;
        ctx->engine[0].direction = PCILIB_DMA_FROM_DEVICE;
        ctx->engine[0].addr_bits = 32;
        pcilib_set_dma_engine_description(pcilib, 0, &ctx->engine[0]);
        pcilib_set_dma_engine_description(pcilib, 1, NULL);
*/
        pcilib_register_bank_t dma_bank = pcilib_find_register_bank_by_addr(pcilib, PCILIB_REGISTER_BANK_DMA);
/*
        if (dma_bank == PCILIB_REGISTER_BANK_INVALID) {
            err = pcilib_add_register_banks(ctx->pcilib, 0, ipe_dma_register_banks);
            if (err) {
                free(ctx);
                pcilib_error("Error (%i) adding DMA register bank");
                return NULL;
            }
        }
        dma_bank = pcilib_find_bank_by_addr(pcilib, PCILIB_REGISTER_BANK_DMA);
*/
        if (dma_bank == PCILIB_REGISTER_BANK_INVALID) {
            free(ctx);
            pcilib_error("DMA Register Bank could not be found");
            return NULL;
        }

        ctx->dma_bank = model_info->banks + dma_bank;
        ctx->base_addr = pcilib_resolve_register_address(pcilib, ctx->dma_bank->bar, ctx->dma_bank->read_addr);
/*
        err = pcilib_add_registers(ctx->pcilib, 0, ipe_dma_registers);
        if (err) {
            free(ctx);
            pcilib_error("Error adding DMA registers");
            return NULL;
        }
*/
    }

    return (pcilib_dma_context_t*)ctx;
}
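/* Stop all DMA engines and release the context. */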
void dma_ipe_free(pcilib_dma_context_t *vctx) {
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    if (ctx) {
        dma_ipe_stop(vctx, PCILIB_DMA_ENGINE_ALL, PCILIB_DMA_FLAGS_DEFAULT);
        free(ctx);
    }
}
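/* Start the read (C2S) engine: allocate (or reuse) the descriptor buffer and
   the ring of DMA pages, then either resume from the preserved hardware state
   or reset the engine and program the page addresses from scratch. */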
int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
    size_t i;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;
    int preserve = 0;
    pcilib_kmem_flags_t kflags;
    pcilib_kmem_reuse_state_t reuse_desc, reuse_pages;
    volatile void *desc_va;
    volatile uint32_t *last_written_addr_ptr;
    pcilib_register_value_t value;
    uint32_t address64;

    if (dma == PCILIB_DMA_ENGINE_INVALID) return 0;
    else if (dma > 1) return PCILIB_ERROR_INVALID_BANK;

    if (!ctx->started) ctx->started = 1;

    if (flags&PCILIB_DMA_FLAG_PERSISTENT) ctx->preserve = 1;

    if (ctx->pages) return 0;

    kflags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ctx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);

    pcilib_kmem_handle_t *desc = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, IPEDMA_DESCRIPTOR_SIZE, IPEDMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags);
    pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->dmactx.pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, IPEDMA_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);

    if (!desc||!pages) {
        if (pages) pcilib_free_kernel_memory(ctx->dmactx.pcilib, pages, 0);
        if (desc) pcilib_free_kernel_memory(ctx->dmactx.pcilib, desc, 0);
        return PCILIB_ERROR_MEMORY;
    }

    reuse_desc = pcilib_kmem_is_reused(ctx->dmactx.pcilib, desc);
    reuse_pages = pcilib_kmem_is_reused(ctx->dmactx.pcilib, pages);

    if (reuse_desc == reuse_pages) {
        if (reuse_desc & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers are found (only part of the required buffers is available), reinitializing...");
        else if (reuse_desc & PCILIB_KMEM_REUSE_REUSED) {
            if ((reuse_desc & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers are found (non-persistent mode), reinitializing...");
            else if ((reuse_desc & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers are found (missing HW reference), reinitializing...");
            else {
#ifndef IPEDMA_BUG_DMARD
                RD(IPEDMA_REG_PAGE_COUNT, value);

                if (value != IPEDMA_DMA_PAGES) pcilib_warning("Inconsistent DMA buffers are found (number of allocated buffers (%lu) does not match the current request (%lu)), reinitializing...", value + 1, IPEDMA_DMA_PAGES);
                else
#endif /* IPEDMA_BUG_DMARD */
                preserve = 1;
            }
        }
    } else pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers do not match), reinitializing...");

    desc_va = pcilib_kmem_get_ua(ctx->dmactx.pcilib, desc);
    if (ctx->mode64) last_written_addr_ptr = desc_va + 3 * sizeof(uint32_t);
    else last_written_addr_ptr = desc_va + 4 * sizeof(uint32_t);
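
    /* If a consistent, hardware-backed buffer set was found, resume from the
       engine's current read position; otherwise reset and reprogram the engine. */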
    if (preserve) {
        ctx->reused = 1;
        ctx->preserve = 1;

        // usleep(100000);

        // Detect the current state of the DMA engine
#ifdef IPEDMA_BUG_DMARD
        FILE *f = fopen("/tmp/pcitool_lastread", "r");
        if (!f) pcilib_error("Can't read current status");
        fread(&value, 1, sizeof(pcilib_register_value_t), f);
        fclose(f);
#else /* IPEDMA_BUG_DMARD */
        RD(IPEDMA_REG_LAST_READ, value);
        // Numbered from 1 in FPGA
        value--;
#endif /* IPEDMA_BUG_DMARD */

        ctx->last_read = value;
    } else {
        ctx->reused = 0;

        // Disable DMA
        WR(IPEDMA_REG_CONTROL, 0x0);
        usleep(100000);

        // Reset DMA engine
        WR(IPEDMA_REG_RESET, 0x1);
        usleep(100000);
        WR(IPEDMA_REG_RESET, 0x0);
        usleep(100000);

#ifndef IPEDMA_BUG_DMARD
        // Verify PCIe link status
        RD(IPEDMA_REG_RESET, value);
        if (value != 0x14031700) pcilib_warning("PCIe is not ready, code is %lx", value);
#endif /* IPEDMA_BUG_DMARD */

        // Enable 64-bit addressing and configure TLP and PACKET sizes (40-bit mode can be used with big pre-allocated buffers later)
        if (ctx->mode64) address64 = 0x8000 | (0<<24);
        else address64 = 0;

        WR(IPEDMA_REG_TLP_SIZE, address64 | IPEDMA_TLP_SIZE);
        WR(IPEDMA_REG_TLP_COUNT, IPEDMA_PAGE_SIZE / (4 * IPEDMA_TLP_SIZE * IPEDMA_CORES));

        // Setting the progress register threshold
        WR(IPEDMA_REG_UPDATE_THRESHOLD, IPEDMA_DMA_PROGRESS_THRESHOLD);

        // Resetting configured DMA pages
        WR(IPEDMA_REG_PAGE_COUNT, 0);

        // Setting the current read position and configuring the progress register
        WR(IPEDMA_REG_LAST_READ, IPEDMA_DMA_PAGES);
        WR(IPEDMA_REG_UPDATE_ADDR, pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, desc, 0));

        // Instructing the DMA engine that writing should start from the first DMA page
        *last_written_addr_ptr = 0; // htonl(pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, pages, IPEDMA_DMA_PAGES - 1));
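
        // Program the bus address of every DMA page into the engine and read it
        // back to verify that the hardware accepted it.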
        for (i = 0; i < IPEDMA_DMA_PAGES; i++) {
            uintptr_t bus_addr_check, bus_addr = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, pages, i);
            WR(IPEDMA_REG_PAGE_ADDR, bus_addr);
            if (bus_addr%4096) printf("Bad address %lu: %lx\n", i, bus_addr);

            RD(IPEDMA_REG_PAGE_ADDR, bus_addr_check);
            if (bus_addr_check != bus_addr) {
                pcilib_error("Written (%lx) and read (%lx) bus addresses do not match\n", bus_addr, bus_addr_check);
            }

            usleep(1000);
        }

        // Enable DMA
        // WR(IPEDMA_REG_CONTROL, 0x1);

        ctx->last_read = IPEDMA_DMA_PAGES - 1;

#ifdef IPEDMA_BUG_DMARD
        FILE *f = fopen("/tmp/pcitool_lastread", "w");
        if (!f) pcilib_error("Can't write current status");
        value = ctx->last_read;
        fwrite(&value, 1, sizeof(pcilib_register_value_t), f);
        fclose(f);
#endif /* IPEDMA_BUG_DMARD */
    }

    // ctx->last_read_addr = htonl(pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, pages, ctx->last_read));
    ctx->last_read_addr = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, pages, ctx->last_read);

    ctx->desc = desc;
    ctx->pages = pages;
    ctx->page_size = pcilib_kmem_get_block_size(ctx->dmactx.pcilib, pages, 0);
    ctx->ring_size = IPEDMA_DMA_PAGES;

    return 0;
}
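/* Stop the engine and release the kernel buffers. In preserve mode the buffers
   are only marked for reuse; passing the PERSISTENT flag overrides preserve
   mode and forces a full reset and cleanup. */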
int dma_ipe_stop(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
    pcilib_kmem_flags_t kflags;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    if (!ctx->started) return 0;

    if ((dma != PCILIB_DMA_ENGINE_INVALID)&&(dma > 1)) return PCILIB_ERROR_INVALID_BANK;

    // Override the preserve mode if the flag is explicitly specified
    if (flags&PCILIB_DMA_FLAG_PERSISTENT) {
        ctx->preserve = 0;
    }

    if (ctx->preserve) {
        kflags = PCILIB_KMEM_FLAG_REUSE;
    } else {
        kflags = PCILIB_KMEM_FLAG_HARDWARE|PCILIB_KMEM_FLAG_PERSISTENT;

        ctx->started = 0;

        // Disable DMA
        WR(IPEDMA_REG_CONTROL, 0);
        usleep(100000);

        // Reset DMA engine
        WR(IPEDMA_REG_RESET, 0x1);
        usleep(100000);
        WR(IPEDMA_REG_RESET, 0x0);
        usleep(100000);

        // Resetting configured DMA pages
        WR(IPEDMA_REG_PAGE_COUNT, 0);
        usleep(100000);
    }

    // Clean buffers
    if (ctx->desc) {
        pcilib_free_kernel_memory(ctx->dmactx.pcilib, ctx->desc, kflags);
        ctx->desc = NULL;
    }

    if (ctx->pages) {
        pcilib_free_kernel_memory(ctx->dmactx.pcilib, ctx->pages, kflags);
        ctx->pages = NULL;
    }

    return 0;
}
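/* Report the engine state: ring geometry, current head and tail positions and,
   if requested, the per-buffer usage flags. */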
int dma_ipe_get_status(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_engine_status_t *status, size_t n_buffers, pcilib_dma_buffer_status_t *buffers) {
    size_t i;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    void *desc_va = (void*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, ctx->desc);
    uint32_t *last_written_addr_ptr;
    uint32_t last_written_addr;

    if (!status) return -1;

    if (ctx->mode64) last_written_addr_ptr = desc_va + 3 * sizeof(uint32_t);
    else last_written_addr_ptr = desc_va + 4 * sizeof(uint32_t);

    last_written_addr = ntohl(*last_written_addr_ptr);

    status->started = ctx->started;
    status->ring_size = ctx->ring_size;
    status->buffer_size = ctx->page_size;

    status->ring_tail = ctx->last_read + 1;
    if (status->ring_tail == status->ring_size) status->ring_tail = 0;

    // Find where the ring head actually is
    for (i = 0; i < ctx->ring_size; i++) {
        uintptr_t bus_addr = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, ctx->pages, i);
        if (bus_addr == last_written_addr) {
            status->ring_head = i;
            break;
        }
    }

    if (i == ctx->ring_size) {
        // ERROR: the last written address does not match any of the allocated pages
    }

    if (n_buffers > ctx->ring_size) n_buffers = ctx->ring_size;

    memset(buffers, 0, n_buffers * sizeof(pcilib_dma_buffer_status_t));

    if (status->ring_head > status->ring_tail) {
        for (i = status->ring_tail; i <= status->ring_head; i++) {
            buffers[i].used = 1;
            buffers[i].size = ctx->page_size;
            buffers[i].first = 1;
            buffers[i].last = 1;
        }
    } else {
        for (i = 0; i <= status->ring_head; i++) {
            buffers[i].used = 1;
            buffers[i].size = ctx->page_size;
            buffers[i].first = 1;
            buffers[i].last = 1;
        }

        for (i = status->ring_tail; i < status->ring_size; i++) {
            buffers[i].used = 1;
            buffers[i].size = ctx->page_size;
            buffers[i].first = 1;
            buffers[i].last = 1;
        }
    }

    return 0;
}
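/* Streaming read loop: wait until the hardware advances the last-written
   address in the descriptor, hand each newly filled page to the callback,
   and acknowledge it by updating the LAST_READ register. */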
int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr) {
    int err, ret = PCILIB_STREAMING_REQ_PACKET;

    pcilib_timeout_t wait = 0;
    struct timeval start, cur;

    volatile void *desc_va;
    volatile uint32_t *last_written_addr_ptr;
    volatile uint32_t *empty_detected_ptr;

    pcilib_dma_flags_t packet_flags = PCILIB_DMA_FLAG_EOP;

#ifdef IPEDMA_BUG_DMARD
    pcilib_register_value_t value;
#endif /* IPEDMA_BUG_DMARD */

    size_t cur_read;

    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    err = dma_ipe_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    desc_va = (void*)pcilib_kmem_get_ua(ctx->dmactx.pcilib, ctx->desc);
    if (ctx->mode64) last_written_addr_ptr = desc_va + 3 * sizeof(uint32_t);
    else last_written_addr_ptr = desc_va + 4 * sizeof(uint32_t);

    empty_detected_ptr = last_written_addr_ptr - 2;
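
    // Each iteration waits for one newly written page (honoring the timeout mode
    // requested by the previous callback return value) and delivers it.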
    do {
        switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
            case PCILIB_STREAMING_CONTINUE:
                // The hardware indicates that no more data is pending, so we can stop
                // safely if nothing is waiting in the kernel buffers already
#ifdef IPEDMA_SUPPORT_EMPTY_DETECTED
                if (*empty_detected_ptr)
                    wait = 0;
                else
#endif /* IPEDMA_SUPPORT_EMPTY_DETECTED */
                    wait = IPEDMA_DMA_TIMEOUT;
                break;
            case PCILIB_STREAMING_WAIT:
                wait = (timeout > IPEDMA_DMA_TIMEOUT)?timeout:IPEDMA_DMA_TIMEOUT;
                break;
            // case PCILIB_STREAMING_CHECK: wait = 0; break;
        }

#ifdef IPEDMA_DEBUG
        printf("Waiting for data: %u (last read) 0x%x (last read addr) 0x%x (last_written)\n", ctx->last_read, ctx->last_read_addr, *last_written_addr_ptr);
#endif /* IPEDMA_DEBUG */

        gettimeofday(&start, NULL);
        memcpy(&cur, &start, sizeof(struct timeval));
        while (((*last_written_addr_ptr == 0)||(ctx->last_read_addr == (*last_written_addr_ptr)))&&((wait == PCILIB_TIMEOUT_INFINITE)||(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < wait))) {
            usleep(10);
            gettimeofday(&cur, NULL);
        }

        // Fail out if we exited on timeout
        if ((ctx->last_read_addr == (*last_written_addr_ptr))||(*last_written_addr_ptr == 0)) {
#ifdef IPEDMA_SUPPORT_EMPTY_DETECTED
//# ifdef IPEDMA_DEBUG
            if ((wait)&&(*last_written_addr_ptr))
                pcilib_warning("The empty_detected flag is not set, but no data arrived within %lu us\n", wait);
//# endif /* IPEDMA_DEBUG */
#endif /* IPEDMA_SUPPORT_EMPTY_DETECTED */
            return (ret&PCILIB_STREAMING_FAIL)?PCILIB_ERROR_TIMEOUT:0;
        }

        // Get the next page to read
        cur_read = ctx->last_read + 1;
        if (cur_read == ctx->ring_size) cur_read = 0;

#ifdef IPEDMA_DEBUG
        printf("Reading: %u (last read) 0x%x (last read addr) 0x%x (last_written)\n", cur_read, ctx->last_read_addr, *last_written_addr_ptr);
#endif /* IPEDMA_DEBUG */

#ifdef IPEDMA_DETECT_PACKETS
        if ((*empty_detected_ptr)&&(pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, ctx->pages, cur_read) == (*last_written_addr_ptr))) packet_flags = PCILIB_DMA_FLAG_EOP;
        else packet_flags = 0;
#endif /* IPEDMA_DETECT_PACKETS */

        pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, cur_read);
        void *buf = pcilib_kmem_get_block_ua(ctx->dmactx.pcilib, ctx->pages, cur_read);
        ret = cb(cbattr, packet_flags, ctx->page_size, buf);
        if (ret < 0) return -ret;
        // DS: Fixme, it looks like we can avoid calling this for the sake of performance
        // pcilib_kmem_sync_block(ctx->dmactx.pcilib, ctx->pages, PCILIB_KMEM_SYNC_TODEVICE, cur_read);

        // Numbered from 1 in FPGA
        WR(IPEDMA_REG_LAST_READ, cur_read + 1);

        ctx->last_read = cur_read;
        // ctx->last_read_addr = htonl(pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, ctx->pages, cur_read));
        ctx->last_read_addr = pcilib_kmem_get_block_ba(ctx->dmactx.pcilib, ctx->pages, cur_read);

#ifdef IPEDMA_BUG_DMARD
        FILE *f = fopen("/tmp/pcitool_lastread", "w");
        if (!f) pcilib_error("Can't write current status");
        value = cur_read;
        fwrite(&value, 1, sizeof(pcilib_register_value_t), f);
        fclose(f);
#endif /* IPEDMA_BUG_DMARD */
    } while (ret);

    return 0;
}
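/* Read benchmark: repeatedly read `size` bytes (rounded up to whole DMA pages)
   through the engine and return the achieved throughput in MB/s, or -1 on error. */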
double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dma, uintptr_t addr, size_t size, size_t iterations, pcilib_dma_direction_t direction) {
    int err = 0;

    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    int iter;
    size_t us = 0;
    struct timeval start, cur;

    void *buf;
    size_t bytes, rbytes;

    if ((direction == PCILIB_DMA_TO_DEVICE)||(direction == PCILIB_DMA_BIDIRECTIONAL)) return -1.;

    if ((dma != PCILIB_DMA_ENGINE_INVALID)&&(dma > 1)) return -1.;

    err = dma_ipe_start(vctx, 0, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    WR(IPEDMA_REG_CONTROL, 0x0);

    err = pcilib_skip_dma(ctx->dmactx.pcilib, 0);
    if (err) {
        pcilib_error("Can't start benchmark, the device continuously writes unexpected data using the DMA engine");
        return -1;
    }

    if (size%IPEDMA_PAGE_SIZE) size = (1 + size / IPEDMA_PAGE_SIZE) * IPEDMA_PAGE_SIZE;

    // Allocate memory and prepare data
    buf = malloc(size);
    if (!buf) return -1;
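
    // Timed loop: enable the engine, read `size` bytes, disable it again and
    // accumulate the elapsed time; stale data is skipped between iterations.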
    for (iter = 0; iter < iterations; iter++) {
        gettimeofday(&start, NULL);

        // Starting DMA
        WR(IPEDMA_REG_CONTROL, 0x1);

        for (bytes = 0; bytes < size; bytes += rbytes) {
            err = pcilib_read_dma(ctx->dmactx.pcilib, 0, addr, size - bytes, buf + bytes, &rbytes);
            if (err) {
                pcilib_error("Can't read data from DMA, error %i", err);
                free(buf);
                return -1;
            }
        }

        // Stopping DMA
        WR(IPEDMA_REG_CONTROL, 0x0);
        if (err) break;

        gettimeofday(&cur, NULL);
        us += ((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec));

        err = pcilib_skip_dma(ctx->dmactx.pcilib, 0);
        if (err) {
            pcilib_error("Can't start the next iteration, the device continuously writes unexpected data using the DMA engine");
            break;
        }
    }

    free(buf);

    return err?-1:((1. * size * iterations * 1000000) / (1024. * 1024. * us));
}