ipe.c

#define _PCILIB_DMA_IPE_C
#define _BSD_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/time.h>
#include <arpa/inet.h>

#include "pci.h"
#include "pcilib.h"
#include "error.h"
#include "tools.h"

#include "ipe.h"
#include "ipe_private.h"
#include "ipe_registers.h"
// Direct 32-bit read/write access to the memory-mapped DMA register bank
#define WR(addr, value) { *(uint32_t*)(ctx->base_addr + addr) = value; }
#define RD(addr, value) { value = *(uint32_t*)(ctx->base_addr + addr); }
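
/*
 * dma_ipe_init() allocates and zero-initializes the DMA context, advertises a
 * single C2S (device-to-host) packet-type DMA engine to pcilib, resolves the
 * virtual base address of the DMA register bank, and registers the IPE DMA
 * register set.
 */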
pcilib_dma_context_t *dma_ipe_init(pcilib_t *pcilib, pcilib_dma_modification_t type, void *arg) {
    int err = 0;

    pcilib_model_description_t *model_info = pcilib_get_model_description(pcilib);

    ipe_dma_t *ctx = malloc(sizeof(ipe_dma_t));
    if (ctx) {
        memset(ctx, 0, sizeof(ipe_dma_t));
        ctx->pcilib = pcilib;
        // ctx->mode64 = 1;

        memset(ctx->engine, 0, 2 * sizeof(pcilib_dma_engine_description_t));
        ctx->engine[0].addr = 0;
        ctx->engine[0].type = PCILIB_DMA_TYPE_PACKET;
        ctx->engine[0].direction = PCILIB_DMA_FROM_DEVICE;
        ctx->engine[0].addr_bits = 32;
        pcilib_set_dma_engine_description(pcilib, 0, &ctx->engine[0]);
        pcilib_set_dma_engine_description(pcilib, 1, NULL);

        pcilib_register_bank_t dma_bank = pcilib_find_bank_by_addr(pcilib, PCILIB_REGISTER_BANK_DMA);
        if (dma_bank == PCILIB_REGISTER_BANK_INVALID) {
            free(ctx);
            pcilib_error("DMA Register Bank could not be found");
            return NULL;
        }

        ctx->dma_bank = model_info->banks + dma_bank;
        ctx->base_addr = pcilib_resolve_register_address(pcilib, ctx->dma_bank->bar, ctx->dma_bank->read_addr);

        err = pcilib_add_registers(ctx->pcilib, 0, ipe_dma_registers);
        if (err) {
            free(ctx);
            pcilib_error("Error adding DMA registers");
            return NULL;
        }
    }

    return (pcilib_dma_context_t*)ctx;
}
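
/*
 * dma_ipe_free() stops the engine (dma_ipe_stop releases the kernel buffers
 * unless persistent mode was requested) and frees the context.
 */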
void dma_ipe_free(pcilib_dma_context_t *vctx) {
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    if (ctx) {
        dma_ipe_stop(vctx, PCILIB_DMA_ENGINE_ALL, PCILIB_DMA_FLAGS_DEFAULT);
        free(ctx);
    }
}
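
/*
 * dma_ipe_start() allocates (or reuses) the descriptor buffer and the ring of
 * DMA pages. If a consistent, persistent hardware configuration is detected,
 * it is preserved; otherwise the engine is reset and reprogrammed: TLP size
 * and count, progress threshold, the progress (update) address, and the bus
 * addresses of all ring pages are written before DMA is enabled.
 */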
int dma_ipe_start(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
    size_t i;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    int preserve = 0;
    pcilib_kmem_flags_t kflags;
    pcilib_kmem_reuse_state_t reuse_desc, reuse_pages;

    volatile void *desc_va;
    volatile uint32_t *last_written_addr_ptr;

    pcilib_register_value_t value;
    uint32_t address64;

    if (dma == PCILIB_DMA_ENGINE_INVALID) return 0;
    else if (dma > 1) return PCILIB_ERROR_INVALID_BANK;

    if (!ctx->started) ctx->started = 1;

    if (flags & PCILIB_DMA_FLAG_PERSISTENT) ctx->preserve = 1;

    if (ctx->pages) return 0;

    kflags = PCILIB_KMEM_FLAG_REUSE|PCILIB_KMEM_FLAG_EXCLUSIVE|PCILIB_KMEM_FLAG_HARDWARE|(ctx->preserve?PCILIB_KMEM_FLAG_PERSISTENT:0);

    pcilib_kmem_handle_t *desc = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_CONSISTENT, 1, IPEDMA_DESCRIPTOR_SIZE, IPEDMA_DESCRIPTOR_ALIGNMENT, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_RING, 0x00), kflags);
    pcilib_kmem_handle_t *pages = pcilib_alloc_kernel_memory(ctx->pcilib, PCILIB_KMEM_TYPE_DMA_C2S_PAGE, IPEDMA_DMA_PAGES, 0, 0, PCILIB_KMEM_USE(PCILIB_KMEM_USE_DMA_PAGES, 0x00), kflags);

    if (!desc||!pages) {
        if (pages) pcilib_free_kernel_memory(ctx->pcilib, pages, 0);
        if (desc) pcilib_free_kernel_memory(ctx->pcilib, desc, 0);
        return PCILIB_ERROR_MEMORY;
    }

    reuse_desc = pcilib_kmem_is_reused(ctx->pcilib, desc);
    reuse_pages = pcilib_kmem_is_reused(ctx->pcilib, pages);

    if (reuse_desc == reuse_pages) {
        if (reuse_desc & PCILIB_KMEM_REUSE_PARTIAL) pcilib_warning("Inconsistent DMA buffers found (only part of the required buffers is available), reinitializing...");
        else if (reuse_desc & PCILIB_KMEM_REUSE_REUSED) {
            if ((reuse_desc & PCILIB_KMEM_REUSE_PERSISTENT) == 0) pcilib_warning("Lost DMA buffers found (non-persistent mode), reinitializing...");
            else if ((reuse_desc & PCILIB_KMEM_REUSE_HARDWARE) == 0) pcilib_warning("Lost DMA buffers found (missing HW reference), reinitializing...");
            else {
#ifndef IPEDMA_BUG_DMARD
                RD(IPEDMA_REG_PAGE_COUNT, value);

                if (value != IPEDMA_DMA_PAGES) pcilib_warning("Inconsistent DMA buffers found (number of allocated buffers (%lu) does not match the current request (%lu)), reinitializing...", value + 1, IPEDMA_DMA_PAGES);
                else
#endif /* IPEDMA_BUG_DMARD */
                preserve = 1;
            }
        }
    } else pcilib_warning("Inconsistent DMA buffers (modes of ring and page buffers do not match), reinitializing...");

    desc_va = pcilib_kmem_get_ua(ctx->pcilib, desc);
    if (ctx->mode64) last_written_addr_ptr = desc_va + 3 * sizeof(uint32_t);
    else last_written_addr_ptr = desc_va + 4 * sizeof(uint32_t);

    if (preserve) {
        ctx->reused = 1;
        ctx->preserve = 1;

        // usleep(100000);

        // Detect the current state of the DMA engine
#ifdef IPEDMA_BUG_DMARD
        FILE *f = fopen("/tmp/pcitool_lastread", "r");
        if (!f) pcilib_error("Can't read current status");
        fread(&value, 1, sizeof(pcilib_register_value_t), f);
        fclose(f);
#else /* IPEDMA_BUG_DMARD */
        RD(IPEDMA_REG_LAST_READ, value);
        // Numbered from 1 in the FPGA
        value--;
#endif /* IPEDMA_BUG_DMARD */

        ctx->last_read = value;
    } else {
        ctx->reused = 0;

        // Disable DMA
        WR(IPEDMA_REG_CONTROL, 0x0);
        usleep(100000);

        // Reset DMA engine
        WR(IPEDMA_REG_RESET, 0x1);
        usleep(100000);
        WR(IPEDMA_REG_RESET, 0x0);
        usleep(100000);

#ifndef IPEDMA_BUG_DMARD
        // Verify PCIe link status
        RD(IPEDMA_REG_RESET, value);
        if (value != 0x14031700) pcilib_warning("PCIe is not ready, code is %lx", value);
#endif /* IPEDMA_BUG_DMARD */

        // Enable 64-bit addressing and configure TLP and packet sizes (40-bit mode can be used with big pre-allocated buffers later)
        if (ctx->mode64) address64 = 0x8000 | (0<<24);
        else address64 = 0;

        WR(IPEDMA_REG_TLP_SIZE, address64 | IPEDMA_TLP_SIZE);
        WR(IPEDMA_REG_TLP_COUNT, IPEDMA_PAGE_SIZE / (4 * IPEDMA_TLP_SIZE * IPEDMA_CORES));

        // Setting the progress register threshold
        WR(IPEDMA_REG_UPDATE_THRESHOLD, IPEDMA_DMA_PROGRESS_THRESHOLD);

        // Resetting configured DMA pages
        WR(IPEDMA_REG_PAGE_COUNT, 0);

        // Setting the current read position and configuring the progress register
        WR(IPEDMA_REG_LAST_READ, IPEDMA_DMA_PAGES);
        WR(IPEDMA_REG_UPDATE_ADDR, pcilib_kmem_get_block_ba(ctx->pcilib, desc, 0));

        // Instructing the DMA engine that writing should start from the first DMA page
        *last_written_addr_ptr = 0; // htonl(pcilib_kmem_get_block_ba(ctx->pcilib, pages, IPEDMA_DMA_PAGES - 1));

        for (i = 0; i < IPEDMA_DMA_PAGES; i++) {
            uintptr_t bus_addr_check, bus_addr = pcilib_kmem_get_block_ba(ctx->pcilib, pages, i);
            WR(IPEDMA_REG_PAGE_ADDR, bus_addr);
            if (bus_addr % 4096) printf("Bad address %lu: %lx\n", i, bus_addr);

            RD(IPEDMA_REG_PAGE_ADDR, bus_addr_check);
            if (bus_addr_check != bus_addr) {
                pcilib_error("Written (%x) and read (%x) bus addresses do not match\n", bus_addr, bus_addr_check);
            }

            usleep(1000);
        }

        // Enable DMA
        WR(IPEDMA_REG_CONTROL, 0x1);

        ctx->last_read = IPEDMA_DMA_PAGES - 1;

#ifdef IPEDMA_BUG_DMARD
        FILE *f = fopen("/tmp/pcitool_lastread", "w");
        if (!f) pcilib_error("Can't write current status");
        value = ctx->last_read;
        fwrite(&value, 1, sizeof(pcilib_register_value_t), f);
        fclose(f);
#endif /* IPEDMA_BUG_DMARD */
    }

    // ctx->last_read_addr = htonl(pcilib_kmem_get_block_ba(ctx->pcilib, pages, ctx->last_read));
    ctx->last_read_addr = pcilib_kmem_get_block_ba(ctx->pcilib, pages, ctx->last_read);

    ctx->desc = desc;
    ctx->pages = pages;
    ctx->page_size = pcilib_kmem_get_block_size(ctx->pcilib, pages, 0);
    ctx->ring_size = IPEDMA_DMA_PAGES;

    return 0;
}
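
/*
 * dma_ipe_stop(): when the context is marked persistent, the hardware is left
 * configured and the kernel buffers are only detached for later reuse;
 * otherwise the engine is disabled and reset and the buffers are released.
 * Passing PCILIB_DMA_FLAG_PERSISTENT here overrides a persistent context and
 * forces a full stop.
 */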
int dma_ipe_stop(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
    pcilib_kmem_flags_t kflags;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    if (!ctx->started) return 0;

    if ((dma != PCILIB_DMA_ENGINE_INVALID)&&(dma > 1)) return PCILIB_ERROR_INVALID_BANK;

    // Ignore the previous setting if the flag is specified
    if (flags & PCILIB_DMA_FLAG_PERSISTENT) {
        ctx->preserve = 0;
    }

    if (ctx->preserve) {
        kflags = PCILIB_KMEM_FLAG_REUSE;
    } else {
        kflags = PCILIB_KMEM_FLAG_HARDWARE|PCILIB_KMEM_FLAG_PERSISTENT;

        ctx->started = 0;

        // Disable DMA
        WR(IPEDMA_REG_CONTROL, 0);
        usleep(100000);

        // Reset DMA engine
        WR(IPEDMA_REG_RESET, 0x1);
        usleep(100000);
        WR(IPEDMA_REG_RESET, 0x0);
        usleep(100000);

        // Resetting configured DMA pages
        WR(IPEDMA_REG_PAGE_COUNT, 0);
        usleep(100000);
    }

    // Clean buffers
    if (ctx->desc) {
        pcilib_free_kernel_memory(ctx->pcilib, ctx->desc, kflags);
        ctx->desc = NULL;
    }

    if (ctx->pages) {
        pcilib_free_kernel_memory(ctx->pcilib, ctx->pages, kflags);
        ctx->pages = NULL;
    }

    return 0;
}
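
/*
 * dma_ipe_get_status() reads the last-written bus address from the descriptor
 * buffer, maps it back to a ring index to find the ring head, reports the
 * tail from the software last_read position, and optionally flags the used
 * buffers in the caller-supplied array.
 */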
int dma_ipe_get_status(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, pcilib_dma_engine_status_t *status, size_t n_buffers, pcilib_dma_buffer_status_t *buffers) {
    size_t i;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    void *desc_va = (void*)pcilib_kmem_get_ua(ctx->pcilib, ctx->desc);
    uint32_t *last_written_addr_ptr;
    uint32_t last_written_addr;

    if (!status) return -1;

    if (ctx->mode64) last_written_addr_ptr = desc_va + 3 * sizeof(uint32_t);
    else last_written_addr_ptr = desc_va + 4 * sizeof(uint32_t);

    last_written_addr = *last_written_addr_ptr;

    status->started = ctx->started;
    status->ring_size = ctx->ring_size;
    status->buffer_size = ctx->page_size;

    // For simplicity, we keep last_read here and fix it up at the end
    status->ring_tail = ctx->last_read;

    // Find where the ring head actually is
    for (i = 0; i < ctx->ring_size; i++) {
        uintptr_t bus_addr = pcilib_kmem_get_block_ba(ctx->pcilib, ctx->pages, i);
        if (bus_addr == last_written_addr) {
            status->ring_head = i;
            break;
        }
    }

    if (i == ctx->ring_size) {
        if (last_written_addr) {
            pcilib_warning("DMA is in an unknown state, last_written_addr does not correspond to any of the available buffers");
            return -1;
        }
        status->ring_head = 0;
        status->ring_tail = 0;
    }

    if (n_buffers > ctx->ring_size) n_buffers = ctx->ring_size;

    if (buffers) {
        memset(buffers, 0, n_buffers * sizeof(pcilib_dma_buffer_status_t));

        if (status->ring_head >= status->ring_tail) {
            for (i = status->ring_tail + 1; (i <= status->ring_head)&&(i < n_buffers); i++) {
                buffers[i].used = 1;
                buffers[i].size = ctx->page_size;
                buffers[i].first = 1;
                buffers[i].last = 1;
            }
        } else {
            for (i = 0; (i <= status->ring_head)&&(i < n_buffers); i++) {
                buffers[i].used = 1;
                buffers[i].size = ctx->page_size;
                buffers[i].first = 1;
                buffers[i].last = 1;
            }

            for (i = status->ring_tail + 1; (i < status->ring_size)&&(i < n_buffers); i++) {
                buffers[i].used = 1;
                buffers[i].size = ctx->page_size;
                buffers[i].first = 1;
                buffers[i].last = 1;
            }
        }
    }

    // We actually keep last_read in ring_tail, so we need to increment it
    if (status->ring_tail != status->ring_head) {
        status->ring_tail++;
        if (status->ring_tail == status->ring_size) status->ring_tail = 0;
    }

    return 0;
}
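
/*
 * dma_ipe_stream_read() polls the descriptor's last-written address until new
 * pages arrive (or the timeout expires), synchronizes each filled page for
 * CPU access, passes it to the callback, and acknowledges it to the hardware
 * through IPEDMA_REG_LAST_READ (which counts pages from 1).
 */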
int dma_ipe_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr) {
    int err, ret = PCILIB_STREAMING_REQ_PACKET;
    pcilib_timeout_t wait = 0;
    struct timeval start, cur;

    volatile void *desc_va;
    volatile uint32_t *last_written_addr_ptr;
    volatile uint32_t *empty_detected_ptr;

    pcilib_dma_flags_t packet_flags = PCILIB_DMA_FLAG_EOP;

#ifdef IPEDMA_BUG_DMARD
    pcilib_register_value_t value;
#endif /* IPEDMA_BUG_DMARD */

    size_t cur_read;

    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    err = dma_ipe_start(vctx, dma, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    desc_va = (void*)pcilib_kmem_get_ua(ctx->pcilib, ctx->desc);
    if (ctx->mode64) last_written_addr_ptr = desc_va + 3 * sizeof(uint32_t);
    else last_written_addr_ptr = desc_va + 4 * sizeof(uint32_t);

    empty_detected_ptr = last_written_addr_ptr - 2;

    do {
        switch (ret&PCILIB_STREAMING_TIMEOUT_MASK) {
            case PCILIB_STREAMING_CONTINUE:
                // The hardware indicates that no more data is pending, so we can safely stop if the kernel buffers are already empty
#ifdef IPEDMA_SUPPORT_EMPTY_DETECTED
                if (*empty_detected_ptr)
                    wait = 0;
                else
#endif /* IPEDMA_SUPPORT_EMPTY_DETECTED */
                    wait = IPEDMA_DMA_TIMEOUT;
                break;
            case PCILIB_STREAMING_WAIT:
                wait = (timeout > IPEDMA_DMA_TIMEOUT)?timeout:IPEDMA_DMA_TIMEOUT;
                break;
            // case PCILIB_STREAMING_CHECK: wait = 0; break;
        }

#ifdef IPEDMA_DEBUG
        printf("Waiting for data: %u (last read) 0x%x (last read addr) 0x%x (last_written)\n", ctx->last_read, ctx->last_read_addr, *last_written_addr_ptr);
#endif /* IPEDMA_DEBUG */

        gettimeofday(&start, NULL);
        memcpy(&cur, &start, sizeof(struct timeval));
        while (((*last_written_addr_ptr == 0)||(ctx->last_read_addr == (*last_written_addr_ptr)))&&((wait == PCILIB_TIMEOUT_INFINITE)||(((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec)) < wait))) {
            usleep(10);

#ifdef IPEDMA_SUPPORT_EMPTY_DETECTED
            if ((ret != PCILIB_STREAMING_REQ_PACKET)&&(*empty_detected_ptr)) break;
#endif /* IPEDMA_SUPPORT_EMPTY_DETECTED */

            gettimeofday(&cur, NULL);
        }

        // Fail out if we exited on timeout
        if ((ctx->last_read_addr == (*last_written_addr_ptr))||(*last_written_addr_ptr == 0)) {
#ifdef IPEDMA_SUPPORT_EMPTY_DETECTED
# ifdef IPEDMA_DEBUG
            if ((wait)&&(*last_written_addr_ptr)&&(!*empty_detected_ptr))
                pcilib_warning("The empty_detected flag is not set, but no data arrived within %lu us\n", wait);
# endif /* IPEDMA_DEBUG */
#endif /* IPEDMA_SUPPORT_EMPTY_DETECTED */
            return (ret&PCILIB_STREAMING_FAIL)?PCILIB_ERROR_TIMEOUT:0;
        }

        // Get the next page to read
        cur_read = ctx->last_read + 1;
        if (cur_read == ctx->ring_size) cur_read = 0;

#ifdef IPEDMA_DEBUG
        printf("Reading: %u (last read) 0x%x (last read addr) 0x%x (last_written)\n", cur_read, ctx->last_read_addr, *last_written_addr_ptr);
#endif /* IPEDMA_DEBUG */

#ifdef IPEDMA_DETECT_PACKETS
        if ((*empty_detected_ptr)&&(pcilib_kmem_get_block_ba(ctx->pcilib, ctx->pages, cur_read) == (*last_written_addr_ptr))) packet_flags = PCILIB_DMA_FLAG_EOP;
        else packet_flags = 0;
#endif /* IPEDMA_DETECT_PACKETS */

        pcilib_kmem_sync_block(ctx->pcilib, ctx->pages, PCILIB_KMEM_SYNC_FROMDEVICE, cur_read);
        void *buf = pcilib_kmem_get_block_ua(ctx->pcilib, ctx->pages, cur_read);
        ret = cb(cbattr, packet_flags, ctx->page_size, buf);
        if (ret < 0) return -ret;

        // DS: FIXME - it looks like we can avoid calling this for the sake of performance
        // pcilib_kmem_sync_block(ctx->pcilib, ctx->pages, PCILIB_KMEM_SYNC_TODEVICE, cur_read);

        // Numbered from 1
        WR(IPEDMA_REG_LAST_READ, cur_read + 1);

        ctx->last_read = cur_read;
        // ctx->last_read_addr = htonl(pcilib_kmem_get_block_ba(ctx->pcilib, ctx->pages, cur_read));
        ctx->last_read_addr = pcilib_kmem_get_block_ba(ctx->pcilib, ctx->pages, cur_read);

#ifdef IPEDMA_BUG_DMARD
        FILE *f = fopen("/tmp/pcitool_lastread", "w");
        if (!f) pcilib_error("Can't write current status");
        value = cur_read;
        fwrite(&value, 1, sizeof(pcilib_register_value_t), f);
        fclose(f);
#endif /* IPEDMA_BUG_DMARD */
    } while (ret);

    return 0;
}
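
/*
 * dma_ipe_benchmark() measures read throughput: it drains any pending data,
 * then for each iteration enables DMA, reads `size` bytes via
 * pcilib_read_dma(), disables DMA again, and finally returns the average
 * rate in MB/s.
 */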
double dma_ipe_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dma, uintptr_t addr, size_t size, size_t iterations, pcilib_dma_direction_t direction) {
    int err = 0;
    ipe_dma_t *ctx = (ipe_dma_t*)vctx;

    int iter;
    size_t us = 0;
    struct timeval start, cur;

    void *buf;
    size_t bytes, rbytes;

    if ((direction == PCILIB_DMA_TO_DEVICE)||(direction == PCILIB_DMA_BIDIRECTIONAL)) return -1.;

    if ((dma != PCILIB_DMA_ENGINE_INVALID)&&(dma > 1)) return -1.;

    err = dma_ipe_start(vctx, 0, PCILIB_DMA_FLAGS_DEFAULT);
    if (err) return err;

    WR(IPEDMA_REG_CONTROL, 0x0);

    err = pcilib_skip_dma(ctx->pcilib, 0);
    if (err) {
        pcilib_error("Can't start the benchmark, the device continuously writes unexpected data using the DMA engine");
        return -1;
    }

    if (size % IPEDMA_PAGE_SIZE) size = (1 + size / IPEDMA_PAGE_SIZE) * IPEDMA_PAGE_SIZE;

    // Allocate memory and prepare data
    buf = malloc(size);
    if (!buf) return -1;

    for (iter = 0; iter < iterations; iter++) {
        gettimeofday(&start, NULL);

        // Starting DMA
        WR(IPEDMA_REG_CONTROL, 0x1);

        for (bytes = 0; bytes < size; bytes += rbytes) {
            err = pcilib_read_dma(ctx->pcilib, 0, addr, size - bytes, buf + bytes, &rbytes);
            if (err) {
                pcilib_error("Can't read data from DMA, error %i", err);
                free(buf);
                return -1;
            }
        }

        // Stopping DMA
        WR(IPEDMA_REG_CONTROL, 0x0);
        if (err) break;

        gettimeofday(&cur, NULL);
        us += ((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec));

        err = pcilib_skip_dma(ctx->pcilib, 0);
        if (err) {
            pcilib_error("Can't start the iteration, the device continuously writes unexpected data using the DMA engine");
            break;
        }
    }

    free(buf);

    return err?-1:((1. * size * iterations * 1000000) / (1024. * 1024. * us));
}