/* kmem.c */
/**
 *
 * @file kmem.c
 * @brief This file contains all functions dealing with kernel memory.
 * @author Guillermo Marcus
 * @date 2009-04-05
 *
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include "config.h"    /* compile-time configuration */
#include "compat.h"    /* compatibility definitions for older linux */
#include "pciDriver.h" /* external interface for the driver */
#include "common.h"    /* internal definitions for all parts */
#include "kmem.h"      /* prototypes for kernel memory */
#include "sysfs.h"     /* prototypes for sysfs */
  25. /**
  26. *
  27. * Allocates new kernel memory including the corresponding management structure, makes
  28. * it available via sysfs if possible.
  29. *
  30. */
  31. int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  32. {
  33. int flags;
  34. pcidriver_kmem_entry_t *kmem_entry;
  35. void *retptr;
  36. if (kmem_handle->flags&KMEM_FLAG_REUSE) {
  37. kmem_entry = pcidriver_kmem_find_entry_use(privdata, kmem_handle->use, kmem_handle->item);
  38. if (kmem_entry) {
  39. unsigned long flags = kmem_handle->flags;
  40. if (flags&KMEM_FLAG_TRY) {
  41. kmem_handle->type = kmem_entry->type;
  42. kmem_handle->size = kmem_entry->size;
  43. kmem_handle->align = kmem_entry->align;
  44. } else {
  45. if (kmem_handle->type != kmem_entry->type) {
  46. mod_info("Invalid type of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->type, kmem_handle->type);
  47. kmem_handle->type = kmem_entry->type;
  48. return -EINVAL;
  49. }
  50. if (((kmem_handle->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE)&&(kmem_handle->size == 0)) {
  51. kmem_handle->size = kmem_entry->size;
  52. } else if (kmem_handle->size != kmem_entry->size) {
  53. mod_info("Invalid size of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->size, kmem_handle->size);
  54. kmem_handle->size = kmem_entry->size;
  55. return -EINVAL;
  56. }
  57. if (kmem_handle->align != kmem_entry->align) {
  58. mod_info("Invalid alignment of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->align, kmem_handle->align);
  59. kmem_handle->align = kmem_entry->align;
  60. return -EINVAL;
  61. }
  62. if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?1:0) != ((flags&KMEM_FLAG_EXCLUSIVE)?1:0)) {
  63. mod_info("Invalid mode of reusable kmem_entry\n");
  64. kmem_handle->flags = (kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?KMEM_FLAG_EXCLUSIVE:0;
  65. return -EINVAL;
  66. }
  67. }
  68. if ((kmem_entry->mode&KMEM_MODE_COUNT)==KMEM_MODE_COUNT) {
  69. mod_info("Reuse counter of kmem_entry is overflown");
  70. return -EBUSY;
  71. }
  72. kmem_handle->handle_id = kmem_entry->id;
  73. kmem_handle->ba = (unsigned long)(kmem_entry->dma_handle);
  74. kmem_handle->pa = virt_to_phys((void*)kmem_entry->cpua);
  75. kmem_handle->flags = KMEM_FLAG_REUSED;
  76. if (kmem_entry->refs&KMEM_REF_HW) kmem_handle->flags |= KMEM_FLAG_REUSED_HW;
  77. if (kmem_entry->mode&KMEM_MODE_PERSISTENT) kmem_handle->flags |= KMEM_FLAG_REUSED_PERSISTENT;
  78. kmem_entry->mode += 1;
  79. if (flags&KMEM_FLAG_HW) {
  80. if ((kmem_entry->refs&KMEM_REF_HW)==0)
  81. pcidriver_module_get(privdata);
  82. kmem_entry->refs |= KMEM_REF_HW;
  83. }
  84. if (flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
  85. privdata->kmem_cur_id = kmem_entry->id;
  86. return 0;
  87. }
  88. if (kmem_handle->flags&KMEM_FLAG_TRY) return -ENOENT;
  89. }
  90. /* First, allocate zeroed memory for the kmem_entry */
  91. if ((kmem_entry = kcalloc(1, sizeof(pcidriver_kmem_entry_t), GFP_KERNEL)) == NULL)
  92. goto kmem_alloc_entry_fail;
  93. /* Initialize the kmem_entry */
  94. kmem_entry->id = atomic_inc_return(&privdata->kmem_count) - 1;
  95. privdata->kmem_cur_id = kmem_entry->id;
  96. kmem_handle->handle_id = kmem_entry->id;
  97. kmem_entry->use = kmem_handle->use;
  98. kmem_entry->item = kmem_handle->item;
  99. kmem_entry->type = kmem_handle->type;
  100. kmem_entry->align = kmem_handle->align;
  101. kmem_entry->direction = PCI_DMA_NONE;
  102. /* Initialize sysfs if possible */
  103. if (pcidriver_sysfs_initialize_kmem(privdata, kmem_entry->id, &(kmem_entry->sysfs_attr)) != 0)
  104. goto kmem_alloc_mem_fail;
  105. /* ...and allocate the DMA memory */
  106. /* note this is a memory pair, referencing the same area: the cpu address (cpua)
  107. * and the PCI bus address (pa). The CPU and PCI addresses may not be the same.
  108. * The CPU sees only CPU addresses, while the device sees only PCI addresses.
  109. * CPU address is used for the mmap (internal to the driver), and
  110. * PCI address is the address passed to the DMA Controller in the device.
  111. */
  112. switch (kmem_entry->type&PCILIB_KMEM_TYPE_MASK) {
  113. case PCILIB_KMEM_TYPE_CONSISTENT:
  114. #ifdef PCIDRIVER_DUMMY_DEVICE
  115. retptr = kmalloc( kmem_handle->size, GFP_KERNEL);
  116. #else /* PCIDRIVER_DUMMY_DEVICE */
  117. retptr = pci_alloc_consistent( privdata->pdev, kmem_handle->size, &(kmem_entry->dma_handle) );
  118. #endif /* PCIDRIVER_DUMMY_DEVICE */
  119. break;
  120. case PCILIB_KMEM_TYPE_REGION:
  121. retptr = ioremap(kmem_handle->pa, kmem_handle->size);
  122. kmem_entry->dma_handle = kmem_handle->pa;
  123. if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_S2C) {
  124. kmem_entry->direction = PCI_DMA_TODEVICE;
  125. } else if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_C2S) {
  126. kmem_entry->direction = PCI_DMA_FROMDEVICE;
  127. }
  128. break;
  129. case PCILIB_KMEM_TYPE_PAGE:
  130. flags = GFP_KERNEL;
  131. if (kmem_handle->size == 0)
  132. kmem_handle->size = PAGE_SIZE;
  133. else if (kmem_handle->size%PAGE_SIZE)
  134. goto kmem_alloc_mem_fail;
  135. else
  136. flags |= __GFP_COMP;
  137. retptr = (void*)__get_free_pages(flags, get_order(kmem_handle->size));
  138. kmem_entry->dma_handle = 0;
  139. if (retptr) {
  140. #ifndef PCIDRIVER_DUMMY_DEVICE
  141. if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
  142. kmem_entry->direction = PCI_DMA_TODEVICE;
  143. kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_TODEVICE);
  144. if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
  145. free_pages((unsigned long)retptr, get_order(kmem_handle->size));
  146. goto kmem_alloc_mem_fail;
  147. }
  148. } else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
  149. kmem_entry->direction = PCI_DMA_FROMDEVICE;
  150. kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_FROMDEVICE);
  151. if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
  152. free_pages((unsigned long)retptr, get_order(kmem_handle->size));
  153. goto kmem_alloc_mem_fail;
  154. }
  155. }
  156. #endif /* ! PCIDRIVER_DUMMY_DEVICE */
  157. }
  158. break;
  159. default:
  160. goto kmem_alloc_mem_fail;
  161. }
  162. if (retptr == NULL)
  163. goto kmem_alloc_mem_fail;
  164. kmem_entry->size = kmem_handle->size;
  165. kmem_entry->cpua = (unsigned long)retptr;
  166. kmem_handle->ba = (unsigned long)(kmem_entry->dma_handle);
  167. kmem_handle->pa = virt_to_phys(retptr);
  168. kmem_entry->mode = 1;
  169. if (kmem_handle->flags&KMEM_FLAG_REUSE) {
  170. kmem_entry->mode |= KMEM_MODE_REUSABLE;
  171. if (kmem_handle->flags&KMEM_FLAG_EXCLUSIVE) kmem_entry->mode |= KMEM_MODE_EXCLUSIVE;
  172. if (kmem_handle->flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
  173. }
  174. kmem_entry->refs = 0;
  175. if (kmem_handle->flags&KMEM_FLAG_HW) {
  176. pcidriver_module_get(privdata);
  177. kmem_entry->refs |= KMEM_REF_HW;
  178. }
  179. kmem_handle->flags = 0;
  180. /* Add the kmem_entry to the list of the device */
  181. spin_lock( &(privdata->kmemlist_lock) );
  182. list_add_tail( &(kmem_entry->list), &(privdata->kmem_list) );
  183. spin_unlock( &(privdata->kmemlist_lock) );
  184. return 0;
  185. kmem_alloc_mem_fail:
  186. kfree(kmem_entry);
  187. kmem_alloc_entry_fail:
  188. return -ENOMEM;
  189. }
/*
 * Decide whether a kmem_entry may really be freed for the given request.
 *
 * Returns 1 when the caller should free the entry, 0 when the entry must be
 * kept (reusable / still counted), or -EBUSY when it is still referenced or
 * persistent.  With KMEM_FLAG_FORCE set, all module references held for the
 * entry are dropped and freeing is always allowed.
 */
static int pcidriver_kmem_free_check(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle, pcidriver_kmem_entry_t *kmem_entry) {
	if ((kmem_handle->flags & KMEM_FLAG_FORCE) == 0) {
		/* Drop one reuse count (kept in the low KMEM_MODE_COUNT bits of 'mode') */
		if (kmem_entry->mode&KMEM_MODE_COUNT)
			kmem_entry->mode -= 1;

		if (kmem_handle->flags&KMEM_FLAG_HW) {
			/* Release the module reference that was taken for the HW reference */
			if (kmem_entry->refs&KMEM_REF_HW)
				pcidriver_module_put(privdata);

			kmem_entry->refs &= ~KMEM_REF_HW;
		}

		if (kmem_handle->flags&KMEM_FLAG_PERSISTENT)
			kmem_entry->mode &= ~KMEM_MODE_PERSISTENT;

		/* Reusable entries are only unreferenced here, never destroyed */
		if (kmem_handle->flags&KMEM_FLAG_REUSE)
			return 0;

		if (kmem_entry->refs) {
			kmem_entry->mode += 1;		/* undo the count decrement above */
			mod_info("can't free referenced kmem_entry, refs = %lx\n", kmem_entry->refs);
			return -EBUSY;
		}

		if (kmem_entry->mode & KMEM_MODE_PERSISTENT) {
			kmem_entry->mode += 1;		/* undo the count decrement above */
			mod_info("can't free persistent kmem_entry\n");
			return -EBUSY;
		}

		/* Non-exclusive entries that are still counted stay alive for reuse */
		if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)==0)&&(kmem_entry->mode&KMEM_MODE_COUNT)&&((kmem_handle->flags&KMEM_FLAG_EXCLUSIVE)==0))
			return 0;
	} else {
		/* Forced free: drop the HW module reference ... */
		if (kmem_entry->refs&KMEM_REF_HW)
			pcidriver_module_put(privdata);

		/* ...and drain privdata->refs down to zero, putting the module once per
		 * reference; the final atomic_inc restores the counter to zero after
		 * atomic_add_negative overshot to -1. */
		while (!atomic_add_negative(-1, &(privdata->refs))) pcidriver_module_put(privdata);
		atomic_inc(&(privdata->refs));
	}

	return 1;
}
  223. static int pcidriver_kmem_free_use(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  224. {
  225. int err;
  226. int failed = 0;
  227. struct list_head *ptr, *next;
  228. pcidriver_kmem_entry_t *kmem_entry;
  229. /* iterate safely over the entries and delete them */
  230. list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
  231. kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  232. if (kmem_entry->use == kmem_handle->use) {
  233. err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
  234. if (err > 0)
  235. pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
  236. else
  237. failed = 1;
  238. }
  239. }
  240. if (failed) {
  241. mod_info("Some kmem_entries for use %lx are still referenced\n", kmem_handle->use);
  242. return -EBUSY;
  243. }
  244. return 0;
  245. }
  246. /**
  247. *
  248. * Called via sysfs, frees kernel memory and the corresponding management structure
  249. *
  250. */
  251. int pcidriver_kmem_free( pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle )
  252. {
  253. int err;
  254. pcidriver_kmem_entry_t *kmem_entry;
  255. if (kmem_handle->flags&KMEM_FLAG_MASS) {
  256. kmem_handle->flags &= ~KMEM_FLAG_MASS;
  257. return pcidriver_kmem_free_use(privdata, kmem_handle);
  258. }
  259. /* Find the associated kmem_entry for this buffer */
  260. if ((kmem_entry = pcidriver_kmem_find_entry(privdata, kmem_handle)) == NULL)
  261. return -EINVAL; /* kmem_handle is not valid */
  262. err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
  263. if (err > 0)
  264. return pcidriver_kmem_free_entry(privdata, kmem_entry);
  265. return err;
  266. }
  267. /**
  268. *
  269. * Called when cleaning up, frees all kernel memory and their corresponding management structure
  270. *
  271. */
  272. int pcidriver_kmem_free_all(pcidriver_privdata_t *privdata)
  273. {
  274. // int failed = 0;
  275. struct list_head *ptr, *next;
  276. pcidriver_kmem_entry_t *kmem_entry;
  277. /* iterate safely over the entries and delete them */
  278. list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
  279. kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  280. /*if (kmem_entry->refs)
  281. failed = 1;
  282. else*/
  283. pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
  284. }
  285. /*
  286. if (failed) {
  287. mod_info("Some kmem_entries are still referenced\n");
  288. return -EBUSY;
  289. }
  290. */
  291. return 0;
  292. }
  293. /**
  294. *
  295. * Synchronize memory to/from the device (or in both directions).
  296. *
  297. */
  298. int pcidriver_kmem_sync_entry( pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry, int direction)
  299. {
  300. if (kmem_entry->direction == PCI_DMA_NONE)
  301. return -EINVAL;
  302. #ifndef PCIDRIVER_DUMMY_DEVICE
  303. switch (direction) {
  304. case PCILIB_KMEM_SYNC_TODEVICE:
  305. pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  306. break;
  307. case PCILIB_KMEM_SYNC_FROMDEVICE:
  308. pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  309. break;
  310. case PCILIB_KMEM_SYNC_BIDIRECTIONAL:
  311. pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  312. pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  313. break;
  314. default:
  315. return -EINVAL; /* wrong direction parameter */
  316. }
  317. #endif /* ! PCIDRIVER_DUMMY_DEVICE */
  318. return 0; /* success */
  319. }
  320. /**
  321. *
  322. * Synchronize memory to/from the device (or in both directions).
  323. *
  324. */
  325. int pcidriver_kmem_sync( pcidriver_privdata_t *privdata, kmem_sync_t *kmem_sync )
  326. {
  327. pcidriver_kmem_entry_t *kmem_entry = NULL;
  328. /*
  329. * This is a shortcut to quickly find a next item in big multi-page kernel buffers
  330. */
  331. spin_lock(&(privdata->kmemlist_lock));
  332. if (privdata->kmem_last_sync) {
  333. if (privdata->kmem_last_sync->id == kmem_sync->handle.handle_id)
  334. kmem_entry = privdata->kmem_last_sync;
  335. else {
  336. privdata->kmem_last_sync = container_of(privdata->kmem_last_sync->list.next, pcidriver_kmem_entry_t, list);
  337. if (privdata->kmem_last_sync) {
  338. if (privdata->kmem_last_sync->id == kmem_sync->handle.handle_id)
  339. kmem_entry = privdata->kmem_last_sync;
  340. else
  341. privdata->kmem_last_sync = NULL;
  342. }
  343. }
  344. }
  345. spin_unlock(&(privdata->kmemlist_lock));
  346. /*
  347. * If not found go the standard way
  348. */
  349. if (!kmem_entry) {
  350. if ((kmem_entry = pcidriver_kmem_find_entry(privdata, &(kmem_sync->handle))) == NULL)
  351. return -EINVAL; /* kmem_handle is not valid */
  352. spin_lock(&(privdata->kmemlist_lock));
  353. privdata->kmem_last_sync = kmem_entry;
  354. spin_unlock(&(privdata->kmemlist_lock));
  355. }
  356. return pcidriver_kmem_sync_entry(privdata, kmem_entry, kmem_sync->dir);
  357. }
/**
 *
 * Free the given kmem_entry and its memory.
 *
 * Removes the sysfs attribute first, releases the backing memory according to
 * the entry type (unmapping DMA where required), then unlinks the entry from
 * the device list under the list lock and frees the management structure.
 * Always returns 0.
 */
int pcidriver_kmem_free_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry)
{
	pcidriver_sysfs_remove(privdata, &(kmem_entry->sysfs_attr));

	/* Release DMA memory */
	switch (kmem_entry->type&PCILIB_KMEM_TYPE_MASK) {
	case PCILIB_KMEM_TYPE_CONSISTENT:
#ifdef PCIDRIVER_DUMMY_DEVICE
		kfree((void*)(kmem_entry->cpua));
#else /* PCIDRIVER_DUMMY_DEVICE */
		pci_free_consistent( privdata->pdev, kmem_entry->size, (void *)(kmem_entry->cpua), kmem_entry->dma_handle );
#endif /* PCIDRIVER_DUMMY_DEVICE */
		break;
	case PCILIB_KMEM_TYPE_REGION:
		/* only the ioremap mapping was created by us; the region itself stays */
		iounmap((void *)(kmem_entry->cpua));
		break;
	case PCILIB_KMEM_TYPE_PAGE:
#ifndef PCIDRIVER_DUMMY_DEVICE
		/* a non-zero dma_handle means the pages were mapped with pci_map_single */
		if (kmem_entry->dma_handle) {
			if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
				pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_TODEVICE);
			} else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
				pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_FROMDEVICE);
			}
		}
#endif /* ! PCIDRIVER_DUMMY_DEVICE */
		free_pages((unsigned long)kmem_entry->cpua, get_order(kmem_entry->size));
		break;
	}

	/* Remove the kmem list entry */
	spin_lock( &(privdata->kmemlist_lock) );
	/* invalidate the sync shortcut if it points at the entry being freed */
	if (privdata->kmem_last_sync == kmem_entry)
		privdata->kmem_last_sync = NULL;
	list_del( &(kmem_entry->list) );
	spin_unlock( &(privdata->kmemlist_lock) );

	/* Release kmem_entry memory */
	kfree(kmem_entry);

	return 0;
}
  401. /**
  402. *
  403. * Find the corresponding kmem_entry for the given kmem_handle.
  404. *
  405. */
  406. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  407. {
  408. struct list_head *ptr;
  409. pcidriver_kmem_entry_t *entry, *result = NULL;
  410. /* should I implement it better using the handle_id? */
  411. spin_lock(&(privdata->kmemlist_lock));
  412. list_for_each(ptr, &(privdata->kmem_list)) {
  413. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  414. if (entry->id == kmem_handle->handle_id) {
  415. result = entry;
  416. break;
  417. }
  418. }
  419. spin_unlock(&(privdata->kmemlist_lock));
  420. return result;
  421. }
  422. /**
  423. *
  424. * find the corresponding kmem_entry for the given id.
  425. *
  426. */
  427. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_id(pcidriver_privdata_t *privdata, int id)
  428. {
  429. struct list_head *ptr;
  430. pcidriver_kmem_entry_t *entry, *result = NULL;
  431. spin_lock(&(privdata->kmemlist_lock));
  432. list_for_each(ptr, &(privdata->kmem_list)) {
  433. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  434. if (entry->id == id) {
  435. result = entry;
  436. break;
  437. }
  438. }
  439. spin_unlock(&(privdata->kmemlist_lock));
  440. return result;
  441. }
  442. /**
  443. *
  444. * find the corresponding kmem_entry for the given use and item.
  445. *
  446. */
  447. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_use(pcidriver_privdata_t *privdata, unsigned long use, unsigned long item)
  448. {
  449. struct list_head *ptr;
  450. pcidriver_kmem_entry_t *entry, *result = NULL;
  451. spin_lock(&(privdata->kmemlist_lock));
  452. list_for_each(ptr, &(privdata->kmem_list)) {
  453. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  454. if ((entry->use == use)&&(entry->item == item)&&(entry->mode&KMEM_MODE_REUSABLE)) {
  455. result = entry;
  456. break;
  457. }
  458. }
  459. spin_unlock(&(privdata->kmemlist_lock));
  460. return result;
  461. }
  462. void pcidriver_kmem_mmap_close(struct vm_area_struct *vma) {
  463. unsigned long vma_size;
  464. pcidriver_kmem_entry_t *kmem_entry = (pcidriver_kmem_entry_t*)vma->vm_private_data;
  465. if (kmem_entry) {
  466. /*
  467. if (kmem_entry->id == 0) {
  468. mod_info("refs: %p %p %lx\n", vma, vma->vm_private_data, kmem_entry->refs);
  469. mod_info("kmem_size: %lu vma_size: %lu, s: %lx, e: %lx\n", kmem_entry->size, (vma->vm_end - vma->vm_start), vma->vm_start, vma->vm_end);
  470. }
  471. */
  472. vma_size = (vma->vm_end - vma->vm_start);
  473. if (kmem_entry->refs&KMEM_REF_COUNT) {
  474. kmem_entry->refs -= vma_size / PAGE_SIZE;
  475. }
  476. }
  477. }
/* VMA operations for kmem mappings: only 'close' is needed, to release
 * the mapping reference counts held by the entry. */
static struct vm_operations_struct pcidriver_kmem_mmap_ops = {
	.close = pcidriver_kmem_mmap_close
};
  481. /**
  482. *
  483. * mmap() kernel memory to userspace.
  484. *
  485. */
  486. int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *vma)
  487. {
  488. unsigned long vma_size;
  489. pcidriver_kmem_entry_t *kmem_entry;
  490. int ret;
  491. mod_info_dbg("Entering mmap_kmem\n");
  492. /* FIXME: Is this really right? Always just the latest one? Can't we identify one? */
  493. /* Get latest entry on the kmem_list */
  494. kmem_entry = pcidriver_kmem_find_entry_id(privdata, privdata->kmem_cur_id);
  495. if (!kmem_entry) {
  496. mod_info("Trying to mmap a kernel memory buffer without creating it first!\n");
  497. return -EFAULT;
  498. }
  499. mod_info_dbg("Got kmem_entry with id: %d\n", kmem_entry->id);
  500. /* Check sizes */
  501. vma_size = (vma->vm_end - vma->vm_start);
  502. if ((vma_size > kmem_entry->size) &&
  503. ((kmem_entry->size < PAGE_SIZE) && (vma_size != PAGE_SIZE))) {
  504. mod_info("kem_entry size(%lu) and vma size do not match(%lu)\n", kmem_entry->size, vma_size);
  505. return -EINVAL;
  506. }
  507. /* reference counting */
  508. if ((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)&&(kmem_entry->refs&KMEM_REF_COUNT)) {
  509. mod_info("can't make second mmaping for exclusive kmem_entry\n");
  510. return -EBUSY;
  511. }
  512. if (((kmem_entry->refs&KMEM_REF_COUNT) + (vma_size / PAGE_SIZE)) > KMEM_REF_COUNT) {
  513. mod_info("maximal amount of references is reached by kmem_entry\n");
  514. return -EBUSY;
  515. }
  516. kmem_entry->refs += vma_size / PAGE_SIZE;
  517. vma->vm_flags |= (VM_RESERVED);
  518. // This is coherent memory, so it must not be cached.
  519. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  520. mod_info_dbg("Mapping address %08lx / PFN %08lx\n",
  521. virt_to_phys((void*)kmem_entry->cpua),
  522. page_to_pfn(virt_to_page((void*)kmem_entry->cpua)));
  523. if ((kmem_entry->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
  524. ret = remap_pfn_range_compat(vma, vma->vm_start, kmem_entry->dma_handle, (vma_size < kmem_entry->size)?vma_size:kmem_entry->size, vma->vm_page_prot);
  525. } else {
  526. ret = remap_pfn_range_cpua_compat(vma, vma->vm_start, kmem_entry->cpua, (vma_size < kmem_entry->size)?vma_size:kmem_entry->size, vma->vm_page_prot);
  527. }
  528. if (ret) {
  529. mod_info("kmem remap failed: %d (%lx)\n", ret,kmem_entry->cpua);
  530. kmem_entry->refs -= 1;
  531. return -EAGAIN;
  532. }
  533. vma->vm_ops = &pcidriver_kmem_mmap_ops;
  534. vma->vm_private_data = (void*)kmem_entry;
  535. return ret;
  536. }