kmem.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657
  1. /**
  2. *
  3. * @file kmem.c
  4. * @brief This file contains all functions dealing with kernel memory.
  5. * @author Guillermo Marcus
  6. * @date 2009-04-05
  7. *
  8. */
  9. #include <linux/version.h>
  10. #include <linux/string.h>
  11. #include <linux/types.h>
  12. #include <linux/list.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/pci.h>
  15. #include <linux/cdev.h>
  16. #include <linux/wait.h>
  17. #include <linux/mm.h>
  18. #include <linux/pagemap.h>
  19. #include "config.h" /* compile-time configuration */
  20. #include "compat.h" /* compatibility definitions for older linux */
  21. #include "pciDriver.h" /* external interface for the driver */
  22. #include "common.h" /* internal definitions for all parts */
  23. #include "kmem.h" /* prototypes for kernel memory */
  24. #include "sysfs.h" /* prototypes for sysfs */
  25. /* VM_RESERVED is removed in 3.7-rc1 */
  26. #ifndef VM_RESERVED
  27. # define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
  28. #endif
  29. /**
  30. *
  31. * Allocates new kernel memory including the corresponding management structure, makes
  32. * it available via sysfs if possible.
  33. *
  34. */
  35. int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  36. {
  37. int flags;
  38. pcidriver_kmem_entry_t *kmem_entry;
  39. void *retptr;
  40. if (kmem_handle->flags&KMEM_FLAG_REUSE) {
  41. kmem_entry = pcidriver_kmem_find_entry_use(privdata, kmem_handle->use, kmem_handle->item);
  42. if (kmem_entry) {
  43. unsigned long flags = kmem_handle->flags;
  44. if (flags&KMEM_FLAG_TRY) {
  45. kmem_handle->type = kmem_entry->type;
  46. kmem_handle->size = kmem_entry->size;
  47. kmem_handle->align = kmem_entry->align;
  48. } else {
  49. if (kmem_handle->type != kmem_entry->type) {
  50. mod_info("Invalid type of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->type, kmem_handle->type);
  51. return -EINVAL;
  52. }
  53. if ((kmem_handle->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE) {
  54. kmem_handle->size = kmem_entry->size;
  55. } else if (kmem_handle->size != kmem_entry->size) {
  56. mod_info("Invalid size of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->size, kmem_handle->size);
  57. return -EINVAL;
  58. }
  59. if (kmem_handle->align != kmem_entry->align) {
  60. mod_info("Invalid alignment of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->align, kmem_handle->align);
  61. return -EINVAL;
  62. }
  63. if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?1:0) != ((flags&KMEM_FLAG_EXCLUSIVE)?1:0)) {
  64. mod_info("Invalid mode of reusable kmem_entry\n");
  65. return -EINVAL;
  66. }
  67. }
  68. if ((kmem_entry->mode&KMEM_MODE_COUNT)==KMEM_MODE_COUNT) {
  69. mod_info("Reuse counter of kmem_entry is overflown");
  70. return -EBUSY;
  71. }
  72. kmem_handle->handle_id = kmem_entry->id;
  73. kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
  74. kmem_handle->flags = KMEM_FLAG_REUSED;
  75. if (kmem_entry->refs&KMEM_REF_HW) kmem_handle->flags |= KMEM_FLAG_REUSED_HW;
  76. if (kmem_entry->mode&KMEM_MODE_PERSISTENT) kmem_handle->flags |= KMEM_FLAG_REUSED_PERSISTENT;
  77. kmem_entry->mode += 1;
  78. if (flags&KMEM_FLAG_HW) {
  79. if ((kmem_entry->refs&KMEM_REF_HW)==0)
  80. pcidriver_module_get(privdata);
  81. kmem_entry->refs |= KMEM_REF_HW;
  82. }
  83. if (flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
  84. privdata->kmem_cur_id = kmem_entry->id;
  85. return 0;
  86. }
  87. if (kmem_handle->flags&KMEM_FLAG_TRY) return -ENOENT;
  88. }
  89. /* First, allocate zeroed memory for the kmem_entry */
  90. if ((kmem_entry = kcalloc(1, sizeof(pcidriver_kmem_entry_t), GFP_KERNEL)) == NULL)
  91. goto kmem_alloc_entry_fail;
  92. /* Initialize the kmem_entry */
  93. kmem_entry->id = atomic_inc_return(&privdata->kmem_count) - 1;
  94. privdata->kmem_cur_id = kmem_entry->id;
  95. kmem_handle->handle_id = kmem_entry->id;
  96. kmem_entry->use = kmem_handle->use;
  97. kmem_entry->item = kmem_handle->item;
  98. kmem_entry->type = kmem_handle->type;
  99. kmem_entry->align = kmem_handle->align;
  100. kmem_entry->direction = PCI_DMA_NONE;
  101. /* Initialize sysfs if possible */
  102. if (pcidriver_sysfs_initialize_kmem(privdata, kmem_entry->id, &(kmem_entry->sysfs_attr)) != 0)
  103. goto kmem_alloc_mem_fail;
  104. /* ...and allocate the DMA memory */
  105. /* note this is a memory pair, referencing the same area: the cpu address (cpua)
  106. * and the PCI bus address (pa). The CPU and PCI addresses may not be the same.
  107. * The CPU sees only CPU addresses, while the device sees only PCI addresses.
  108. * CPU address is used for the mmap (internal to the driver), and
  109. * PCI address is the address passed to the DMA Controller in the device.
  110. */
  111. switch (kmem_entry->type&PCILIB_KMEM_TYPE_MASK) {
  112. case PCILIB_KMEM_TYPE_CONSISTENT:
  113. retptr = pci_alloc_consistent( privdata->pdev, kmem_handle->size, &(kmem_entry->dma_handle) );
  114. break;
  115. case PCILIB_KMEM_TYPE_REGION:
  116. retptr = ioremap(kmem_handle->pa, kmem_handle->size);
  117. kmem_entry->dma_handle = kmem_handle->pa;
  118. if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_S2C) {
  119. kmem_entry->direction = PCI_DMA_TODEVICE;
  120. } else if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_C2S) {
  121. kmem_entry->direction = PCI_DMA_FROMDEVICE;
  122. }
  123. break;
  124. case PCILIB_KMEM_TYPE_PAGE:
  125. flags = GFP_KERNEL;
  126. if (kmem_handle->size == 0)
  127. kmem_handle->size = PAGE_SIZE;
  128. else if (kmem_handle->size%PAGE_SIZE)
  129. goto kmem_alloc_mem_fail;
  130. retptr = (void*)__get_free_pages(flags, get_order(kmem_handle->size));
  131. kmem_entry->dma_handle = 0;
  132. if (retptr) {
  133. if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
  134. kmem_entry->direction = PCI_DMA_TODEVICE;
  135. kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_TODEVICE);
  136. if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
  137. free_pages((unsigned long)retptr, get_order(kmem_handle->size));
  138. goto kmem_alloc_mem_fail;
  139. }
  140. } else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
  141. kmem_entry->direction = PCI_DMA_FROMDEVICE;
  142. kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_FROMDEVICE);
  143. if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
  144. free_pages((unsigned long)retptr, get_order(kmem_handle->size));
  145. goto kmem_alloc_mem_fail;
  146. }
  147. }
  148. }
  149. break;
  150. default:
  151. goto kmem_alloc_mem_fail;
  152. }
  153. if (retptr == NULL)
  154. goto kmem_alloc_mem_fail;
  155. kmem_entry->size = kmem_handle->size;
  156. kmem_entry->cpua = (unsigned long)retptr;
  157. kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
  158. kmem_entry->mode = 1;
  159. if (kmem_handle->flags&KMEM_FLAG_REUSE) {
  160. kmem_entry->mode |= KMEM_MODE_REUSABLE;
  161. if (kmem_handle->flags&KMEM_FLAG_EXCLUSIVE) kmem_entry->mode |= KMEM_MODE_EXCLUSIVE;
  162. if (kmem_handle->flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
  163. }
  164. kmem_entry->refs = 0;
  165. if (kmem_handle->flags&KMEM_FLAG_HW) {
  166. pcidriver_module_get(privdata);
  167. kmem_entry->refs |= KMEM_REF_HW;
  168. }
  169. kmem_handle->flags = 0;
  170. set_pages_reserved_compat(kmem_entry->cpua, kmem_entry->size);
  171. /* Add the kmem_entry to the list of the device */
  172. spin_lock( &(privdata->kmemlist_lock) );
  173. list_add_tail( &(kmem_entry->list), &(privdata->kmem_list) );
  174. spin_unlock( &(privdata->kmemlist_lock) );
  175. return 0;
  176. kmem_alloc_mem_fail:
  177. kfree(kmem_entry);
  178. kmem_alloc_entry_fail:
  179. return -ENOMEM;
  180. }
/**
 * Decide whether a kmem_entry may actually be freed, dropping the caller's
 * references on it.
 *
 * Without KMEM_FLAG_FORCE: one use count (low bits of 'mode') is released,
 * HW/persistent references requested via kmem_handle flags are dropped, and
 * the entry is only releasable once nothing references it any more.
 * With KMEM_FLAG_FORCE: every module reference this device still holds is
 * dropped unconditionally.
 *
 * @return 1 if the caller should free the entry, 0 to keep it alive
 *         (still reusable / persistent / in use elsewhere), -EBUSY if it
 *         is still referenced or persistent and cannot be freed.
 */
static int pcidriver_kmem_free_check(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle, pcidriver_kmem_entry_t *kmem_entry) {
	if ((kmem_handle->flags & KMEM_FLAG_FORCE) == 0) {
		/* release one use count, if any are held */
		if (kmem_entry->mode&KMEM_MODE_COUNT)
			kmem_entry->mode -= 1;
		if (kmem_handle->flags&KMEM_FLAG_HW) {
			/* the HW reference pins the module; unpin when dropping it */
			if (kmem_entry->refs&KMEM_REF_HW)
				pcidriver_module_put(privdata);
			kmem_entry->refs &= ~KMEM_REF_HW;
		}
		if (kmem_handle->flags&KMEM_FLAG_PERSISTENT)
			kmem_entry->mode &= ~KMEM_MODE_PERSISTENT;
		/* with REUSE the entry is kept around for later reuse */
		if (kmem_handle->flags&KMEM_FLAG_REUSE)
			return 0;
		if (kmem_entry->refs) {
			kmem_entry->mode += 1;	/* restore the count dropped above */
			mod_info("can't free referenced kmem_entry\n");
			return -EBUSY;
		}
		if (kmem_entry->mode & KMEM_MODE_PERSISTENT) {
			kmem_entry->mode += 1;	/* restore the count dropped above */
			mod_info("can't free persistent kmem_entry\n");
			return -EBUSY;
		}
		/* non-exclusive entries still counted by other users stay alive */
		if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)==0)&&(kmem_entry->mode&KMEM_MODE_COUNT)&&((kmem_handle->flags&KMEM_FLAG_EXCLUSIVE)==0))
			return 0;
	} else {
		if (kmem_entry->refs&KMEM_REF_HW)
			pcidriver_module_put(privdata);
		/* forcibly drop every remaining module reference of this device;
		 * atomic_add_negative stops once the counter goes below zero, the
		 * final inc restores it to zero */
		while (!atomic_add_negative(-1, &(privdata->refs))) pcidriver_module_put(privdata);
		atomic_inc(&(privdata->refs));
	}
	return 1;
}
  214. static int pcidriver_kmem_free_use(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  215. {
  216. int err;
  217. int failed = 0;
  218. struct list_head *ptr, *next;
  219. pcidriver_kmem_entry_t *kmem_entry;
  220. /* iterate safely over the entries and delete them */
  221. list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
  222. kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  223. if (kmem_entry->use == kmem_handle->use) {
  224. err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
  225. if (err > 0)
  226. pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
  227. else
  228. failed = 1;
  229. }
  230. }
  231. if (failed) {
  232. mod_info("Some kmem_entries for use %lx are still referenced\n", kmem_handle->use);
  233. return -EBUSY;
  234. }
  235. return 0;
  236. }
  237. /**
  238. *
  239. * Called via sysfs, frees kernel memory and the corresponding management structure
  240. *
  241. */
  242. int pcidriver_kmem_free( pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle )
  243. {
  244. int err;
  245. pcidriver_kmem_entry_t *kmem_entry;
  246. if (kmem_handle->flags&KMEM_FLAG_MASS) {
  247. kmem_handle->flags &= ~KMEM_FLAG_MASS;
  248. return pcidriver_kmem_free_use(privdata, kmem_handle);
  249. }
  250. /* Find the associated kmem_entry for this buffer */
  251. if ((kmem_entry = pcidriver_kmem_find_entry(privdata, kmem_handle)) == NULL)
  252. return -EINVAL; /* kmem_handle is not valid */
  253. err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
  254. if (err > 0)
  255. return pcidriver_kmem_free_entry(privdata, kmem_entry);
  256. return err;
  257. }
  258. /**
  259. *
  260. * Called when cleaning up, frees all kernel memory and their corresponding management structure
  261. *
  262. */
  263. int pcidriver_kmem_free_all(pcidriver_privdata_t *privdata)
  264. {
  265. // int failed = 0;
  266. struct list_head *ptr, *next;
  267. pcidriver_kmem_entry_t *kmem_entry;
  268. /* iterate safely over the entries and delete them */
  269. list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
  270. kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  271. /*if (kmem_entry->refs)
  272. failed = 1;
  273. else*/
  274. pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
  275. }
  276. /*
  277. if (failed) {
  278. mod_info("Some kmem_entries are still referenced\n");
  279. return -EBUSY;
  280. }
  281. */
  282. return 0;
  283. }
  284. /**
  285. *
  286. * Synchronize memory to/from the device (or in both directions).
  287. *
  288. */
  289. int pcidriver_kmem_sync_entry( pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry, int direction)
  290. {
  291. if (kmem_entry->direction == PCI_DMA_NONE)
  292. return -EINVAL;
  293. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
  294. switch (direction) {
  295. case PCILIB_KMEM_SYNC_TODEVICE:
  296. pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  297. break;
  298. case PCILIB_KMEM_SYNC_FROMDEVICE:
  299. pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  300. break;
  301. case PCILIB_KMEM_SYNC_BIDIRECTIONAL:
  302. pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  303. pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  304. break;
  305. default:
  306. return -EINVAL; /* wrong direction parameter */
  307. }
  308. #else
  309. switch (direction) {
  310. case PCILIB_KMEM_SYNC_TODEVICE:
  311. pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  312. break;
  313. case PCILIB_KMEM_SYNC_FROMDEVICE:
  314. pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  315. break;
  316. case PCILIB_KMEM_SYNC_BIDIRECTIONAL:
  317. pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  318. break;
  319. default:
  320. return -EINVAL; /* wrong direction parameter */
  321. }
  322. #endif
  323. return 0; /* success */
  324. }
  325. /**
  326. *
  327. * Synchronize memory to/from the device (or in both directions).
  328. *
  329. */
  330. int pcidriver_kmem_sync( pcidriver_privdata_t *privdata, kmem_sync_t *kmem_sync )
  331. {
  332. pcidriver_kmem_entry_t *kmem_entry;
  333. /* Find the associated kmem_entry for this buffer */
  334. if ((kmem_entry = pcidriver_kmem_find_entry(privdata, &(kmem_sync->handle))) == NULL)
  335. return -EINVAL; /* kmem_handle is not valid */
  336. return pcidriver_kmem_sync_entry(privdata, kmem_entry, kmem_sync->dir);
  337. }
/**
 * Free the given kmem_entry and its memory.
 *
 * Unregisters the sysfs attribute, releases the DMA mapping/allocation
 * according to the entry type, unlinks the entry from the device list and
 * frees the management structure itself.
 *
 * @return always 0
 */
int pcidriver_kmem_free_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry)
{
	pcidriver_sysfs_remove(privdata, &(kmem_entry->sysfs_attr));

	/* Go over the pages of the kmem buffer, and mark them as not reserved */
#if 0
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
	/*
	 * This code is DISABLED.
	 * Apparently, it is not needed to unreserve them. Doing so here
	 * hangs the machine. Why?
	 *
	 * Uhm.. see links:
	 *
	 * http://lwn.net/Articles/161204/
	 * http://lists.openfabrics.org/pipermail/general/2007-March/034101.html
	 *
	 * I insist, this should be enabled, but doing so hangs the machine.
	 * Literature supports the point, and there is even a similar problem (see link)
	 * But this is not the case. It seems right to me. but obviously is not.
	 *
	 * Anyway, this goes away in kernel >=2.6.15.
	 */
	unsigned long start = __pa(kmem_entry->cpua) >> PAGE_SHIFT;
	unsigned long end = __pa(kmem_entry->cpua + kmem_entry->size) >> PAGE_SHIFT;
	unsigned long i;
	for(i=start;i<end;i++) {
		struct page *kpage = pfn_to_page(i);
		ClearPageReserved(kpage);
	}
#endif
#endif

	/* Release DMA memory; no default case needed — unknown types simply
	 * have no backing memory to release here */
	switch (kmem_entry->type&PCILIB_KMEM_TYPE_MASK) {
	case PCILIB_KMEM_TYPE_CONSISTENT:
		pci_free_consistent( privdata->pdev, kmem_entry->size, (void *)(kmem_entry->cpua), kmem_entry->dma_handle );
		break;
	case PCILIB_KMEM_TYPE_REGION:
		/* region memory is only mapped, never allocated by us */
		iounmap((void *)(kmem_entry->cpua));
		break;
	case PCILIB_KMEM_TYPE_PAGE:
		/* undo the streaming DMA mapping first, if one was created */
		if (kmem_entry->dma_handle) {
			if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
				pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_TODEVICE);
			} else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
				pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_FROMDEVICE);
			}
		}
		free_pages((unsigned long)kmem_entry->cpua, get_order(kmem_entry->size));
		break;
	}

	/* Remove the kmem list entry */
	spin_lock( &(privdata->kmemlist_lock) );
	list_del( &(kmem_entry->list) );
	spin_unlock( &(privdata->kmemlist_lock) );

	/* Release kmem_entry memory */
	kfree(kmem_entry);
	return 0;
}
  401. /**
  402. *
  403. * Find the corresponding kmem_entry for the given kmem_handle.
  404. *
  405. */
  406. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  407. {
  408. struct list_head *ptr;
  409. pcidriver_kmem_entry_t *entry, *result = NULL;
  410. /* should I implement it better using the handle_id? */
  411. spin_lock(&(privdata->kmemlist_lock));
  412. list_for_each(ptr, &(privdata->kmem_list)) {
  413. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  414. if (entry->id == kmem_handle->handle_id) {
  415. result = entry;
  416. break;
  417. }
  418. }
  419. spin_unlock(&(privdata->kmemlist_lock));
  420. return result;
  421. }
  422. /**
  423. *
  424. * find the corresponding kmem_entry for the given id.
  425. *
  426. */
  427. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_id(pcidriver_privdata_t *privdata, int id)
  428. {
  429. struct list_head *ptr;
  430. pcidriver_kmem_entry_t *entry, *result = NULL;
  431. spin_lock(&(privdata->kmemlist_lock));
  432. list_for_each(ptr, &(privdata->kmem_list)) {
  433. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  434. if (entry->id == id) {
  435. result = entry;
  436. break;
  437. }
  438. }
  439. spin_unlock(&(privdata->kmemlist_lock));
  440. return result;
  441. }
  442. /**
  443. *
  444. * find the corresponding kmem_entry for the given use and item.
  445. *
  446. */
  447. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_use(pcidriver_privdata_t *privdata, unsigned long use, unsigned long item)
  448. {
  449. struct list_head *ptr;
  450. pcidriver_kmem_entry_t *entry, *result = NULL;
  451. spin_lock(&(privdata->kmemlist_lock));
  452. list_for_each(ptr, &(privdata->kmem_list)) {
  453. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  454. if ((entry->use == use)&&(entry->item == item)&&(entry->mode&KMEM_MODE_REUSABLE)) {
  455. result = entry;
  456. break;
  457. }
  458. }
  459. spin_unlock(&(privdata->kmemlist_lock));
  460. return result;
  461. }
  462. void pcidriver_kmem_mmap_close(struct vm_area_struct *vma) {
  463. unsigned long vma_size;
  464. pcidriver_kmem_entry_t *kmem_entry = (pcidriver_kmem_entry_t*)vma->vm_private_data;
  465. if (kmem_entry) {
  466. /*
  467. if (kmem_entry->id == 0) {
  468. mod_info("refs: %p %p %lx\n", vma, vma->vm_private_data, kmem_entry->refs);
  469. mod_info("kmem_size: %lu vma_size: %lu, s: %lx, e: %lx\n", kmem_entry->size, (vma->vm_end - vma->vm_start), vma->vm_start, vma->vm_end);
  470. }
  471. */
  472. vma_size = (vma->vm_end - vma->vm_start);
  473. if (kmem_entry->refs&KMEM_REF_COUNT) {
  474. kmem_entry->refs -= vma_size / PAGE_SIZE;
  475. }
  476. }
  477. }
/* VMA callbacks for kmem mappings: only .close is needed, for the
 * per-page reference accounting done in pcidriver_kmem_mmap_close */
static struct vm_operations_struct pcidriver_kmem_mmap_ops = {
	.close = pcidriver_kmem_mmap_close
};
  481. /**
  482. *
  483. * mmap() kernel memory to userspace.
  484. *
  485. */
  486. int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *vma)
  487. {
  488. unsigned long vma_size;
  489. pcidriver_kmem_entry_t *kmem_entry;
  490. int ret;
  491. mod_info_dbg("Entering mmap_kmem\n");
  492. /* FIXME: Is this really right? Always just the latest one? Can't we identify one? */
  493. /* Get latest entry on the kmem_list */
  494. kmem_entry = pcidriver_kmem_find_entry_id(privdata, privdata->kmem_cur_id);
  495. if (!kmem_entry) {
  496. mod_info("Trying to mmap a kernel memory buffer without creating it first!\n");
  497. return -EFAULT;
  498. }
  499. mod_info_dbg("Got kmem_entry with id: %d\n", kmem_entry->id);
  500. /* Check sizes */
  501. vma_size = (vma->vm_end - vma->vm_start);
  502. if ((vma_size > kmem_entry->size) &&
  503. ((kmem_entry->size < PAGE_SIZE) && (vma_size != PAGE_SIZE))) {
  504. mod_info("kem_entry size(%lu) and vma size do not match(%lu)\n", kmem_entry->size, vma_size);
  505. return -EINVAL;
  506. }
  507. /* reference counting */
  508. if ((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)&&(kmem_entry->refs&KMEM_REF_COUNT)) {
  509. mod_info("can't make second mmaping for exclusive kmem_entry\n");
  510. return -EBUSY;
  511. }
  512. if (((kmem_entry->refs&KMEM_REF_COUNT) + (vma_size / PAGE_SIZE)) > KMEM_REF_COUNT) {
  513. mod_info("maximal amount of references is reached by kmem_entry\n");
  514. return -EBUSY;
  515. }
  516. kmem_entry->refs += vma_size / PAGE_SIZE;
  517. vma->vm_flags |= (VM_RESERVED);
  518. #ifdef pgprot_noncached
  519. // This is coherent memory, so it must not be cached.
  520. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  521. #endif
  522. mod_info_dbg("Mapping address %08lx / PFN %08lx\n",
  523. virt_to_phys((void*)kmem_entry->cpua),
  524. page_to_pfn(virt_to_page((void*)kmem_entry->cpua)));
  525. if ((kmem_entry->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
  526. ret = remap_pfn_range_compat(
  527. vma,
  528. vma->vm_start,
  529. kmem_entry->dma_handle,
  530. (vma_size < kmem_entry->size)?vma_size:kmem_entry->size,
  531. vma->vm_page_prot);
  532. } else {
  533. ret = remap_pfn_range_cpua_compat(
  534. vma,
  535. vma->vm_start,
  536. kmem_entry->cpua,
  537. (vma_size < kmem_entry->size)?vma_size:kmem_entry->size,
  538. vma->vm_page_prot );
  539. }
  540. if (ret) {
  541. mod_info("kmem remap failed: %d (%lx)\n", ret,kmem_entry->cpua);
  542. kmem_entry->refs -= 1;
  543. return -EAGAIN;
  544. }
  545. vma->vm_ops = &pcidriver_kmem_mmap_ops;
  546. vma->vm_private_data = (void*)kmem_entry;
  547. return ret;
  548. }