/**
 *
 * @file kmem.c
 * @brief This file contains all functions dealing with kernel memory.
 * @author Guillermo Marcus
 * @date 2009-04-05
 *
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include "config.h"    /* compile-time configuration */
#include "compat.h"    /* compatibility definitions for older linux */
#include "pciDriver.h" /* external interface for the driver */
#include "common.h"    /* internal definitions for all parts */
#include "kmem.h"      /* prototypes for kernel memory */
#include "sysfs.h"     /* prototypes for sysfs */

/* VM_RESERVED is removed in 3.7-rc1 */
#ifndef VM_RESERVED
# define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
#endif
  29. /**
  30. *
  31. * Allocates new kernel memory including the corresponding management structure, makes
  32. * it available via sysfs if possible.
  33. *
  34. */
  35. int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  36. {
  37. int flags;
  38. pcidriver_kmem_entry_t *kmem_entry;
  39. void *retptr;
  40. if (kmem_handle->flags&KMEM_FLAG_REUSE) {
  41. kmem_entry = pcidriver_kmem_find_entry_use(privdata, kmem_handle->use, kmem_handle->item);
  42. if (kmem_entry) {
  43. unsigned long flags = kmem_handle->flags;
  44. if (flags&KMEM_FLAG_TRY) {
  45. kmem_handle->type = kmem_entry->type;
  46. kmem_handle->size = kmem_entry->size;
  47. kmem_handle->align = kmem_entry->align;
  48. } else {
  49. if (kmem_handle->type != kmem_entry->type) {
  50. mod_info("Invalid type of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->type, kmem_handle->type);
  51. kmem_handle->type = kmem_entry->type;
  52. return -EINVAL;
  53. }
  54. if (((kmem_handle->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE)&&(kmem_handle->size == 0)) {
  55. kmem_handle->size = kmem_entry->size;
  56. } else if (kmem_handle->size != kmem_entry->size) {
  57. mod_info("Invalid size of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->size, kmem_handle->size);
  58. kmem_handle->size = kmem_entry->size;
  59. return -EINVAL;
  60. }
  61. if (kmem_handle->align != kmem_entry->align) {
  62. mod_info("Invalid alignment of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->align, kmem_handle->align);
  63. kmem_handle->align = kmem_entry->align;
  64. return -EINVAL;
  65. }
  66. if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?1:0) != ((flags&KMEM_FLAG_EXCLUSIVE)?1:0)) {
  67. mod_info("Invalid mode of reusable kmem_entry\n");
  68. kmem_handle->flags = (kmem_entry->mode&KMEM_MODE_EXCLUSIVE)?KMEM_FLAG_EXCLUSIVE:0;
  69. return -EINVAL;
  70. }
  71. }
  72. if ((kmem_entry->mode&KMEM_MODE_COUNT)==KMEM_MODE_COUNT) {
  73. mod_info("Reuse counter of kmem_entry is overflown");
  74. return -EBUSY;
  75. }
  76. kmem_handle->handle_id = kmem_entry->id;
  77. kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
  78. kmem_handle->flags = KMEM_FLAG_REUSED;
  79. if (kmem_entry->refs&KMEM_REF_HW) kmem_handle->flags |= KMEM_FLAG_REUSED_HW;
  80. if (kmem_entry->mode&KMEM_MODE_PERSISTENT) kmem_handle->flags |= KMEM_FLAG_REUSED_PERSISTENT;
  81. kmem_entry->mode += 1;
  82. if (flags&KMEM_FLAG_HW) {
  83. if ((kmem_entry->refs&KMEM_REF_HW)==0)
  84. pcidriver_module_get(privdata);
  85. kmem_entry->refs |= KMEM_REF_HW;
  86. }
  87. if (flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
  88. privdata->kmem_cur_id = kmem_entry->id;
  89. return 0;
  90. }
  91. if (kmem_handle->flags&KMEM_FLAG_TRY) return -ENOENT;
  92. }
  93. /* First, allocate zeroed memory for the kmem_entry */
  94. if ((kmem_entry = kcalloc(1, sizeof(pcidriver_kmem_entry_t), GFP_KERNEL)) == NULL)
  95. goto kmem_alloc_entry_fail;
  96. /* Initialize the kmem_entry */
  97. kmem_entry->id = atomic_inc_return(&privdata->kmem_count) - 1;
  98. privdata->kmem_cur_id = kmem_entry->id;
  99. kmem_handle->handle_id = kmem_entry->id;
  100. kmem_entry->use = kmem_handle->use;
  101. kmem_entry->item = kmem_handle->item;
  102. kmem_entry->type = kmem_handle->type;
  103. kmem_entry->align = kmem_handle->align;
  104. kmem_entry->direction = PCI_DMA_NONE;
  105. /* Initialize sysfs if possible */
  106. if (pcidriver_sysfs_initialize_kmem(privdata, kmem_entry->id, &(kmem_entry->sysfs_attr)) != 0)
  107. goto kmem_alloc_mem_fail;
  108. /* ...and allocate the DMA memory */
  109. /* note this is a memory pair, referencing the same area: the cpu address (cpua)
  110. * and the PCI bus address (pa). The CPU and PCI addresses may not be the same.
  111. * The CPU sees only CPU addresses, while the device sees only PCI addresses.
  112. * CPU address is used for the mmap (internal to the driver), and
  113. * PCI address is the address passed to the DMA Controller in the device.
  114. */
  115. switch (kmem_entry->type&PCILIB_KMEM_TYPE_MASK) {
  116. case PCILIB_KMEM_TYPE_CONSISTENT:
  117. #ifdef PCIDRIVER_DUMMY_DEVICE
  118. retptr = kmalloc( kmem_handle->size, GFP_KERNEL);
  119. #else /* PCIDRIVER_DUMMY_DEVICE */
  120. retptr = pci_alloc_consistent( privdata->pdev, kmem_handle->size, &(kmem_entry->dma_handle) );
  121. #endif /* PCIDRIVER_DUMMY_DEVICE */
  122. break;
  123. case PCILIB_KMEM_TYPE_REGION:
  124. retptr = ioremap(kmem_handle->pa, kmem_handle->size);
  125. kmem_entry->dma_handle = kmem_handle->pa;
  126. if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_S2C) {
  127. kmem_entry->direction = PCI_DMA_TODEVICE;
  128. } else if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_C2S) {
  129. kmem_entry->direction = PCI_DMA_FROMDEVICE;
  130. }
  131. break;
  132. case PCILIB_KMEM_TYPE_PAGE:
  133. flags = GFP_KERNEL;
  134. if (kmem_handle->size == 0)
  135. kmem_handle->size = PAGE_SIZE;
  136. else if (kmem_handle->size%PAGE_SIZE)
  137. goto kmem_alloc_mem_fail;
  138. else
  139. flags |= __GFP_COMP;
  140. retptr = (void*)__get_free_pages(flags, get_order(kmem_handle->size));
  141. kmem_entry->dma_handle = 0;
  142. if (retptr) {
  143. #ifndef PCIDRIVER_DUMMY_DEVICE
  144. if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
  145. kmem_entry->direction = PCI_DMA_TODEVICE;
  146. kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_TODEVICE);
  147. if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
  148. free_pages((unsigned long)retptr, get_order(kmem_handle->size));
  149. goto kmem_alloc_mem_fail;
  150. }
  151. } else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
  152. kmem_entry->direction = PCI_DMA_FROMDEVICE;
  153. kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_FROMDEVICE);
  154. if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
  155. free_pages((unsigned long)retptr, get_order(kmem_handle->size));
  156. goto kmem_alloc_mem_fail;
  157. }
  158. }
  159. #endif /* ! PCIDRIVER_DUMMY_DEVICE */
  160. }
  161. break;
  162. default:
  163. goto kmem_alloc_mem_fail;
  164. }
  165. if (retptr == NULL)
  166. goto kmem_alloc_mem_fail;
  167. kmem_entry->size = kmem_handle->size;
  168. kmem_entry->cpua = (unsigned long)retptr;
  169. kmem_handle->pa = (unsigned long)(kmem_entry->dma_handle);
  170. kmem_entry->mode = 1;
  171. if (kmem_handle->flags&KMEM_FLAG_REUSE) {
  172. kmem_entry->mode |= KMEM_MODE_REUSABLE;
  173. if (kmem_handle->flags&KMEM_FLAG_EXCLUSIVE) kmem_entry->mode |= KMEM_MODE_EXCLUSIVE;
  174. if (kmem_handle->flags&KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
  175. }
  176. kmem_entry->refs = 0;
  177. if (kmem_handle->flags&KMEM_FLAG_HW) {
  178. pcidriver_module_get(privdata);
  179. kmem_entry->refs |= KMEM_REF_HW;
  180. }
  181. kmem_handle->flags = 0;
  182. set_pages_reserved_compat(kmem_entry->cpua, kmem_entry->size);
  183. /* Add the kmem_entry to the list of the device */
  184. spin_lock( &(privdata->kmemlist_lock) );
  185. list_add_tail( &(kmem_entry->list), &(privdata->kmem_list) );
  186. spin_unlock( &(privdata->kmemlist_lock) );
  187. return 0;
  188. kmem_alloc_mem_fail:
  189. kfree(kmem_entry);
  190. kmem_alloc_entry_fail:
  191. return -ENOMEM;
  192. }
/**
 * Decides whether a kmem_entry may actually be freed, updating its reference
 * bookkeeping according to the flags in kmem_handle.
 *
 * Without KMEM_FLAG_FORCE: drops one use count, releases HW/persistent
 * references as requested, and refuses to free entries that are still
 * referenced, persistent, or reusable-and-still-in-use.
 * With KMEM_FLAG_FORCE: strips the HW reference and all module references
 * unconditionally.
 *
 * @return 1 if the caller should free the entry, 0 if the entry must be
 *         kept allocated, -EBUSY if it cannot be freed right now.
 */
static int pcidriver_kmem_free_check(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle, pcidriver_kmem_entry_t *kmem_entry) {
	if ((kmem_handle->flags & KMEM_FLAG_FORCE) == 0) {
		/* drop one use count (the low bits of 'mode' hold the counter) */
		if (kmem_entry->mode&KMEM_MODE_COUNT)
			kmem_entry->mode -= 1;

		if (kmem_handle->flags&KMEM_FLAG_HW) {
			/* releasing the hardware reference unpins the module */
			if (kmem_entry->refs&KMEM_REF_HW)
				pcidriver_module_put(privdata);

			kmem_entry->refs &= ~KMEM_REF_HW;
		}

		if (kmem_handle->flags&KMEM_FLAG_PERSISTENT)
			kmem_entry->mode &= ~KMEM_MODE_PERSISTENT;

		/* REUSE requested only the bookkeeping above; keep the memory */
		if (kmem_handle->flags&KMEM_FLAG_REUSE)
			return 0;

		if (kmem_entry->refs) {
			kmem_entry->mode += 1;	/* roll back the use-count decrement */
			mod_info("can't free referenced kmem_entry, refs = %lx\n", kmem_entry->refs);
			return -EBUSY;
		}

		if (kmem_entry->mode & KMEM_MODE_PERSISTENT) {
			kmem_entry->mode += 1;	/* roll back the use-count decrement */
			mod_info("can't free persistent kmem_entry\n");
			return -EBUSY;
		}

		/* a non-exclusive reusable entry that is still in use stays allocated */
		if (((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)==0)&&(kmem_entry->mode&KMEM_MODE_COUNT)&&((kmem_handle->flags&KMEM_FLAG_EXCLUSIVE)==0))
			return 0;
	} else {
		/* forced free: drop the HW reference plus every module reference,
		 * then restore the counter to zero */
		if (kmem_entry->refs&KMEM_REF_HW)
			pcidriver_module_put(privdata);

		while (!atomic_add_negative(-1, &(privdata->refs))) pcidriver_module_put(privdata);
		atomic_inc(&(privdata->refs));
	}

	return 1;
}
  226. static int pcidriver_kmem_free_use(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  227. {
  228. int err;
  229. int failed = 0;
  230. struct list_head *ptr, *next;
  231. pcidriver_kmem_entry_t *kmem_entry;
  232. /* iterate safely over the entries and delete them */
  233. list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
  234. kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  235. if (kmem_entry->use == kmem_handle->use) {
  236. err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
  237. if (err > 0)
  238. pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
  239. else
  240. failed = 1;
  241. }
  242. }
  243. if (failed) {
  244. mod_info("Some kmem_entries for use %lx are still referenced\n", kmem_handle->use);
  245. return -EBUSY;
  246. }
  247. return 0;
  248. }
  249. /**
  250. *
  251. * Called via sysfs, frees kernel memory and the corresponding management structure
  252. *
  253. */
  254. int pcidriver_kmem_free( pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle )
  255. {
  256. int err;
  257. pcidriver_kmem_entry_t *kmem_entry;
  258. if (kmem_handle->flags&KMEM_FLAG_MASS) {
  259. kmem_handle->flags &= ~KMEM_FLAG_MASS;
  260. return pcidriver_kmem_free_use(privdata, kmem_handle);
  261. }
  262. /* Find the associated kmem_entry for this buffer */
  263. if ((kmem_entry = pcidriver_kmem_find_entry(privdata, kmem_handle)) == NULL)
  264. return -EINVAL; /* kmem_handle is not valid */
  265. err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
  266. if (err > 0)
  267. return pcidriver_kmem_free_entry(privdata, kmem_entry);
  268. return err;
  269. }
  270. /**
  271. *
  272. * Called when cleaning up, frees all kernel memory and their corresponding management structure
  273. *
  274. */
  275. int pcidriver_kmem_free_all(pcidriver_privdata_t *privdata)
  276. {
  277. // int failed = 0;
  278. struct list_head *ptr, *next;
  279. pcidriver_kmem_entry_t *kmem_entry;
  280. /* iterate safely over the entries and delete them */
  281. list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
  282. kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  283. /*if (kmem_entry->refs)
  284. failed = 1;
  285. else*/
  286. pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
  287. }
  288. /*
  289. if (failed) {
  290. mod_info("Some kmem_entries are still referenced\n");
  291. return -EBUSY;
  292. }
  293. */
  294. return 0;
  295. }
  296. /**
  297. *
  298. * Synchronize memory to/from the device (or in both directions).
  299. *
  300. */
  301. int pcidriver_kmem_sync_entry( pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry, int direction)
  302. {
  303. if (kmem_entry->direction == PCI_DMA_NONE)
  304. return -EINVAL;
  305. #ifndef PCIDRIVER_DUMMY_DEVICE
  306. #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
  307. switch (direction) {
  308. case PCILIB_KMEM_SYNC_TODEVICE:
  309. pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  310. break;
  311. case PCILIB_KMEM_SYNC_FROMDEVICE:
  312. pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  313. break;
  314. case PCILIB_KMEM_SYNC_BIDIRECTIONAL:
  315. pci_dma_sync_single_for_device( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  316. pci_dma_sync_single_for_cpu( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  317. break;
  318. default:
  319. return -EINVAL; /* wrong direction parameter */
  320. }
  321. #else
  322. switch (direction) {
  323. case PCILIB_KMEM_SYNC_TODEVICE:
  324. pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  325. break;
  326. case PCILIB_KMEM_SYNC_FROMDEVICE:
  327. pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  328. break;
  329. case PCILIB_KMEM_SYNC_BIDIRECTIONAL:
  330. pci_dma_sync_single( privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction );
  331. break;
  332. default:
  333. return -EINVAL; /* wrong direction parameter */
  334. }
  335. #endif
  336. #endif /* ! PCIDRIVER_DUMMY_DEVICE */
  337. return 0; /* success */
  338. }
  339. /**
  340. *
  341. * Synchronize memory to/from the device (or in both directions).
  342. *
  343. */
  344. int pcidriver_kmem_sync( pcidriver_privdata_t *privdata, kmem_sync_t *kmem_sync )
  345. {
  346. pcidriver_kmem_entry_t *kmem_entry = NULL;
  347. /*
  348. * This is a shortcut to quickly find a next item in big multi-page kernel buffers
  349. */
  350. spin_lock(&(privdata->kmemlist_lock));
  351. if (privdata->kmem_last_sync) {
  352. if (privdata->kmem_last_sync->id == kmem_sync->handle.handle_id)
  353. kmem_entry = privdata->kmem_last_sync;
  354. else {
  355. privdata->kmem_last_sync = container_of(privdata->kmem_last_sync->list.next, pcidriver_kmem_entry_t, list);
  356. if (privdata->kmem_last_sync) {
  357. if (privdata->kmem_last_sync->id == kmem_sync->handle.handle_id)
  358. kmem_entry = privdata->kmem_last_sync;
  359. else
  360. privdata->kmem_last_sync = NULL;
  361. }
  362. }
  363. }
  364. spin_unlock(&(privdata->kmemlist_lock));
  365. /*
  366. * If not found go the standard way
  367. */
  368. if (!kmem_entry) {
  369. if ((kmem_entry = pcidriver_kmem_find_entry(privdata, &(kmem_sync->handle))) == NULL)
  370. return -EINVAL; /* kmem_handle is not valid */
  371. spin_lock(&(privdata->kmemlist_lock));
  372. privdata->kmem_last_sync = kmem_entry;
  373. spin_unlock(&(privdata->kmemlist_lock));
  374. }
  375. return pcidriver_kmem_sync_entry(privdata, kmem_entry, kmem_sync->dir);
  376. }
/**
 *
 * Free the given kmem_entry and its memory.
 *
 * Removes the sysfs attribute, releases the underlying memory according to
 * the entry type (consistent DMA buffer, ioremap'ed region, or raw pages
 * with their streaming mapping), unlinks the entry from the device list and
 * frees the management structure.
 *
 * @return 0 always
 */
int pcidriver_kmem_free_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry)
{
	pcidriver_sysfs_remove(privdata, &(kmem_entry->sysfs_attr));

	/* Go over the pages of the kmem buffer, and mark them as not reserved */
#if 0
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
	/*
	 * This code is DISABLED.
	 * Apparently, it is not needed to unreserve them. Doing so here
	 * hangs the machine. Why?
	 *
	 * Uhm.. see links:
	 *
	 * http://lwn.net/Articles/161204/
	 * http://lists.openfabrics.org/pipermail/general/2007-March/034101.html
	 *
	 * I insist, this should be enabled, but doing so hangs the machine.
	 * Literature supports the point, and there is even a similar problem (see link)
	 * But this is not the case. It seems right to me. but obviously is not.
	 *
	 * Anyway, this goes away in kernel >=2.6.15.
	 */
	unsigned long start = __pa(kmem_entry->cpua) >> PAGE_SHIFT;
	unsigned long end = __pa(kmem_entry->cpua + kmem_entry->size) >> PAGE_SHIFT;
	unsigned long i;
	for(i=start;i<end;i++) {
		struct page *kpage = pfn_to_page(i);
		ClearPageReserved(kpage);
	}
#endif
#endif

	/* Release DMA memory */
	switch (kmem_entry->type&PCILIB_KMEM_TYPE_MASK) {
	case PCILIB_KMEM_TYPE_CONSISTENT:
#ifdef PCIDRIVER_DUMMY_DEVICE
		kfree((void*)(kmem_entry->cpua));
#else /* PCIDRIVER_DUMMY_DEVICE */
		pci_free_consistent( privdata->pdev, kmem_entry->size, (void *)(kmem_entry->cpua), kmem_entry->dma_handle );
#endif /* PCIDRIVER_DUMMY_DEVICE */
		break;
	case PCILIB_KMEM_TYPE_REGION:
		iounmap((void *)(kmem_entry->cpua));
		break;
	case PCILIB_KMEM_TYPE_PAGE:
#ifndef PCIDRIVER_DUMMY_DEVICE
		/* undo the streaming mapping that was set up at allocation time */
		if (kmem_entry->dma_handle) {
			if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
				pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_TODEVICE);
			} else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
				pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_FROMDEVICE);
			}
		}
#endif /* ! PCIDRIVER_DUMMY_DEVICE */
		free_pages((unsigned long)kmem_entry->cpua, get_order(kmem_entry->size));
		break;
	}

	/* Remove the kmem list entry */
	spin_lock( &(privdata->kmemlist_lock) );
	if (privdata->kmem_last_sync == kmem_entry)
		privdata->kmem_last_sync = NULL;	/* invalidate the sync shortcut cache */
	list_del( &(kmem_entry->list) );
	spin_unlock( &(privdata->kmemlist_lock) );

	/* Release kmem_entry memory */
	kfree(kmem_entry);

	return 0;
}
  448. /**
  449. *
  450. * Find the corresponding kmem_entry for the given kmem_handle.
  451. *
  452. */
  453. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
  454. {
  455. struct list_head *ptr;
  456. pcidriver_kmem_entry_t *entry, *result = NULL;
  457. /* should I implement it better using the handle_id? */
  458. spin_lock(&(privdata->kmemlist_lock));
  459. list_for_each(ptr, &(privdata->kmem_list)) {
  460. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  461. if (entry->id == kmem_handle->handle_id) {
  462. result = entry;
  463. break;
  464. }
  465. }
  466. spin_unlock(&(privdata->kmemlist_lock));
  467. return result;
  468. }
  469. /**
  470. *
  471. * find the corresponding kmem_entry for the given id.
  472. *
  473. */
  474. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_id(pcidriver_privdata_t *privdata, int id)
  475. {
  476. struct list_head *ptr;
  477. pcidriver_kmem_entry_t *entry, *result = NULL;
  478. spin_lock(&(privdata->kmemlist_lock));
  479. list_for_each(ptr, &(privdata->kmem_list)) {
  480. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  481. if (entry->id == id) {
  482. result = entry;
  483. break;
  484. }
  485. }
  486. spin_unlock(&(privdata->kmemlist_lock));
  487. return result;
  488. }
  489. /**
  490. *
  491. * find the corresponding kmem_entry for the given use and item.
  492. *
  493. */
  494. pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_use(pcidriver_privdata_t *privdata, unsigned long use, unsigned long item)
  495. {
  496. struct list_head *ptr;
  497. pcidriver_kmem_entry_t *entry, *result = NULL;
  498. spin_lock(&(privdata->kmemlist_lock));
  499. list_for_each(ptr, &(privdata->kmem_list)) {
  500. entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
  501. if ((entry->use == use)&&(entry->item == item)&&(entry->mode&KMEM_MODE_REUSABLE)) {
  502. result = entry;
  503. break;
  504. }
  505. }
  506. spin_unlock(&(privdata->kmemlist_lock));
  507. return result;
  508. }
  509. void pcidriver_kmem_mmap_close(struct vm_area_struct *vma) {
  510. unsigned long vma_size;
  511. pcidriver_kmem_entry_t *kmem_entry = (pcidriver_kmem_entry_t*)vma->vm_private_data;
  512. if (kmem_entry) {
  513. /*
  514. if (kmem_entry->id == 0) {
  515. mod_info("refs: %p %p %lx\n", vma, vma->vm_private_data, kmem_entry->refs);
  516. mod_info("kmem_size: %lu vma_size: %lu, s: %lx, e: %lx\n", kmem_entry->size, (vma->vm_end - vma->vm_start), vma->vm_start, vma->vm_end);
  517. }
  518. */
  519. vma_size = (vma->vm_end - vma->vm_start);
  520. if (kmem_entry->refs&KMEM_REF_COUNT) {
  521. kmem_entry->refs -= vma_size / PAGE_SIZE;
  522. }
  523. }
  524. }
/* VMA operations for kmem mappings: only 'close' is needed, to drop the
 * mmap reference count when the mapping goes away. */
static struct vm_operations_struct pcidriver_kmem_mmap_ops = {
	.close = pcidriver_kmem_mmap_close
};
  528. /**
  529. *
  530. * mmap() kernel memory to userspace.
  531. *
  532. */
  533. int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *vma)
  534. {
  535. unsigned long vma_size;
  536. pcidriver_kmem_entry_t *kmem_entry;
  537. int ret;
  538. mod_info_dbg("Entering mmap_kmem\n");
  539. /* FIXME: Is this really right? Always just the latest one? Can't we identify one? */
  540. /* Get latest entry on the kmem_list */
  541. kmem_entry = pcidriver_kmem_find_entry_id(privdata, privdata->kmem_cur_id);
  542. if (!kmem_entry) {
  543. mod_info("Trying to mmap a kernel memory buffer without creating it first!\n");
  544. return -EFAULT;
  545. }
  546. mod_info_dbg("Got kmem_entry with id: %d\n", kmem_entry->id);
  547. /* Check sizes */
  548. vma_size = (vma->vm_end - vma->vm_start);
  549. if ((vma_size > kmem_entry->size) &&
  550. ((kmem_entry->size < PAGE_SIZE) && (vma_size != PAGE_SIZE))) {
  551. mod_info("kem_entry size(%lu) and vma size do not match(%lu)\n", kmem_entry->size, vma_size);
  552. return -EINVAL;
  553. }
  554. /* reference counting */
  555. if ((kmem_entry->mode&KMEM_MODE_EXCLUSIVE)&&(kmem_entry->refs&KMEM_REF_COUNT)) {
  556. mod_info("can't make second mmaping for exclusive kmem_entry\n");
  557. return -EBUSY;
  558. }
  559. if (((kmem_entry->refs&KMEM_REF_COUNT) + (vma_size / PAGE_SIZE)) > KMEM_REF_COUNT) {
  560. mod_info("maximal amount of references is reached by kmem_entry\n");
  561. return -EBUSY;
  562. }
  563. kmem_entry->refs += vma_size / PAGE_SIZE;
  564. vma->vm_flags |= (VM_RESERVED);
  565. #ifdef pgprot_noncached
  566. // This is coherent memory, so it must not be cached.
  567. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  568. #endif
  569. mod_info_dbg("Mapping address %08lx / PFN %08lx\n",
  570. virt_to_phys((void*)kmem_entry->cpua),
  571. page_to_pfn(virt_to_page((void*)kmem_entry->cpua)));
  572. if ((kmem_entry->type&PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
  573. ret = remap_pfn_range_compat(
  574. vma,
  575. vma->vm_start,
  576. kmem_entry->dma_handle,
  577. (vma_size < kmem_entry->size)?vma_size:kmem_entry->size,
  578. vma->vm_page_prot);
  579. } else {
  580. ret = remap_pfn_range_cpua_compat(
  581. vma,
  582. vma->vm_start,
  583. kmem_entry->cpua,
  584. (vma_size < kmem_entry->size)?vma_size:kmem_entry->size,
  585. vma->vm_page_prot );
  586. }
  587. if (ret) {
  588. mod_info("kmem remap failed: %d (%lx)\n", ret,kmem_entry->cpua);
  589. kmem_entry->refs -= 1;
  590. return -EAGAIN;
  591. }
  592. vma->vm_ops = &pcidriver_kmem_mmap_ops;
  593. vma->vm_private_data = (void*)kmem_entry;
  594. return ret;
  595. }