/**
 *
 * @file kmem.c
 * @brief This file contains all functions dealing with kernel memory.
 * @author Guillermo Marcus
 * @date 2009-04-05
 *
 */
#include <linux/version.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include "base.h"
/**
 *
 * Allocates new kernel memory, including the corresponding management
 * structure, and makes it available via sysfs if possible.
 *
 */
int pcidriver_kmem_alloc(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
{
    int flags;
    pcidriver_kmem_entry_t *kmem_entry;
    void *retptr;

    if (kmem_handle->flags & KMEM_FLAG_REUSE) {
        kmem_entry = pcidriver_kmem_find_entry_use(privdata, kmem_handle->use, kmem_handle->item);
        if (kmem_entry) {
            unsigned long flags = kmem_handle->flags;

            if (flags & KMEM_FLAG_TRY) {
                kmem_handle->type = kmem_entry->type;
                kmem_handle->size = kmem_entry->size;
                kmem_handle->align = kmem_entry->align;
            } else {
                if (kmem_handle->type != kmem_entry->type) {
                    mod_info("Invalid type of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->type, kmem_handle->type);
                    kmem_handle->type = kmem_entry->type;
                    return -EINVAL;
                }
                if (((kmem_handle->type & PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_PAGE) && (kmem_handle->size == 0)) {
                    kmem_handle->size = kmem_entry->size;
                } else if (kmem_handle->size != kmem_entry->size) {
                    mod_info("Invalid size of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->size, kmem_handle->size);
                    kmem_handle->size = kmem_entry->size;
                    return -EINVAL;
                }
                if (kmem_handle->align != kmem_entry->align) {
                    mod_info("Invalid alignment of reusable kmem_entry, currently: %lu, but requested: %lu\n", kmem_entry->align, kmem_handle->align);
                    kmem_handle->align = kmem_entry->align;
                    return -EINVAL;
                }
                if (((kmem_entry->mode & KMEM_MODE_EXCLUSIVE) ? 1 : 0) != ((flags & KMEM_FLAG_EXCLUSIVE) ? 1 : 0)) {
                    mod_info("Invalid mode of reusable kmem_entry\n");
                    kmem_handle->flags = (kmem_entry->mode & KMEM_MODE_EXCLUSIVE) ? KMEM_FLAG_EXCLUSIVE : 0;
                    return -EINVAL;
                }
            }

            if ((kmem_entry->mode & KMEM_MODE_COUNT) == KMEM_MODE_COUNT) {
                mod_info("Reuse counter of kmem_entry has overflowed\n");
                return -EBUSY;
            }

            kmem_handle->handle_id = kmem_entry->id;
            kmem_handle->ba = (unsigned long)(kmem_entry->dma_handle);
            kmem_handle->pa = virt_to_phys((void *)kmem_entry->cpua);

            kmem_handle->flags = KMEM_FLAG_REUSED;
            if (kmem_entry->refs & KMEM_REF_HW) kmem_handle->flags |= KMEM_FLAG_REUSED_HW;
            if (kmem_entry->mode & KMEM_MODE_PERSISTENT) kmem_handle->flags |= KMEM_FLAG_REUSED_PERSISTENT;

            kmem_entry->mode += 1;    /* bump the reuse count kept in the low bits of mode */
            if (flags & KMEM_FLAG_HW) {
                if ((kmem_entry->refs & KMEM_REF_HW) == 0)
                    pcidriver_module_get(privdata);
                kmem_entry->refs |= KMEM_REF_HW;
            }
            if (flags & KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;

            privdata->kmem_cur_id = kmem_entry->id;

            return 0;
        }

        if (kmem_handle->flags & KMEM_FLAG_TRY) return -ENOENT;
    }

    /* First, allocate zeroed memory for the kmem_entry */
    if ((kmem_entry = kcalloc(1, sizeof(pcidriver_kmem_entry_t), GFP_KERNEL)) == NULL)
        goto kmem_alloc_entry_fail;

    /* Initialize the kmem_entry */
    kmem_entry->id = atomic_inc_return(&privdata->kmem_count) - 1;
    privdata->kmem_cur_id = kmem_entry->id;
    kmem_handle->handle_id = kmem_entry->id;

    kmem_entry->use = kmem_handle->use;
    kmem_entry->item = kmem_handle->item;
    kmem_entry->type = kmem_handle->type;
    kmem_entry->align = kmem_handle->align;
    kmem_entry->direction = PCI_DMA_NONE;

    /* Initialize sysfs if possible */
    if (pcidriver_sysfs_initialize_kmem(privdata, kmem_entry->id, &(kmem_entry->sysfs_attr)) != 0)
        goto kmem_alloc_mem_fail;

    /* ...and allocate the DMA memory */
    /* Note this is a memory pair referencing the same area: the CPU address (cpua)
     * and the PCI bus address (pa). The CPU and PCI addresses may not be the same.
     * The CPU sees only CPU addresses, while the device sees only PCI addresses.
     * The CPU address is used for the mmap (internal to the driver), and the
     * PCI address is the address passed to the DMA controller in the device.
     */
    switch (kmem_entry->type & PCILIB_KMEM_TYPE_MASK) {
    case PCILIB_KMEM_TYPE_CONSISTENT:
#ifdef PCIDRIVER_DUMMY_DEVICE
        retptr = kmalloc(kmem_handle->size, GFP_KERNEL);
#else /* PCIDRIVER_DUMMY_DEVICE */
        retptr = pci_alloc_consistent(privdata->pdev, kmem_handle->size, &(kmem_entry->dma_handle));
#endif /* PCIDRIVER_DUMMY_DEVICE */
        break;
    case PCILIB_KMEM_TYPE_REGION:
        retptr = ioremap(kmem_handle->pa, kmem_handle->size);
        kmem_entry->dma_handle = kmem_handle->pa;
        if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_S2C) {
            kmem_entry->direction = PCI_DMA_TODEVICE;
        } else if (kmem_entry->type == PCILIB_KMEM_TYPE_REGION_C2S) {
            kmem_entry->direction = PCI_DMA_FROMDEVICE;
        }
        break;
    case PCILIB_KMEM_TYPE_PAGE:
        flags = GFP_KERNEL;
        if (kmem_handle->size == 0)
            kmem_handle->size = PAGE_SIZE;
        else if (kmem_handle->size % PAGE_SIZE)
            goto kmem_alloc_mem_fail;
        else
            flags |= __GFP_COMP;
        retptr = (void *)__get_free_pages(flags, get_order(kmem_handle->size));
        kmem_entry->dma_handle = 0;

        if (retptr) {
#ifndef PCIDRIVER_DUMMY_DEVICE
            if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
                kmem_entry->direction = PCI_DMA_TODEVICE;
                kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
                    free_pages((unsigned long)retptr, get_order(kmem_handle->size));
                    goto kmem_alloc_mem_fail;
                }
            } else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
                kmem_entry->direction = PCI_DMA_FROMDEVICE;
                kmem_entry->dma_handle = pci_map_single(privdata->pdev, retptr, kmem_handle->size, PCI_DMA_FROMDEVICE);
                if (pci_dma_mapping_error(privdata->pdev, kmem_entry->dma_handle)) {
                    free_pages((unsigned long)retptr, get_order(kmem_handle->size));
                    goto kmem_alloc_mem_fail;
                }
            }
#endif /* ! PCIDRIVER_DUMMY_DEVICE */
        }
        break;
    default:
        goto kmem_alloc_mem_fail;
    }
    if (retptr == NULL)
        goto kmem_alloc_mem_fail;

    kmem_entry->size = kmem_handle->size;
    kmem_entry->cpua = (unsigned long)retptr;
    kmem_handle->ba = (unsigned long)(kmem_entry->dma_handle);
    kmem_handle->pa = virt_to_phys(retptr);

    kmem_entry->mode = 1;
    if (kmem_handle->flags & KMEM_FLAG_REUSE) {
        kmem_entry->mode |= KMEM_MODE_REUSABLE;
        if (kmem_handle->flags & KMEM_FLAG_EXCLUSIVE) kmem_entry->mode |= KMEM_MODE_EXCLUSIVE;
        if (kmem_handle->flags & KMEM_FLAG_PERSISTENT) kmem_entry->mode |= KMEM_MODE_PERSISTENT;
    }

    kmem_entry->refs = 0;
    if (kmem_handle->flags & KMEM_FLAG_HW) {
        pcidriver_module_get(privdata);
        kmem_entry->refs |= KMEM_REF_HW;
    }
    kmem_handle->flags = 0;

    /* Add the kmem_entry to the list of the device */
    spin_lock(&(privdata->kmemlist_lock));
    list_add_tail(&(kmem_entry->list), &(privdata->kmem_list));
    spin_unlock(&(privdata->kmemlist_lock));

    return 0;

kmem_alloc_mem_fail:
    kfree(kmem_entry);
kmem_alloc_entry_fail:
    return -ENOMEM;
}
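
/*
 * Minimal usage sketch (illustration only, not part of the driver): how a
 * caller holding the device's privdata might request a single reusable
 * device-to-host DMA page. The function name example_alloc_dma_page is
 * hypothetical; the types, constants, and fields are the ones used above.
 */
#if 0
static int example_alloc_dma_page(pcidriver_privdata_t *privdata)
{
    kmem_handle_t handle = {0};
    int err;

    handle.type  = PCILIB_KMEM_TYPE_DMA_C2S_PAGE;  /* device-to-host page */
    handle.size  = 0;                              /* 0 selects one PAGE_SIZE page */
    handle.use   = 1;                              /* caller-defined use tag */
    handle.item  = 0;
    handle.align = 0;
    handle.flags = KMEM_FLAG_REUSE;                /* pick up an existing entry if present */

    err = pcidriver_kmem_alloc(privdata, &handle);
    if (err)
        return err;

    /* handle.ba now holds the bus address for the device,
     * handle.pa the physical address backing the mmap. */
    return 0;
}
#endif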
/**
 *
 * Decides whether a kmem_entry may actually be freed, updating its reuse
 * counter and reference flags on the way. Returns 1 if the entry should be
 * freed by the caller, 0 if it must be kept (still reusable or counted),
 * and -EBUSY if it is still referenced or persistent.
 *
 */
static int pcidriver_kmem_free_check(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle, pcidriver_kmem_entry_t *kmem_entry)
{
    if ((kmem_handle->flags & KMEM_FLAG_FORCE) == 0) {
        if (kmem_entry->mode & KMEM_MODE_COUNT)
            kmem_entry->mode -= 1;

        if (kmem_handle->flags & KMEM_FLAG_HW) {
            if (kmem_entry->refs & KMEM_REF_HW)
                pcidriver_module_put(privdata);
            kmem_entry->refs &= ~KMEM_REF_HW;
        }

        if (kmem_handle->flags & KMEM_FLAG_PERSISTENT)
            kmem_entry->mode &= ~KMEM_MODE_PERSISTENT;

        if (kmem_handle->flags & KMEM_FLAG_REUSE)
            return 0;

        if (((kmem_entry->mode & KMEM_MODE_EXCLUSIVE) == 0) && (kmem_entry->mode & KMEM_MODE_COUNT) && ((kmem_handle->flags & KMEM_FLAG_EXCLUSIVE) == 0))
            return 0;

        if (kmem_entry->refs) {
            kmem_entry->mode += 1;
            mod_info("can't free referenced kmem_entry, refs = %lx\n", kmem_entry->refs);
            return -EBUSY;
        }

        if (kmem_entry->mode & KMEM_MODE_PERSISTENT) {
            kmem_entry->mode += 1;
            mod_info("can't free persistent kmem_entry\n");
            return -EBUSY;
        }
    } else {
        if (kmem_entry->refs & KMEM_REF_HW)
            pcidriver_module_put(privdata);
        /* forced cleanup: drain every outstanding reference, then reset the counter to zero */
        while (!atomic_add_negative(-1, &(privdata->refs))) pcidriver_module_put(privdata);
        atomic_inc(&(privdata->refs));
    }

    return 1;
}
static int pcidriver_kmem_free_use(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
{
    int err;
    int failed = 0;
    struct list_head *ptr, *next;
    pcidriver_kmem_entry_t *kmem_entry;

    /* iterate safely over the entries and delete them */
    list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
        kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
        if (kmem_entry->use == kmem_handle->use) {
            err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
            if (err > 0)
                pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
            else
                failed = 1;
        }
    }

    if (failed) {
        mod_info("Some kmem_entries for use %lx are still referenced\n", kmem_handle->use);
        return -EBUSY;
    }

    return 0;
}
/**
 *
 * Called via sysfs, frees kernel memory and the corresponding management structure.
 *
 */
int pcidriver_kmem_free(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
{
    int err;
    pcidriver_kmem_entry_t *kmem_entry;

    if (kmem_handle->flags & KMEM_FLAG_MASS) {
        kmem_handle->flags &= ~KMEM_FLAG_MASS;
        return pcidriver_kmem_free_use(privdata, kmem_handle);
    }

    /* Find the associated kmem_entry for this buffer */
    if ((kmem_entry = pcidriver_kmem_find_entry(privdata, kmem_handle)) == NULL)
        return -EINVAL; /* kmem_handle is not valid */

    err = pcidriver_kmem_free_check(privdata, kmem_handle, kmem_entry);
    if (err > 0)
        return pcidriver_kmem_free_entry(privdata, kmem_entry);

    return err;
}
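
/*
 * Illustration only (hypothetical helper, not part of the driver): freeing
 * every buffer allocated under one use tag in a single call via
 * KMEM_FLAG_MASS, which pcidriver_kmem_free() dispatches to
 * pcidriver_kmem_free_use() above.
 */
#if 0
static int example_free_all_for_use(pcidriver_privdata_t *privdata, unsigned long use)
{
    kmem_handle_t handle = {0};

    handle.use = use;              /* the use tag chosen at allocation time */
    handle.flags = KMEM_FLAG_MASS; /* free all entries with this use tag */

    return pcidriver_kmem_free(privdata, &handle);
}
#endif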
/**
 *
 * Called when cleaning up, frees all kernel memory and the corresponding management structures.
 *
 */
int pcidriver_kmem_free_all(pcidriver_privdata_t *privdata)
{
//  int failed = 0;
    struct list_head *ptr, *next;
    pcidriver_kmem_entry_t *kmem_entry;

    /* iterate safely over the entries and delete them */
    list_for_each_safe(ptr, next, &(privdata->kmem_list)) {
        kmem_entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
        /*if (kmem_entry->refs)
            failed = 1;
        else*/
        pcidriver_kmem_free_entry(privdata, kmem_entry); /* spin lock inside! */
    }
    /*
    if (failed) {
        mod_info("Some kmem_entries are still referenced\n");
        return -EBUSY;
    }
    */

    return 0;
}
/**
 *
 * Synchronize memory to/from the device (or in both directions).
 *
 */
int pcidriver_kmem_sync_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry, int direction)
{
    if (kmem_entry->direction == PCI_DMA_NONE)
        return -EINVAL;

#ifndef PCIDRIVER_DUMMY_DEVICE
    switch (direction) {
    case PCILIB_KMEM_SYNC_TODEVICE:
        pci_dma_sync_single_for_device(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction);
        break;
    case PCILIB_KMEM_SYNC_FROMDEVICE:
        pci_dma_sync_single_for_cpu(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction);
        break;
    case PCILIB_KMEM_SYNC_BIDIRECTIONAL:
        pci_dma_sync_single_for_device(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction);
        pci_dma_sync_single_for_cpu(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, kmem_entry->direction);
        break;
    default:
        return -EINVAL; /* wrong direction parameter */
    }
#endif /* ! PCIDRIVER_DUMMY_DEVICE */

    return 0; /* success */
}
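
/*
 * Illustration only (hypothetical helper): syncing a device-to-host (C2S)
 * buffer before the CPU reads data the device has just written. This is the
 * streaming-DMA rule the switch above implements: sync "for CPU" after the
 * device writes, sync "for device" before it reads.
 */
#if 0
static int example_sync_before_read(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *entry)
{
    /* make the device's writes visible to the CPU */
    return pcidriver_kmem_sync_entry(privdata, entry, PCILIB_KMEM_SYNC_FROMDEVICE);
}
#endif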
/**
 *
 * Synchronize memory to/from the device (or in both directions).
 *
 */
int pcidriver_kmem_sync(pcidriver_privdata_t *privdata, kmem_sync_t *kmem_sync)
{
    pcidriver_kmem_entry_t *kmem_entry = NULL;

    /*
     * This is a shortcut to quickly find the next item in big multi-page kernel buffers
     */
    spin_lock(&(privdata->kmemlist_lock));
    if (privdata->kmem_last_sync) {
        if (privdata->kmem_last_sync->id == kmem_sync->handle.handle_id)
            kmem_entry = privdata->kmem_last_sync;
        else {
            privdata->kmem_last_sync = container_of(privdata->kmem_last_sync->list.next, pcidriver_kmem_entry_t, list);
            if (privdata->kmem_last_sync) {
                if (privdata->kmem_last_sync->id == kmem_sync->handle.handle_id)
                    kmem_entry = privdata->kmem_last_sync;
                else
                    privdata->kmem_last_sync = NULL;
            }
        }
    }
    spin_unlock(&(privdata->kmemlist_lock));

    /*
     * If not found, go the standard way
     */
    if (!kmem_entry) {
        if ((kmem_entry = pcidriver_kmem_find_entry(privdata, &(kmem_sync->handle))) == NULL)
            return -EINVAL; /* kmem_handle is not valid */

        spin_lock(&(privdata->kmemlist_lock));
        privdata->kmem_last_sync = kmem_entry;
        spin_unlock(&(privdata->kmemlist_lock));
    }

    return pcidriver_kmem_sync_entry(privdata, kmem_entry, kmem_sync->dir);
}
/**
 *
 * Free the given kmem_entry and its memory.
 *
 */
int pcidriver_kmem_free_entry(pcidriver_privdata_t *privdata, pcidriver_kmem_entry_t *kmem_entry)
{
    pcidriver_sysfs_remove(privdata, &(kmem_entry->sysfs_attr));

    /* Release DMA memory */
    switch (kmem_entry->type & PCILIB_KMEM_TYPE_MASK) {
    case PCILIB_KMEM_TYPE_CONSISTENT:
#ifdef PCIDRIVER_DUMMY_DEVICE
        kfree((void *)(kmem_entry->cpua));
#else /* PCIDRIVER_DUMMY_DEVICE */
        pci_free_consistent(privdata->pdev, kmem_entry->size, (void *)(kmem_entry->cpua), kmem_entry->dma_handle);
#endif /* PCIDRIVER_DUMMY_DEVICE */
        break;
    case PCILIB_KMEM_TYPE_REGION:
        iounmap((void *)(kmem_entry->cpua));
        break;
    case PCILIB_KMEM_TYPE_PAGE:
#ifndef PCIDRIVER_DUMMY_DEVICE
        if (kmem_entry->dma_handle) {
            if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_S2C_PAGE) {
                pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_TODEVICE);
            } else if (kmem_entry->type == PCILIB_KMEM_TYPE_DMA_C2S_PAGE) {
                pci_unmap_single(privdata->pdev, kmem_entry->dma_handle, kmem_entry->size, PCI_DMA_FROMDEVICE);
            }
        }
#endif /* ! PCIDRIVER_DUMMY_DEVICE */
        free_pages((unsigned long)kmem_entry->cpua, get_order(kmem_entry->size));
        break;
    }

    /* Remove the kmem list entry */
    spin_lock(&(privdata->kmemlist_lock));
    if (privdata->kmem_last_sync == kmem_entry)
        privdata->kmem_last_sync = NULL;
    list_del(&(kmem_entry->list));
    spin_unlock(&(privdata->kmemlist_lock));

    /* Release kmem_entry memory */
    kfree(kmem_entry);

    return 0;
}
/**
 *
 * Find the corresponding kmem_entry for the given kmem_handle.
 *
 */
pcidriver_kmem_entry_t *pcidriver_kmem_find_entry(pcidriver_privdata_t *privdata, kmem_handle_t *kmem_handle)
{
    struct list_head *ptr;
    pcidriver_kmem_entry_t *entry, *result = NULL;

    /* should I implement it better using the handle_id? */
    spin_lock(&(privdata->kmemlist_lock));
    list_for_each(ptr, &(privdata->kmem_list)) {
        entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
        if (entry->id == kmem_handle->handle_id) {
            result = entry;
            break;
        }
    }
    spin_unlock(&(privdata->kmemlist_lock));
    return result;
}
/**
 *
 * Find the corresponding kmem_entry for the given id.
 *
 */
pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_id(pcidriver_privdata_t *privdata, int id)
{
    struct list_head *ptr;
    pcidriver_kmem_entry_t *entry, *result = NULL;

    spin_lock(&(privdata->kmemlist_lock));
    list_for_each(ptr, &(privdata->kmem_list)) {
        entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
        if (entry->id == id) {
            result = entry;
            break;
        }
    }
    spin_unlock(&(privdata->kmemlist_lock));
    return result;
}
/**
 *
 * Find the corresponding kmem_entry for the given use and item.
 *
 */
pcidriver_kmem_entry_t *pcidriver_kmem_find_entry_use(pcidriver_privdata_t *privdata, unsigned long use, unsigned long item)
{
    struct list_head *ptr;
    pcidriver_kmem_entry_t *entry, *result = NULL;

    spin_lock(&(privdata->kmemlist_lock));
    list_for_each(ptr, &(privdata->kmem_list)) {
        entry = list_entry(ptr, pcidriver_kmem_entry_t, list);
        if ((entry->use == use) && (entry->item == item) && (entry->mode & KMEM_MODE_REUSABLE)) {
            result = entry;
            break;
        }
    }
    spin_unlock(&(privdata->kmemlist_lock));
    return result;
}
void pcidriver_kmem_mmap_close(struct vm_area_struct *vma)
{
    unsigned long vma_size;
    pcidriver_kmem_entry_t *kmem_entry = (pcidriver_kmem_entry_t *)vma->vm_private_data;
    if (kmem_entry) {
        /*
        if (kmem_entry->id == 0) {
            mod_info("refs: %p %p %lx\n", vma, vma->vm_private_data, kmem_entry->refs);
            mod_info("kmem_size: %lu vma_size: %lu, s: %lx, e: %lx\n", kmem_entry->size, (vma->vm_end - vma->vm_start), vma->vm_start, vma->vm_end);
        }
        */
        vma_size = (vma->vm_end - vma->vm_start);

        if (kmem_entry->refs & KMEM_REF_COUNT) {
            kmem_entry->refs -= vma_size / PAGE_SIZE;
        }
    }
}

static struct vm_operations_struct pcidriver_kmem_mmap_ops = {
    .close = pcidriver_kmem_mmap_close
};
/**
 *
 * mmap() kernel memory to userspace.
 *
 */
int pcidriver_mmap_kmem(pcidriver_privdata_t *privdata, struct vm_area_struct *vma)
{
    unsigned long vma_size;
    pcidriver_kmem_entry_t *kmem_entry;
    int ret;

    mod_info_dbg("Entering mmap_kmem\n");

    /* FIXME: Is this really right? Always just the latest one? Can't we identify one? */
    /* Get latest entry on the kmem_list */
    kmem_entry = pcidriver_kmem_find_entry_id(privdata, privdata->kmem_cur_id);
    if (!kmem_entry) {
        mod_info("Trying to mmap a kernel memory buffer without creating it first!\n");
        return -EFAULT;
    }

    mod_info_dbg("Got kmem_entry with id: %d\n", kmem_entry->id);

    /* Check sizes */
    vma_size = (vma->vm_end - vma->vm_start);
    if ((vma_size > kmem_entry->size) &&
        ((kmem_entry->size < PAGE_SIZE) && (vma_size != PAGE_SIZE))) {
        mod_info("kmem_entry size (%lu) and vma size (%lu) do not match\n", kmem_entry->size, vma_size);
        return -EINVAL;
    }

    /* reference counting */
    if ((kmem_entry->mode & KMEM_MODE_EXCLUSIVE) && (kmem_entry->refs & KMEM_REF_COUNT)) {
        mod_info("can't make a second mapping for an exclusive kmem_entry\n");
        return -EBUSY;
    }

    if (((kmem_entry->refs & KMEM_REF_COUNT) + (vma_size / PAGE_SIZE)) > KMEM_REF_COUNT) {
        mod_info("maximal amount of references is reached by kmem_entry\n");
        return -EBUSY;
    }

    kmem_entry->refs += vma_size / PAGE_SIZE;

    vma->vm_flags |= (VM_RESERVED);

    if ((kmem_entry->type & PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_CONSISTENT) {
        // This is coherent memory, so it must not be cached.
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
    }

    mod_info_dbg("Mapping address %08lx / PFN %08lx\n",
        virt_to_phys((void *)kmem_entry->cpua),
        page_to_pfn(virt_to_page((void *)kmem_entry->cpua)));

    if ((kmem_entry->type & PCILIB_KMEM_TYPE_MASK) == PCILIB_KMEM_TYPE_REGION) {
        ret = remap_pfn_range(vma, vma->vm_start, (kmem_entry->dma_handle >> PAGE_SHIFT), (vma_size < kmem_entry->size) ? vma_size : kmem_entry->size, vma->vm_page_prot);
    } else {
        ret = remap_pfn_range(vma, vma->vm_start, page_to_pfn(virt_to_page((void *)(kmem_entry->cpua))), (vma_size < kmem_entry->size) ? vma_size : kmem_entry->size, vma->vm_page_prot);
    }

    if (ret) {
        mod_info("kmem remap failed: %d (%lx)\n", ret, kmem_entry->cpua);
        kmem_entry->refs -= 1;
        return -EAGAIN;
    }

    vma->vm_ops = &pcidriver_kmem_mmap_ops;
    vma->vm_private_data = (void *)kmem_entry;

    return ret;
}
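
/*
 * Userspace counterpart, for illustration only. Assumptions are flagged
 * inline: the device node name is hypothetical, the mmap offset is assumed
 * to be 0, and any mode-selection ioctl the driver may require before
 * mmap() is omitted. The kernel side maps the most recently allocated
 * buffer (privdata->kmem_cur_id), as the FIXME above notes.
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static void *example_map_kmem(const char *devnode, size_t size)
{
    void *buf;
    int fd = open(devnode, O_RDWR);  /* e.g. "/dev/fpga0" -- name is an assumption */

    if (fd < 0)
        return NULL;

    buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);  /* the mapping stays valid after close() */

    return (buf == MAP_FAILED) ? NULL : buf;
}
#endif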