
Provide IRQ enable/disable call

Suren A. Chilingaryan 13 years ago
parent
commit
493958c9cf
10 changed files with 171 additions and 30 deletions
  1. cli.c (+1, -1)
  2. dma.c (+39, -1)
  3. dma.h (+4, -4)
  4. dma/nwl.c (+72, -8)
  5. dma/nwl.h (+3, -2)
  6. dma/nwl_dma.h (+5, -2)
  7. dma/nwl_irq.c (+38, -4)
  8. dma/nwl_irq.h (+3, -2)
  9. irq.h (+0, -5)
  10. pcilib.h (+6, -1)

+ 1 - 1
cli.c

@@ -906,7 +906,7 @@ int WaitIRQ(pcilib_t *handle, pcilib_model_description_t *model_info, pcilib_irq
     int err;
     size_t count;
     
-    err = pcilib_enable_irq(handle, 0);
+    err = pcilib_enable_irq(handle, PCILIB_EVENT_IRQ, 0);
     if (err) Error("Error enabling IRQs");
 
     err = pcilib_wait_irq(handle, irq_source, timeout, &count);

+ 39 - 1
dma.c

@@ -60,10 +60,48 @@ int pcilib_start_dma(pcilib_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t
 int pcilib_stop_dma(pcilib_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags) {
 }
 
-int pcilib_enable_irq(pcilib_t *ctx, pcilib_dma_flags_t flags) {
+int pcilib_enable_irq(pcilib_t *ctx, pcilib_irq_type_t irq_type, pcilib_dma_flags_t flags) {
+    int err; 
+
+    const pcilib_dma_info_t *info =  pcilib_get_dma_info(ctx);
+    if (!info) {
+	pcilib_error("DMA is not supported by the device");
+	return PCILIB_ERROR_NOTSUPPORTED;
+    }
+
+    if (!ctx->model_info.dma_api) {
+	pcilib_error("DMA Engine is not configured in the current model");
+	return PCILIB_ERROR_NOTAVAILABLE;
+    }
+    
+    if (!ctx->model_info.dma_api->enable_irq) {
+	pcilib_error("The IRQs are not supported by configured DMA engine");
+	return PCILIB_ERROR_NOTSUPPORTED;
+    }
+    
+    return ctx->model_info.dma_api->enable_irq(ctx->dma_ctx, irq_type, flags);
 }
 
 int pcilib_disable_irq(pcilib_t *ctx, pcilib_dma_flags_t flags) {
+    int err; 
+
+    const pcilib_dma_info_t *info =  pcilib_get_dma_info(ctx);
+    if (!info) {
+	pcilib_error("DMA is not supported by the device");
+	return PCILIB_ERROR_NOTSUPPORTED;
+    }
+
+    if (!ctx->model_info.dma_api) {
+	pcilib_error("DMA Engine is not configured in the current model");
+	return PCILIB_ERROR_NOTAVAILABLE;
+    }
+    
+    if (!ctx->model_info.dma_api->disable_irq) {
+	pcilib_error("The IRQs are not supported by configured DMA engine");
+	return PCILIB_ERROR_NOTSUPPORTED;
+    }
+    
+    return ctx->model_info.dma_api->disable_irq(ctx->dma_ctx, flags);
 }
 
 

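The new public wrappers above fail with PCILIB_ERROR_NOTSUPPORTED when the device or the configured DMA engine cannot generate IRQs, and with PCILIB_ERROR_NOTAVAILABLE when the current model has no DMA engine configured; otherwise the result of the backend callback is returned directly. A minimal caller-side sketch, assuming an already opened pcilib_t handle (variable names are illustrative only):

    int err = pcilib_enable_irq(handle, PCILIB_DMA_IRQ, 0);
    if (err == PCILIB_ERROR_NOTSUPPORTED) {
        /* device or DMA engine cannot generate IRQs, fall back to polling */
    } else if (err == PCILIB_ERROR_NOTAVAILABLE) {
        /* no DMA engine configured for the current model */
    } else if (err) {
        /* error reported by the backend's enable_irq callback */
    }
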
+ 4 - 4
dma.h

@@ -7,11 +7,11 @@ struct pcilib_dma_api_description_s {
     pcilib_dma_context_t *(*init)(pcilib_t *ctx);
     void (*free)(pcilib_dma_context_t *ctx);
 
-    int (*pcilib_start_dma)(pcilib_dma_context_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags);
-    int (*pcilib_stop_dma)(pcilib_dma_context_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags);
+    int (*enable_irq)(pcilib_dma_context_t *ctx, pcilib_irq_type_t irq_type, pcilib_dma_flags_t flags);
+    int (*disable_irq)(pcilib_dma_context_t *ctx, pcilib_dma_flags_t flags);
 
-    int (*pcilib_enable_irq)(pcilib_dma_context_t *ctx, pcilib_dma_flags_t flags);
-    int (*pcilib_disable_irq)(pcilib_dma_context_t *ctx, pcilib_dma_flags_t flags);
+    int (*start_dma)(pcilib_dma_context_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags);
+    int (*stop_dma)(pcilib_dma_context_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags);
 
     int (*push)(pcilib_dma_context_t *ctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, void *buf, size_t *written);
     int (*stream)(pcilib_dma_context_t *ctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr);

+ 72 - 8
dma/nwl.c

@@ -240,7 +240,7 @@ int dma_nwl_start(nwl_dma_t *ctx) {
     if (ctx->started) return 0;
     
 #ifdef NWL_GENERATE_DMA_IRQ
-    dma_nwl_enable_irq(ctx, PCILIB_DMA_IRQ);
+    dma_nwl_enable_irq(ctx, PCILIB_DMA_IRQ, 0);
 #endif /* NWL_GENERATE_DMA_IRQ */
 
     ctx->started = 1;
@@ -255,7 +255,7 @@ int dma_nwl_stop(nwl_dma_t *ctx) {
     
     ctx->started = 0;
     
-    err = dma_nwl_disable_irq(ctx);
+    err = dma_nwl_free_irq(ctx);
     if (err) return err;
     
     err = dma_nwl_stop_loopback(ctx);
@@ -355,7 +355,7 @@ int dma_nwl_write_fragment(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma,
     if (data) {
 	for (pos = 0; pos < size; pos += info->page_size) {
 	    int block_size = min2(size - pos, info->page_size);
-    
+	    
     	    bufnum = dma_nwl_get_next_buffer(ctx, info, 1, timeout);
 	    if (bufnum == PCILIB_DMA_BUFFER_INVALID) {
 		if (written) *written = pos;
@@ -431,6 +431,7 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
     uint32_t val;
     uint32_t *buf, *cmp;
     const char *error = NULL;
+//    pcilib_register_value_t regval;
 
     size_t us = 0;
     struct timeval start, cur;
@@ -446,9 +447,12 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
     if (size%sizeof(uint32_t)) size = 1 + size / sizeof(uint32_t);
     else size /= sizeof(uint32_t);
 
+	// Not supported
+    if (direction == PCILIB_DMA_TO_DEVICE) return -1.;
+
 	// Stop Generators and drain old data
     dma_nwl_stop_loopback(ctx);
-    dma_nwl_stop_engine(ctx, readid); // DS: replace with something better
+//    dma_nwl_stop_engine(ctx, readid); // DS: replace with something better
 
     __sync_synchronize();
 
@@ -456,15 +460,26 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
 
 #ifdef NWL_GENERATE_DMA_IRQ
     dma_nwl_enable_engine_irq(ctx, readid);
+    dma_nwl_enable_engine_irq(ctx, writeid);
 #endif /* NWL_GENERATE_DMA_IRQ */
 
+
     dma_nwl_start_loopback(ctx, direction, size * sizeof(uint32_t));
 
 /*
+    printf("Packet size: %li\n", size * sizeof(uint32_t));
+    pcilib_read_register(ctx->pcilib, NULL, "dma1w_counter", &regval);
+    printf("Count write: %lx\n", regval);
+
     nwl_read_register(val, ctx, read_base, REG_DMA_ENG_CTRL_STATUS);
     printf("Read DMA control: %lx\n", val);    
     nwl_read_register(val, ctx, write_base, REG_DMA_ENG_CTRL_STATUS);
     printf("Write DMA control: %lx\n", val);    
+
+    nwl_read_register(val, ctx, write_base, REG_DMA_ENG_NEXT_BD);
+    printf("Pointer1: %lx\n", val);    
+    nwl_read_register(val, ctx, write_base, REG_SW_NEXT_BD);
+    printf("Pointer2: %lx\n", val);    
 */
 
 	// Allocate memory and prepare data
@@ -486,11 +501,27 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
 
 	    err = pcilib_write_dma(ctx->pcilib, writeid, addr, size * sizeof(uint32_t), buf, &bytes);
 	    if ((err)||(bytes != size * sizeof(uint32_t))) {
-		printf("%i %lu write\n", err, bytes);
 		error = "Write failed";
 	        break;
 	    }
 	}
+/*
+    printf("RegRead: %i\n",pcilib_read_register(ctx->pcilib, NULL, "dma1w_counter", &regval));
+    printf("Count write (%i of %i): %lx\n", i, iterations, regval);
+
+    printf("RegRead: %i\n",pcilib_read_register(ctx->pcilib, NULL, "dma1r_counter", &regval));
+    printf("Count read (%i of %i): %lx\n", i, iterations, regval);
+
+
+    nwl_read_register(val, ctx, read_base, REG_DMA_ENG_COMP_BYTES);
+    printf("Compl Bytes (read): %lx\n", val);    
+
+    nwl_read_register(val, ctx, write_base, REG_DMA_ENG_COMP_BYTES);
+    printf("Compl Bytes (write): %lx\n", val);    
+
+    nwl_read_register(val, ctx, read_base, REG_DMA_ENG_CTRL_STATUS);
+    printf("Read DMA control (after write): %lx\n", val);    
+*/
 /*
     nwl_read_register(val, ctx, read_base, REG_DMA_ENG_CTRL_STATUS);
     printf("Read DMA control (after write): %lx\n", val);    
@@ -504,7 +535,23 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
 	us += ((cur.tv_sec - start.tv_sec)*1000000 + (cur.tv_usec - start.tv_usec));
 
 	if ((err)||(bytes != size * sizeof(uint32_t))) {
-	    printf("%i %lu read\n", err, bytes);
+/*
+    nwl_read_register(val, ctx, read_base, REG_DMA_ENG_CTRL_STATUS);
+    printf("Read DMA control: %lx\n", val);    
+    nwl_read_register(val, ctx, write_base, REG_DMA_ENG_CTRL_STATUS);
+    printf("Write DMA control: %lx\n", val);    
+    nwl_read_register(val, ctx, write_base, REG_DMA_ENG_NEXT_BD);
+    printf("After Pointer wr1: %lx\n", val);    
+    nwl_read_register(val, ctx, write_base, REG_SW_NEXT_BD);
+    printf("After Pointer wr2: %lx\n", val);    
+    pcilib_read_register(ctx->pcilib, NULL, "end_address", &regval);
+    printf("End address: %lx\n", regval);
+
+	nwl_read_register(val, ctx, read_base, REG_DMA_ENG_NEXT_BD);
+	printf("After Pointer read1: %lx\n", val);    
+	nwl_read_register(val, ctx, read_base, REG_SW_NEXT_BD);
+	printf("After Pointer read2: %lx\n", val);    
+*/
 	     error = "Read failed";
 	     break;
 	}
@@ -512,16 +559,33 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
 	if (direction == PCILIB_DMA_BIDIRECTIONAL) {
 	    res = memcmp(buf, cmp, size * sizeof(uint32_t));
 	    if (res) {
-		puts("verify");
 		error = "Written and read values does not match";
 		break;
 	    }
 	}
 	     
     }
+    
+    if (error) {
+	pcilib_warning("%s at iteration %i, error: %i, bytes: %zu", error, i, err, bytes);
+    }
+    
+/*
+    puts("Finished...");
+    nwl_read_register(val, ctx, read_base, REG_DMA_ENG_NEXT_BD);
+    printf("After Pointer read1: %lx\n", val);    
+    nwl_read_register(val, ctx, read_base, REG_SW_NEXT_BD);
+    printf("After Pointer read2: %lx\n", val);    
+
+    nwl_read_register(val, ctx, write_base, REG_DMA_ENG_NEXT_BD);
+    printf("After Pointer wr1: %lx\n", val);    
+    nwl_read_register(val, ctx, write_base, REG_SW_NEXT_BD);
+    printf("After Pointer wr2: %lx\n", val);    
+*/
 
 #ifdef NWL_GENERATE_DMA_IRQ
-    dma_nwl_disable_irq(ctx);
+    dma_nwl_disable_engine_irq(ctx, writeid);
+    dma_nwl_disable_engine_irq(ctx, readid);
 #endif /* NWL_GENERATE_DMA_IRQ */
 
     dma_nwl_stop_loopback(ctx);

+ 3 - 2
dma/nwl.h

@@ -28,9 +28,10 @@ struct nwl_dma_s {
     
     pcilib_register_bank_description_t *dma_bank;
     char *base_addr;
-    
+
+    int irq_init;			/**< indicates that IRQ subsystem is initialized (detecting which types should be preserved) */    
     pcilib_irq_type_t irq_enabled;	/**< indicates that IRQs are enabled */
-    int irq_preserve;			/**< indicates that IRQs should not be disabled during clean-up */
+    pcilib_irq_type_t irq_preserve;	/**< indicates that IRQs should not be disabled during clean-up */
     int started;			/**< indicates that DMA subsystem is initialized and DMA engine can start */
     
     pcilib_dma_engine_t n_engines;

+ 5 - 2
dma/nwl_dma.h

@@ -11,6 +11,9 @@ typedef struct nwl_dma_s nwl_dma_t;
 pcilib_dma_context_t *dma_nwl_init(pcilib_t *ctx);
 void  dma_nwl_free(pcilib_dma_context_t *vctx);
 
+int dma_nwl_enable_irq(pcilib_dma_context_t *vctx, pcilib_irq_type_t type, pcilib_dma_flags_t flags);
+int dma_nwl_disable_irq(pcilib_dma_context_t *vctx, pcilib_dma_flags_t flags);
+
 int dma_nwl_write_fragment(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, void *data, size_t *written);
 int dma_nwl_stream_read(pcilib_dma_context_t *vctx, pcilib_dma_engine_t dma, uintptr_t addr, size_t size, pcilib_dma_flags_t flags, pcilib_timeout_t timeout, pcilib_dma_callback_t cb, void *cbattr);
 double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dma, uintptr_t addr, size_t size, size_t iterations, pcilib_dma_direction_t direction);
@@ -20,8 +23,8 @@ double dma_nwl_benchmark(pcilib_dma_context_t *vctx, pcilib_dma_engine_addr_t dm
 pcilib_dma_api_description_t nwl_dma_api = {
     dma_nwl_init,
     dma_nwl_free,
-    NULL,
-    NULL,
+    dma_nwl_enable_irq,
+    dma_nwl_disable_irq,
     NULL,
     NULL,
     dma_nwl_write_fragment,

+ 38 - 4
dma/nwl_irq.c

@@ -13,12 +13,39 @@
 #include "nwl.h"
 #include "nwl_defines.h"
 
-int dma_nwl_enable_irq(nwl_dma_t *ctx, pcilib_irq_type_t type) {
+int dma_nwl_init_irq(nwl_dma_t *ctx, uint32_t val) {
+    if (val&(DMA_INT_ENABLE|DMA_USER_INT_ENABLE)) {
+	if (val&DMA_INT_ENABLE) ctx->irq_preserve |= PCILIB_DMA_IRQ;
+	if (val&DMA_USER_INT_ENABLE) ctx->irq_preserve |= PCILIB_EVENT_IRQ;
+    }
+    
+    ctx->irq_init = 1;
+    return 0;
+}
+
+int dma_nwl_free_irq(nwl_dma_t *ctx) {
+    if (ctx->irq_init) {
+	dma_nwl_disable_irq((pcilib_dma_context_t*)ctx, 0);
+	if (ctx->irq_preserve) dma_nwl_enable_irq((pcilib_dma_context_t*)ctx, ctx->irq_preserve, 0);
+	ctx->irq_enabled = 0;
+	ctx->irq_init = 0;
+    }
+    return 0;
+}
+
+int dma_nwl_enable_irq(pcilib_dma_context_t *vctx, pcilib_irq_type_t type, pcilib_dma_flags_t flags) {
     uint32_t val;
+    nwl_dma_t *ctx = (nwl_dma_t*)vctx;
     
-    if (ctx->irq_enabled == type) return 0;
+    if (flags&PCILIB_DMA_FLAG_PERMANENT) ctx->irq_preserve |= type;
 
+    if (ctx->irq_enabled == type) return 0;
+    
+    type |= ctx->irq_enabled;
+    
     nwl_read_register(val, ctx, ctx->base_addr, REG_DMA_CTRL_STATUS);
+    if (!ctx->irq_init) dma_nwl_init_irq(ctx, val);
+
     val &= ~(DMA_INT_ENABLE|DMA_USER_INT_ENABLE);
     nwl_write_register(val, ctx, ctx->base_addr, REG_DMA_CTRL_STATUS);
     
@@ -33,22 +60,28 @@ int dma_nwl_enable_irq(nwl_dma_t *ctx, pcilib_irq_type_t type) {
     return 0;
 }
 
-int dma_nwl_disable_irq(nwl_dma_t *ctx) {
+
+int dma_nwl_disable_irq(pcilib_dma_context_t *vctx, pcilib_dma_flags_t flags) {
     uint32_t val;
+    nwl_dma_t *ctx = (nwl_dma_t*)vctx;
     
     ctx->irq_enabled = 0;
     
     nwl_read_register(val, ctx, ctx->base_addr, REG_DMA_CTRL_STATUS);
+    if (!ctx->irq_init) dma_nwl_init_irq(ctx, val);
     val &= ~(DMA_INT_ENABLE|DMA_USER_INT_ENABLE);
     nwl_write_register(val, ctx, ctx->base_addr, REG_DMA_CTRL_STATUS);
     
+    if (flags&PCILIB_DMA_FLAG_PERMANENT) ctx->irq_preserve = 0;
+
     return 0;
 }
 
+
 int dma_nwl_enable_engine_irq(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
     uint32_t val;
     
-    dma_nwl_enable_irq(ctx, ctx->irq_enabled|PCILIB_DMA_IRQ);
+    dma_nwl_enable_irq(ctx, ctx->irq_enabled|PCILIB_DMA_IRQ, 0);
 
     nwl_read_register(val, ctx, ctx->engines[dma].base_addr, REG_DMA_ENG_CTRL_STATUS);
     val |= (DMA_ENG_INT_ENABLE);
@@ -68,4 +101,5 @@ int dma_nwl_disable_engine_irq(nwl_dma_t *ctx, pcilib_dma_engine_t dma) {
 }
 
 
+
 // ACK

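Note the preserve logic introduced here: dma_nwl_init_irq records which interrupt enable bits were already set in REG_DMA_CTRL_STATUS, enabling with PCILIB_DMA_FLAG_PERMANENT adds the requested type to irq_preserve, and dma_nwl_free_irq first disables everything and then re-enables whatever irq_preserve contains. A minimal sketch of that life cycle, assuming ctx is an initialized nwl_dma_t (the cast reflects the new pcilib_dma_context_t signatures):

    /* keep event IRQs armed even after the DMA subsystem is torn down */
    dma_nwl_enable_irq((pcilib_dma_context_t*)ctx, PCILIB_EVENT_IRQ, PCILIB_DMA_FLAG_PERMANENT);

    /* ... DMA transfers ... */

    /* disables all IRQs, then restores the types recorded in irq_preserve */
    dma_nwl_free_irq(ctx);
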
+ 3 - 2
dma/nwl_irq.h

@@ -1,8 +1,9 @@
 #ifndef _PCILIB_NWL_IRQ_H
 #define _PCILIB_NWL_IRQ_H
 
-int dma_nwl_enable_irq(nwl_dma_t *ctx, pcilib_irq_type_t type);
-int dma_nwl_disable_irq(nwl_dma_t *ctx);
+int dma_nwl_init_irq(nwl_dma_t *ctx, uint32_t val);
+int dma_nwl_free_irq(nwl_dma_t *ctx);
+
 int dma_nwl_enable_engine_irq(nwl_dma_t *ctx, pcilib_dma_engine_t dma);
 int dma_nwl_disable_engine_irq(nwl_dma_t *ctx, pcilib_dma_engine_t dma);
 

+ 0 - 5
irq.h

@@ -1,10 +1,5 @@
 #ifndef _PCILIB_IRQ_H
 #define _PCILIB_IRQ_H
 
-typedef enum {
-    PCILIB_DMA_IRQ = 1,
-    PCILIB_EVENT_IRQ = 2
-} pcilib_irq_type_t;
-
 
 #endif /* _PCILIB_IRQ_H */

+ 6 - 1
pcilib.h

@@ -157,6 +157,11 @@ typedef struct {
     const char *description;
 } pcilib_event_description_t;
 
+typedef enum {
+    PCILIB_DMA_IRQ = 1,
+    PCILIB_EVENT_IRQ = 2
+} pcilib_irq_type_t;
+
 typedef enum {
     PCILIB_DMA_FROM_DEVICE = 1,
     PCILIB_DMA_TO_DEVICE = 2,
@@ -204,7 +209,7 @@ void pcilib_close(pcilib_t *ctx);
 
 int pcilib_start_dma(pcilib_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags);
 int pcilib_stop_dma(pcilib_t *ctx, pcilib_dma_engine_t dma, pcilib_dma_flags_t flags);
-int pcilib_enable_irq(pcilib_t *ctx, pcilib_dma_flags_t flags);
+int pcilib_enable_irq(pcilib_t *ctx, pcilib_irq_type_t irq_type, pcilib_dma_flags_t flags);
 int pcilib_disable_irq(pcilib_t *ctx, pcilib_dma_flags_t flags);
 
 int pcilib_clear_irq(pcilib_t *ctx, pcilib_irq_source_t source);
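
Together with the cli.c hunk at the top, the updated public API is used roughly as follows; a minimal sketch of the new calling convention, with irq_source and timeout standing in for whatever the application already provides:

    err = pcilib_enable_irq(handle, PCILIB_EVENT_IRQ, 0);
    if (err) Error("Error enabling IRQs");

    err = pcilib_wait_irq(handle, irq_source, timeout, &count);
    if (err) Error("Error waiting for IRQ");

    err = pcilib_disable_irq(handle, 0);
    if (err) Error("Error disabling IRQs");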