
removed old extensions/PeakReconstruction

Matze 4 years ago
parent
commit
48f50797f4

+ 0 - 380
KCG/extensions/PeakReconstruction/PeakReconstuction.py

@@ -1,380 +0,0 @@
-
-from scipy.optimize import curve_fit
-from scipy.stats import chisquare
-from scipy.stats import chi2_contingency
-from scipy.linalg import solve
-
-
-import numpy as np
-import os
-
-import warnings
-with warnings.catch_warnings():
-    warnings.filterwarnings("ignore", category=FutureWarning)
-    import h5py
-
-
-from time import time, sleep
-import psutil
-
-import pycuda.driver as cuda
-import pycuda.autoinit
-from pycuda.compiler import SourceModule
-import pycuda.gpuarray as gpuarray
-
-
-import filehelper as fh
-from PeakReconstuctionPlot import plot, plotFFTFile
-
-
-import scipy.constants
-class ANKA:
-    circumference = 110.4  # Meter
-    h = 184
-    frev = scipy.constants.c / circumference
-    trev = 1 / frev
-
-    frf = frev * h
-    trf = 1 / frf
-
-
-
-mod = SourceModule(open(os.path.join(os.path.dirname(__file__), "kernel.c"), "r").read())
-
-def analyze_file(filename, calibrationData=None, verbose=False, bucket=-1, outDir="", plotdir="", hdf=False, newWaterfall=False, waterfallfile=""):
-    print('=========================')
-    if verbose: print('=========================')
-    print(filename)
-    np.set_printoptions(precision=4)
-    np.set_printoptions(suppress=True)
-    
-    starttot = time()
-    starttime = time()
-
-    file = np.memmap(filename, dtype='uint32',mode='r')
-    start, stop, header = fh.read_file_specs(file, verbose) 
-    
-    
-
-    n = int((stop - start)*2//4)
-
-    if verbose: print(start, stop)
-    if verbose: print(n)
-    if verbose: print("Has Header: ", header!=None)
-
-    if header is not None:
-        datax = header['fine_delay_adc']
-        skip_turns = header['skip_turns']
-        delay = header['delay_th']
-        if verbose:
-            print('datax', datax)
-            print('delay_th', delay)
-    else:
-        datax = np.array([9,33,48,84])/3
-        skip_turns = 10
-        delay= 3
-
-    threads = 1024
-    blocks = 2147483647
-
-    ngrid = (n//2)//threads
-
-
-    if verbose: print('set up GPU')
-    
-
-    out = np.zeros(int(n*5), dtype=np.float32)
-    gpu_out = cuda.mem_alloc(out.size*out.dtype.itemsize)
-    #cuda.memset_d32(gpu_out, 0, int(n*5))
-    data = np.array(file[start:stop], dtype=np.int32)
-    gpu_in  = cuda.mem_alloc(data.size * data.dtype.itemsize) 
-
-    cuda.memcpy_htod(gpu_in, data)
-
-    baseline = np.zeros(n, dtype=np.float64)
-    calibration = np.array([[0,0,0,0],[1,1,1,1]], dtype=np.float32)
-    if calibrationData is not None:
-        for i, item in enumerate(datax):
-            calibration[0][i] = calibrationData[i][delay][int(item)][0]
-            calibration[1][i] = calibrationData[i][delay][int(item)][1]
-    calibration = calibration.reshape(4*2)
-
-    if verbose:
-        print('Calibration ', calibration)
-        print('bucket ', bucket)
-
-    
-    if verbose: print('load {} datapoints in {} threads on {} Blocks = {}'.format(n, threads, ngrid, threads*ngrid*2))
-    func = mod.get_function('load')
-    func(gpu_in, gpu_out, cuda.Out(baseline), cuda.In(calibration),  block=(threads,1,1), grid=(ngrid,1))
-
-    gpu_in.free()
-    gpu_in = gpu_out
-    #cuda.memcpy_dtoh(out, gpu_out)
-    #print(out.reshape(n,5)[0:200])
-
-    baseline = np.sum(baseline)/np.sum(baseline != 0)
-    
-
-    end = time()
-    if verbose:
-        print('=========================')
-        print('Data Read in {}sec'.format(end-starttime)) 
-        print('baseline {}'.format(baseline))
-
-
-
-    start = time()
-    ngrid = n//threads
-
-    datax = np.array(datax*3, dtype=np.float32)
-
-    x = np.array([datax**2, datax, np.ones(len(datax))], dtype=np.float32).T
-    X = (np.linalg.inv((x.T).dot(x)).dot(x.T)).astype(np.float32)
-    
-
-    gpu_matrix = X.reshape(4*3)
-    result = np.array(np.zeros(n*5), dtype=np.float32)
-    
-    gpu_out  = cuda.mem_alloc(result.size * result.dtype.itemsize) 
-    
-    bucket = np.int32(bucket)
-    if verbose: 
-        print('calculate {} datapoints in {} threads on {} Blocks = {}'.format(n, threads, ngrid, threads*ngrid))
-    func = mod.get_function('calculate')
-    func(gpu_in, cuda.In(gpu_matrix), gpu_out, cuda.In(datax), cuda.In(baseline), bucket,  block=(threads,1,1), grid=(ngrid,1))
-
-    cuda.memcpy_dtoh(result, gpu_out)
-    
-
-    end = time()
-    if verbose: print('CPU sort')
-
-    result = result.reshape(n, 5)
-    
-    #print(result[:200])
-    
-    gpu_in.free()
-    gpu_out.free()
-
-    
-
-    if verbose: 
-        print('=========================')
-        print('GPU time in {}sec'.format(end-start))
-
-
-    end = time()
-    if verbose: 
-        print('=========================')
-        print('Analyzed in {}sec = {}µs/peak'.format(end-start, (end-start)*1000000/len(result)))
-        print('saving')
-
-    if outDir != "":
-        x = filename.rfind(os.path.sep)
-        filename = os.path.join(outDir, filename[x+1:])
-    
-
-    if bucket == -1:
-        savedat = np.resize(result, (n//184, 184, 5))        
-    else:
-        savedat = np.array(result[bucket::184], dtype=np.float32)
-    
-    
-    outfile = filename[:-4] +  "_fit"
-    if hdf:
-        with h5py.File(outfile+".hdf", 'w') as f:
-            grp = f.create_group('header')
-            for k in header.keys():
-                grp[k] = header[k]
-            grp2 = f.create_group('fit_info')
-            grp2['baseline'] = baseline
-            grp2['calibration'] = calibration
-            grp2['bucket'] = bucket
-            dataset = f.create_dataset('dataset', data=savedat, compression="gzip")
-    else:
-        np.save(outfile+".npy", savedat)
-        outfile2 = filename[:-4] + "_fit_info.npy"
-        info = dict(baseline=baseline, skip_turns=skip_turns, delay=datax, delayth=delay, bucket=bucket, calibration=calibration)
-        np.save(outfile2, info)
-        
-
-    end = time()
-        
-    if verbose: print('=========================')
-    print('Total {:.2f}sec = {}µs/peak'.format((end-starttot), (end-starttot)*1000000/len(result)))
-    print('{:10} Datasets Analyzed'.format(len(result)))
-    if bucket == -1:
-        print('{:10} Datasets stored'.format(len(result)*184))
-    print('{:10} good'.format(len(savedat)))
-    print('{:8.2f} baseline'.format(baseline))
-    print("Results in " + outfile)
-
-
-    ########################################################################################
-    ### Plotting
-    
-    if plotdir != "" and bucket != -1:
-        print('Plotting to ' + plotdir)
-        start = time()
-        x = filename.rfind(os.path.sep)
-        title = filename[x+1:-4]
-        filename = os.path.join(plotdir, filename[x+1:-4])
-        plot(savedat, title, filename, skip_turns)
-
-        print('Plot done {:.2f}sec'.format((time()-start)))
-
-
-    if waterfallfile != "":
-        print("FFT start")
-        start = time()
-        generateFFTfile(savedat, waterfallfile, skip_turns, new=newWaterfall)
-        #title = waterfallfile
-        #waterfall(savedat, title, waterfallfile, skip_turns, append=appendWaterfall, generatePlot=dowaterfall, lim=(0,120000))
-        print('FFT done {:.2f}sec'.format((time()-start)))
-
-
-# 8888888888 8888888888 88888888888     8888888888 d8b 888
-# 888        888            888         888        Y8P 888
-# 888        888            888         888            888
-# 8888888    8888888        888         8888888    888 888  .d88b.
-# 888        888            888         888        888 888 d8P  Y8b
-# 888        888            888         888        888 888 88888888
-# 888        888            888         888        888 888 Y8b.
-# 888        888            888         888        888 888  "Y8888
-#
-#
-#
-def generateFFTfile(data, filename, skippedTurns=0, lim=(0, 120000), new=False):
-
-    if not os.path.isfile(filename+ ".hdf"):
-        new = True
-
-    if new:
-        f = h5py.File(filename+".hdf", 'w')
-    else:
-        f = h5py.File(filename+".hdf", 'a')
-        
-
-    fftfreq = np.fft.rfftfreq(data[:, 1].shape[0], ANKA.trev * (skippedTurns+1))
-    f1 = np.abs(np.fft.rfft(data[:, 3]))
-    f2 = np.abs(np.fft.rfft(data[:, 2])) 
-    f3 = np.abs(np.fft.rfft(data[:, 1]))
-
-    
-
-    if new: 
-        imin = np.where(fftfreq > lim[0])[0][0]
-        imax = np.where(fftfreq < lim[1])[0][-1]
-
-
-        f['indexctr'] = [0]
-        grp = f.create_group('infos')
-        grp['limHz'] = lim
-        grp['limIndex'] = (imin, imax)
-
-        grp.create_dataset('fftfreq', data=fftfreq[imin:imax], compression="gzip")
-        g1  = f.create_group('Amplitude')
-        g2  = f.create_group('Schwerpunkt')
-        g3  = f.create_group('Breite')
-    else:
-        grp = f['infos']
-        g1 = f['Amplitude']
-        g2 = f['Schwerpunkt']
-        g3 = f['Breite']
-        imin = grp['limIndex'][0]
-        imax = grp['limIndex'][1]
-
-    indexctr = f['indexctr']
-    ctr = indexctr[0]
-
-    print('ctr', str(ctr))
-
-    f1 = f1[imin:imax]
-    f2 = f2[imin:imax]
-    f3 = f3[imin:imax]
-      
-
-    g1.create_dataset(str(ctr), data=f1, compression="gzip")
-    g2.create_dataset(str(ctr), data=f2, compression="gzip")
-    g3.create_dataset(str(ctr), data=f3, compression="gzip")
-    ctr += 1
-    indexctr[0] = ctr
-
-    f.close()
-
-#                           d8b             
-#                           Y8P             
-#                                           
-#    88888b.d88b.   8888b.  888 88888b.     
-#    888 "888 "88b     "88b 888 888 "88b    
-#    888  888  888 .d888888 888 888  888    
-#    888  888  888 888  888 888 888  888    
-#    888  888  888 "Y888888 888 888  888    
-
-if __name__ == '__main__':
-    import argparse as ap
-
-    parser = ap.ArgumentParser("Sequence Skript Converter")
-    parser.add_argument('-f', type=str, default="", help="Set input file")
-    parser.add_argument('-d', type=str, default="", help="Set input directory")
-    parser.add_argument('-o', type=str, default="", help="Set output directory if desired")
-    parser.add_argument('-v', action='store_true', default=False, help="print debug info")
-    parser.add_argument('-b', type=int, default=-1, help="Set if only one Bucket is filled")
-    parser.add_argument('-c', type=str, default="", help="Calibration file")
-    parser.add_argument('-wf', type=str, default="",  help="Heatmap File (without extension!)")
-    parser.add_argument('-w',  type=int, default=2,  help="Heatmap\n 1: new\n 2: append")
-    parser.add_argument('-wp', action='store_true', default=False, help="Heatmap generate Plot")
-    parser.add_argument('-p',  type=str, default="", help="Set output directory for plots")
-    parser.add_argument('-hdf', action='store_true', default=False, help="Save results as hdf5 file")
-    
-
-    args = parser.parse_args()
-    
-
-    starttot = time()
-
-    np.set_printoptions(precision=4)
-    np.set_printoptions(suppress=True)
-    calibration = None
-
-    if args.o != "":
-        if not os.path.isdir(args.o):
-            os.makedirs(args.o)
-    if args.p != "":
-        if not os.path.isdir(args.p):
-            os.makedirs(args.p)
-
-    if args.c != "":
-        calibration = np.load(args.c)
-        
-    
-    if args.f != "":
-        filename = args.f
-        if not os.path.isfile(filename):
-            print("File '" + filename + "' not found - exit")
-            exit()
-        analyze_file(filename, calibration, args.v, args.b, args.o, plotdir=args.p, hdf=args.hdf, newWaterfall=(args.w==1), waterfallfile=args.wf)
-        if args.wp and args.wf != "":
-            plotFFTFile(args.wf + '.hdf', args.wf+'.png',  filename)
-
-    elif args.d != "":
-        filelist = os.listdir(args.d)
-        print('==================================================')
-        print("Analyzing {} with {} Files".format(args.d, len(filelist)))
-        start = time()
-        for i, file in enumerate(filelist):
-            if '.out' in file:
-                analyze_file(os.path.join(args.d,file), calibration, args.v, args.b, args.o, plotdir=args.p, hdf=args.hdf, newWaterfall=(i==0), waterfallfile=args.wf)
-        stop = time()
-
-        print('==================================================')
-        print('Analyzed in {}sec'.format(stop-start))
-        if args.wp and args.wf != "":
-            plotFFTFile(args.wf + '.hdf', args.wf + '.png', args.d)
-        
-    else:
-        print("No input defined")
-        print("use -f to input a single file")
-        print("use -d to input a directory (every .out file will be read in)")

+ 0 - 256
KCG/extensions/PeakReconstruction/PeakReconstuctionPlot.py

@@ -1,256 +0,0 @@
-
-from scipy.optimize import curve_fit, ridder
-from scipy.stats import chisquare
-from scipy.stats import chi2_contingency
-
-import warnings
-with warnings.catch_warnings():
-    warnings.filterwarnings("ignore", category=FutureWarning)
-    import h5py
-
-
-import matplotlib
-matplotlib.use("Qt4Agg")
-#matplotlib.use("Agg")
-import numpy as np, matplotlib.pyplot as plt
-
-
-import scipy.constants
-class ANKA:
-    circumference = 110.4  # Meter
-    h = 184
-    frev = scipy.constants.c / circumference
-    trev = 1 / frev
-
-    frf = frev * h
-    trf = 1 / frf
-
-from time import time, sleep
-
-import os
-os.system('')
-
-def gauss(x, sigma, mu, A):
-    return A*np.exp(-0.5*((x-mu)/sigma)**2)
-
-def CFD(args, delay=30):
-    try:
-        def gaussm(x, args):
-            #print(args)
-            sigma,mu,A = args
-            
-            return -0.7*gauss(x,sigma,mu,A)+gauss(x,sigma,mu+delay,A)
-        #print(args)
-        return ridder(gaussm, -30,150, xtol=0.001, rtol=0.0001, args=(args,))+10
-    except Exception as e:
-        print(e, args)
-        return -1
-
-# 8888888b.  888          888
-# 888   Y88b 888          888
-# 888    888 888          888
-# 888   d88P 888  .d88b.  888888
-# 8888888P"  888 d88""88b 888
-# 888        888 888  888 888
-# 888        888 Y88..88P Y88b.
-# 888        888  "Y88P"   "Y888
-#
-#
-#
-def plot(data, title, filename, skippedTurns=0, dofft=False):
-    fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='all', figsize=(20,20))
-    fig.text(0.5, 0.965, title,
-                horizontalalignment='center', color='black', weight='bold',
-                size='large')
-
-
-
-    tx = data[:, 0]*2/10e6 # ms
-    terr = data[:, 4]
-    N=7
-    ax1.set_title('Amplitude')
-    ty = data[:, 3]
-    tc = np.convolve(ty, np.ones((N,))/N, mode='valid')
-    f1 = np.abs(np.fft.rfft(ty))
-    
-    #ax1.plot(tx, ty,'.')    
-    ax1.plot(tx[N//2:-(N//2)],tc)
-
-
-    ax2.set_title('Schwerpunkt')
-    ty = data[:, 2]
-    tc = np.convolve(ty, np.ones((N,))/N, mode='valid')
-    f2 = np.abs(np.fft.rfft(ty))
-
-    #ax2.plot(tx, ty,'.')    
-    ax2.plot(tx[N//2:-(N//2)],tc)
-
-    N=19
-    ax3.set_title('Breite')
-    ty = data[:, 1]
-    tc = np.convolve(ty, np.ones((N,))/N, mode='valid')
-    
-
-    #ax3.plot(tx, ty,'.')    
-    ax3.plot(tx[N//2:-(N//2)],tc)
-    N=7
-
-    if filename != "":
-        plt.savefig(filename+ ".png", dpi=150, bbox_inches='tight')
-    
-    if dofft:
-        fftfreq = np.fft.rfftfreq(ty.shape[0], ANKA.trev * (skippedTurns+1))
-        f3 = np.abs(np.fft.rfft(data[:, 1])) 
-        f2 = np.abs(np.fft.rfft(data[:, 2])) 
-        f1 = np.abs(np.fft.rfft(data[:, 3])) 
-        
-        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='all', figsize=(20,20))
-        fig.text(0.5, 0.965, title + " fft",
-                 horizontalalignment='center', color='black', weight='bold',
-                 size='large')
-        
-        ax1.set_title('Amplitude')
-        ax1.plot(fftfreq, f1)
-        ax1.set_ylim(800, 8000000)
-        ax1.set_yscale("log", nonposy='clip')
-        ax2.set_title('Schwerpunkt')
-        ax2.plot(fftfreq, f2)
-        ax2.set_ylim(800, 870000)   
-        ax2.set_yscale("log", nonposy='clip')
-        ax3.set_title('Breite')
-        ax3.plot(fftfreq, f3)
-        ax3.set_ylim(800, 230000)
-        ax3.set_xlim(0, 30000)
-        ax3.set_yscale("log", nonposy='clip') 
-
-        if filename != "":
-            plt.savefig(filename + "_fft.png", dpi=150, bbox_inches='tight')
-
-
-    if filename == "":
-        plt.show()
-
-
-#          888          888        8888888888 8888888888 88888888888     8888888888 d8b 888
-#          888          888        888        888            888         888        Y8P 888
-#          888          888        888        888            888         888            888
-# 88888b.  888  .d88b.  888888     8888888    8888888        888         8888888    888 888  .d88b.
-# 888 "88b 888 d88""88b 888        888        888            888         888        888 888 d8P  Y8b
-# 888  888 888 888  888 888        888        888            888         888        888 888 88888888
-# 888 d88P 888 Y88..88P Y88b.      888        888            888         888        888 888 Y8b.
-# 88888P"  888  "Y88P"   "Y888     888        888            888         888        888 888  "Y8888
-# 888
-# 888
-# 888
-def plotFFTFile(fftfile, plotfile, title):
-
-    with h5py.File(fftfile, 'r') as f:
-        grp = f['infos']
-        g1 = f['Amplitude']
-        indexctr = f['indexctr']
-        ctr = indexctr[0]
-
-
-        img =[]
-        for i in range(ctr):
-            img.append(g1[str(i)][:])
-            print('range', i, end='\r')
-
-        plt.figure(figsize=(20, 20))
-        x = grp['fftfreq']
-        y = np.array(range(ctr))
-
-        print('find min max')
-        fftMin = np.min(img)
-        fftMax = np.max(img)
-
-        print('start plotting')
-
-        im = plt.pcolormesh(x, y, img, norm=matplotlib.colors.LogNorm(vmin=fftMin, vmax=fftMax, clip=True),
-                                shading='flat', cmap='inferno')
-        print('saving')
-        plt.savefig(plotfile)
-        print('plot done')
-
-
-
-#                        d8b
-#                        Y8P
-#
-# 88888b.d88b.   8888b.  888 88888b.
-# 888 "888 "88b     "88b 888 888 "88b
-# 888  888  888 .d888888 888 888  888
-# 888  888  888 888  888 888 888  888
-# 888  888  888 "Y888888 888 888  888
-#
-#
-#   
-if __name__ == '__main__':
-    
-    import argparse as ap
-
-    parser = ap.ArgumentParser("Calibration Plotting")
-    parser.add_argument('-f', type=str, default="", help="Set input file")
-    parser.add_argument('-s', action='store_true', default=False, help="Save to file")
-    parser.add_argument('-o', type=str, default="", help="Set output directory if desired")
-    parser.add_argument('-b', type=str, default="", help="Set Bunch")
-    parser.add_argument('-fft', action='store_true', default=False, help="plot FFT")
-    parser.add_argument('-w', action='store_true', default=False, help="if inputfile is hdf5 heatmap")
-
-    
-    args = parser.parse_args()
-    
-    if args.f == "":
-        print('no input file given. Use -f to set it!')
-        print('exit')
-        exit()
-
-    if args.w:
-        plotFFTFile(args.f, args.f[:-4]+'.png', args.f)
-        exit()
-
-    x = args.f.rfind(os.sep)
-    filebase = args.f[:-4] + "_info.npy"
-    num = np.load(args.f)
-    info = np.load(filebase).item()
-    print(info)
-
-    skippedTurns = info['skip_turns']
-
-    bucket = info['bucket']
-
-    if args.b != "":
-        if bucket != -1:
-            if bucket != int(args.b):
-                print('Bucket {} not in data. Only {} available'.format(args.b, bucket))
-        else: 
-            bucket = int(args.b)
-
-    if bucket == -1:
-        print('no bucket given. Use -b to set it!')
-        print('exit')
-        exit()
-
-    outfile = ""
-    
-    if args.s:    
-        outfile = args.f[:-4]
-        if args.o != "":
-            outfile = os.path.join(args.o, outfile[x+1:])
-
-
-
-    title = args.f[x+1:-4] + " Bucket {}".format(bucket)
-
-    
-    if info['bucket'] == -1:
-        data = num[bucket::184]
-    else:
-        data = num    
-
-    plot(data, title, outfile, skippedTurns, args.fft)
-    print('done')
-
-
-
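The deleted CFD() helper above determines an arrival time the constant-fraction-discriminator way: the zero crossing of the attenuated pulse minus a delayed copy of itself is largely independent of the pulse amplitude. A minimal sketch of the same root search (the helper additionally adds a fixed offset of 10 to the root, omitted here; the pulse parameters below are hypothetical):

import numpy as np
from scipy.optimize import ridder

def gauss(x, sigma, mu, A):
    return A * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

def cfd_time(sigma, mu, A, delay=30.0, frac=0.7):
    # zero crossing of -frac*pulse(x) + pulse(x shifted by `delay`),
    # bracketed over the same interval the deleted helper uses
    f = lambda x: -frac * gauss(x, sigma, mu, A) + gauss(x, sigma, mu + delay, A)
    return ridder(f, -30.0, 150.0, xtol=1e-3, rtol=1e-4)

print(cfd_time(sigma=20.0, mu=40.0, A=1000.0))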

+ 0 - 269
KCG/extensions/PeakReconstruction/filehelper.py

@@ -1,269 +0,0 @@
-import numpy as np
-import warnings
-import os
-
-import matplotlib
-matplotlib.use("Qt4Agg")
-#matplotlib.use("Agg")
-import  matplotlib.pyplot as plt
-
-FILLING = 0xDEADDEAD
-MAX_ADCS = 4
-HEADER_SIZE_WORDS = 32 // 4
-
-
-def detect_data_chunks_by_filling(data):
-    # Find data filling
-    data_filling = data == FILLING
-    # Find indexes of where filling starts and ends
-    filling_changes = np.where(np.diff(data_filling))
-    # Prefix changes with begin and end of array
-    filling_changes = np.append([-1, ], np.append(filling_changes, data.size - 1))
-
-    data_chunks = []
-    for i in range(filling_changes.size - 1):
-        boundary_pair = filling_changes[[i, i + 1]] + [1, 0]
-        inJunk = data_filling[boundary_pair]
-
-        # Detect data junk "transitions" and remember them
-        if (inJunk == [False, False]).all() or (inJunk == [True, False]).all():
-            data_length = boundary_pair[1] - boundary_pair[0]
-            data_chunks.append((data_length, boundary_pair))
-
-    return data_chunks
-
-
-def read_file_specs(file, verbose=False):
-    data_chunks = detect_data_chunks_by_filling(file)
-
-    if len(data_chunks) > 1:
-       warnings.warn('Multiple chunks of data detected! Just using the first one.')
-
-    # We can only use the first chunk, since otherwise ADC detection mixes up.
-    chunk = data_chunks[0]
-
-    data_start, data_stop = chunk[1]
-    assert data_start == 0, 'Detected chunk does not start at 0!'
-
-    extract_counter = lambda x: np.right_shift(x, 24) & 0xfff
-    
-    counter = extract_counter(file[data_start:data_start + MAX_ADCS + HEADER_SIZE_WORDS + 1])
-    # Test whether the counters for all ADCs start at zero (only problematic if not all ADCs are always saved in the out file)
-    # TODO: improve the test by doing it step by step (independent of the number of saved ADCs)
-    
-    has_header = (counter[0:MAX_ADCS - 1].any() != 0)   
-
-    header = None
-    if has_header:
-        header = file[data_start:data_start + HEADER_SIZE_WORDS]
-        data_start += HEADER_SIZE_WORDS
-        header = parse_header(header, verbose)
-
-    return data_start, data_stop, header
-
-
-def parse_header(header, verbose=False):
-    parsed = dict()
-    if verbose: print('parsing header')
-    # print([format(b, '#08X') for b in header])
-
-    assert header[0] == 0xf8888888 or header[7] == 0xf8888888, 'Field 8 is saved for data'
-    assert header[1] == 0xf7777777 or header[6] == 0xf7777777, 'Field 7 is saved for data'
-    assert header[2] == 0xf6666666 or header[5] == 0xf6666666, 'Field 6 is saved for data'
-
-    if header[0] == 0xf8888888 and header[1] == 0xf7777777 and header[2] == 0xf6666666:
-        header = header[::-1]
-
-    # TODO: what kind of timestamp is this? counts with appr. 25MHz up?!
-    parsed['timestamp'] = header[4]
-
-    assert header[3] >> 8 == 0, 'Highest 6 x 4 bits of field 3 are supposed to be zero'
-    parsed['delay_adc4'] = header[3] & 0xf
-    parsed['delay_adc3'] = header[3] >> 4 & 0xf
-
-    assert header[2] >> 16 == 0, 'Highest 4 x 4 bits of field 2 are supposed to be zero'
-    parsed['delay_adc2'] = header[2] & 0xf
-    parsed['delay_adc1'] = header[2] >> 4 & 0xf
-    parsed['delay_th']   = header[2] >> 8 & 0xf
-    parsed['delay_fpga'] = header[2] >> 12 & 0xf
-
-    parsed['fine_delay_adc'] = np.array([header[1] & 0xff, 
-                                         header[1] >> 8 & 0xff,
-                                         header[1] >> 16 & 0xff, 
-                                         header[1] >> 24 & 0xff])
-
-    assert header[0] >> 28 == 0xF, 'Highest 4 bits of field 0 are supposed to be 0xF'
-    parsed['skip_turns'] = header[0] & 0xfffffff
-
-    if verbose: print(parsed)
-    return parsed
-
-
-
-
-#  .d8888b.           888 d8b 888                       888    d8b
-# d88P  Y88b          888 Y8P 888                       888    Y8P
-# 888    888          888     888                       888
-# 888         8888b.  888 888 88888b.  888d888  8888b.  888888 888  .d88b.  88888b.
-# 888            "88b 888 888 888 "88b 888P"       "88b 888    888 d88""88b 888 "88b
-# 888    888 .d888888 888 888 888  888 888     .d888888 888    888 888  888 888  888
-# Y88b  d88P 888  888 888 888 888 d88P 888     888  888 Y88b.  888 Y88..88P 888  888
-#  "Y8888P"  "Y888888 888 888 88888P"  888     "Y888888  "Y888 888  "Y88P"  888  888
-#
-#
-#       
-
-                                                                 
-basepath = '/home/kapture/Data_kapture/2018-06-05_kapture_test_setup/'
-calibpath = 'files\\CalibrationDataKapture1\\timescans'
-def caliFile(file):
-    return os.path.join(calibpath, file)
-
-import csv
-def load_timescan(file):
-    data = np.zeros((4,16,32,2), dtype=np.float32)
-    with open(file) as f:
-        reader = csv.reader(f, delimiter=';')   
-        adc = -1
-        for row in reader:
-            #print(row)
-            if row == []:
-                pass
-            elif '#ADC' in row[0]:
-                adc += 1
-            elif '#' in row[0]:
-                pass
-            else:
-                if len(row) < 4:
-                    data[adc][int(row[0])][int(row[1])][0] = float(row[2])
-                elif len(row) < 5:
-                    data[adc][int(row[0])][int(row[1])][0] = float(row[2])
-                    data[adc][int(row[0])][int(row[1])][1] = float(row[3]) 
-                else:
-                    data[adc][int(row[0])][int(row[2])][0] = float(row[3])
-                    data[adc][int(row[0])][int(row[2])][1] = float(row[4])
-
-    return data
-
-def load_scaninfo(file):
-    with open(file, 'r') as f:
-        scaninfo = f.read().split('Scan\n')[1:]
-
-    out = []
-    for item in scaninfo:
-        tmp = {}
-        for v in item.split('\n'):
-            if '#' in v:
-                continue
-            t = v.split(':')
-            if len(t)>1:
-                if 'T/H' in str(t[0]) and 'ADC' not in str(t[0]):
-                    tmp['T/H'] = t[1].split('-')
-                elif 'fine' in str(t[0]):
-                    tmp['fine'] = t[1].split('-')
-                elif '25ps' in str(t[0]):
-                    tmp['25ps'] = t[1].split('-')
-                else:
-                    tmp[str(t[0])] = t[1].strip(' ')
-        
-        out.append(tmp)
-
-    return out
-
-def calibrate(value, adc, c,f, calibration=None):
-    if calibration is None:
-        return value
-
-    return (value-calibration[adc,c,f,0])*calibration[adc,c,f,1]
-
-def generate_timescan(rawdir,buckets=None,calibration=None):
-    files = sorted(os.listdir(rawdir))
-    out =np.zeros((4,16,32,2))
-    if buckets is None:
-        buckets = findBuckets(rawdir, files)
-    #print('buckets: ', buckets)
-    for file in files:
-        t = file.split('_')
-        c = int(t[1])
-        c25 = int(t[2])
-        f = int(t[3][:-4])
-        data = np.load(os.path.join(rawdir, file))
-        
-        
-        if c==1 and f== 25 and False:
-            print(file)
-            plt.figure()
-            plt.plot(data[:100,0], label='0')
-            plt.plot(data[:100,1], label='1')
-            plt.plot(data[:100,2], label='2')
-            plt.plot(data[:100,3], label='3')
-            plt.legend()
-            plt.show()
-        
-
-        for adc in range(4):
-            if buckets is not None:
-                m, s = makeMean(data[buckets[adc]::8,adc]) #
-            else:
-                m, s = makeMean(data[:,adc] ,15) #
-            #print(file, c,f, adc, m,s)
-            out[adc][c][f][0]= calibrate(m, adc,c,f,calibration)
-            out[adc][c][f][1]= s
-
-    return out, c, f
-
-
-def makeMean(data, threshold=None):
-    baseline = 2048
-    tmp=[]
-    if threshold is not None:
-        tmp = data[np.where((data>baseline+threshold) | (data<baseline-threshold))]
-        #print(len(tmp), len(data), end=" ")
-    else:
-        tmp = data
-
-    if len(tmp)<2:
-        tmp=[baseline,baseline]
-
-    return np.mean(tmp), np.std(tmp)
-
-
-def findBuckets(dir, filelist):
-    buckets = np.zeros(4)
-    adcs= np.zeros(4)
-    
-    for file in filelist:
-        data = np.load(os.path.join(dir, file))[:300,:]
-        for adc, found in enumerate(adcs):
-            if found<30:
-                #print(np.mean(data[:,adc]), np.max(data[:,adc]), np.min(data[:,adc]))
-                if (np.max(data[:,adc])-np.mean(data[:,adc])) > 40:
-                    buckets[adc] += int(np.argmax(data[:,adc]))%8
-                    adcs[adc]+=1
-                elif (np.mean(data[:,adc]) - np.min(data[:,adc])) > 40:
-                    #print(np.mean(data[:,adc]), np.max(data[:,adc]), np.min(data[:,adc]))
-                    #print('min',adc,file)
-                   
-                    buckets[adc] += int(np.argmin(data[:,adc]))%8
-                    adcs[adc]+=1
-            if (adcs>15).any():
-                print('Buckets Found', buckets, adcs, buckets/adcs)
-                return np.array(np.rint(buckets/adcs), dtype=np.int32)
-
-    print('No Buckets', buckets, adcs)
-
-    return None
-
-    
-
-def calc_calibration(datalow, datahigh, highvalue):
-    datal = np.reshape(datalow, 4*16*32)
-    datah = np.reshape(datahigh, 4*16*32)
-
-    calib = np.zeros(4*16*32*2)
-    
-    calib[::2]  = datal
-    calib[1::2] = (highvalue)/(datah-datal)
-
-    return np.reshape(calib, (4,16,32,2))
-
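parse_header() above is mostly bit-field surgery on eight 32-bit words. As a self-contained illustration, this is how field 1 (the four ADC fine delays, one unsigned byte each, lowest byte first) comes apart; the example word is hypothetical:

import numpy as np

def unpack_fine_delays(word):
    # field 1 packs the four ADC fine delays as one byte each,
    # lowest byte = first ADC (mirrors parse_header above)
    return np.array([(word >> shift) & 0xFF for shift in (0, 8, 16, 24)])

print(unpack_fine_delays(0x54302109))  # -> [ 9 33 48 84]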

+ 0 - 204
KCG/extensions/PeakReconstruction/kernel.c

@@ -1,204 +0,0 @@
-__global__ void calculate(float* in, float* matrix, float* out, float* datax, double* baseline, int bucket)
-{
-	//int index = threadIdx.y;
-	int idx = (threadIdx.x + blockDim.x * blockIdx.x)*5;
-	int odx = idx;
-	out[odx] = idx/5; 
-	if(bucket != -1)
-	{
-		if((idx/5)%184 != bucket)
-		{
-			out[odx+1] = 0;
-			out[odx+2] = 0;
-			out[odx+3] = 0;
-			out[odx+4] = -4;
-			return;
-		}
-	}
-	
-	const int adc = 4;
-	double tmp[4];
-	for(int i = 0; i < adc; i++)
-	{
-		tmp[i] = in[idx+1+i] - baseline[0];
-		if(tmp[i] < 0.01)
-		{				
-			out[odx+1] = 0;
-			out[odx+2] = 0;
-			out[odx+3] = 0;
-			out[odx+4] = -2;
-			return;
-		}
-		else
-		{
-			tmp[i] = log(tmp[i]);
-		}			
-	}
-
-	double popt[3];
-	double res[4];
-	
-
-	popt[0] = matrix[0+0]*tmp[0] + matrix[0+1]*tmp[1] + matrix[0+2]*tmp[2] + matrix[0+3]*tmp[3] ;
-	popt[1] = matrix[4+0]*tmp[0] + matrix[4+1]*tmp[1] + matrix[4+2]*tmp[2] + matrix[4+3]*tmp[3] ;
-	popt[2] = matrix[8+0]*tmp[0] + matrix[8+1]*tmp[1] + matrix[8+2]*tmp[2] + matrix[8+3]*tmp[3] ;
-
-	res[0] = sqrt(1/(2*abs(popt[0])));
-	res[1] = -popt[1]/(2*popt[0]);
-	res[2] = exp(popt[2] - popt[1]*popt[1]/(4*popt[0]));
-	res[3] = 0;
-
-	int stop = 0;
-	if(res[0] <=0 || res[0] > 120) stop = 1;
-	if(res[1] <=0 || res[1] > 150) stop = 1;
-	if(res[2] <=0 || res[2] > 2000) stop = 1;
-	if(stop)
-	{
-		out[odx+1] = 0;
-		out[odx+2] = 0;
-		out[odx+3] = 0;
-		out[odx+4] = -1;
-		return;
-	}
-
-	for(int i=0; i < adc; i++)
-	{
-		tmp[i] =res[2] * exp(-0.5 * pow( (datax[i]-res[1])/res[0], 2 ));
-		res[3] += pow((in[idx+i+1]-baseline[0])-tmp[i], 2);
-	}
-	/*
-	res[0] = popt[0];
-	res[1] = popt[1];
-	res[2] = popt[2];
-	res[3] = 1;
-	*/
-	
-	out[odx+1] = res[0];//*1000;
-	out[odx+2] = res[1];//*1000;
-	out[odx+3] = res[2];//*1000;
-	out[odx+4] = res[3]*0.25;//*1000;
-	
-	/*
-	if(out[odx+4] > 100*1)
-	{ // perform LVM Fit
-		//double val[adc];
-		for(int i = 0; i < adc; i++)
-		{
-			tmp[i] = in[idx+1+i] - baseline[0];
-		}
-		//float popt[3];
-		popt[0] = 60;
-		popt[1] = 30;
-		popt[2] = 200;
-		int erg= levmarq(popt, adc, tmp);
-		out[odx+1] = popt[0]* 1000;
-		out[odx+2] = popt[1]*1000;
-		out[odx+3] = popt[2]*1000;
-		out[odx+4] = 500 + erg;
-	}
-	*/
-}
-
-/*
-# 888                             888
-# 888                             888
-# 888                             888
-# 888       .d88b.   8888b.   .d88888
-# 888      d88""88b     "88b d88" 888
-# 888      888  888 .d888888 888  888
-# 888      Y88..88P 888  888 Y88b 888
-# 88888888  "Y88P"  "Y888888  "Y88888
-#
-*/
-
-__global__ void load(int* in, float* out, double* baseline, float* calibration)
-{
-	const int adc = 4;
-	const int idx = (threadIdx.x + blockDim.x * blockIdx.x)*adc;
-	const int odx = (threadIdx.x + blockDim.x * blockIdx.x)*(adc+1)*2;
-	const int index = (threadIdx.x + blockDim.x * blockIdx.x)*2;
-
-
-	out[odx] = index;
-	out[odx+5] = index+1;
-	//double tmp0[4] = 0;
-	//double tmp1[4] = 0;
-	
-	for(int i =0; i<adc; i++)
-	{
-		out[odx+1+i]     = ((in[idx+i] & 0xfff)     - calibration[i]) * calibration[i+adc];
-		out[odx+adc+2+i] = ((in[idx+i]>>12 & 0xfff) - calibration[i]) * calibration[i+adc];
-	}
-
-
-	//Baseline----------------------------
-	double mean0 = 0;
-	double mean1 = 0;
-
-	for(int i =0; i<adc; i++)
-	{
-		mean0 += out[odx+1+i];
-		mean1 += out[odx+adc+2+i];
-	}		
-
-	mean0 /= adc;
-	mean1 /= adc;
-	double std0 = 0;
-	double std1 = 0;
-	for(int i=0; i < adc; i++)
-	{
-		std0 += pow(out[odx+1+i]-mean0, 2);
-		std1 += pow(out[odx+adc+2+i]-mean1, 2);
-	}
-	std0 = sqrt(std0/adc);
-	std1 = sqrt(std1/adc);
-
-	if(std0 < 7)
-	{
-		baseline[index] = mean0;
-	}
-	if(std1 < 7)
-	{
-		baseline[index+1] = mean1;
-	}
-
-}
-
-__global__ void reduce(float* in, int* length)
-{
-	const int adc = 4;
-	int len = length[0];
-	int offset = (threadIdx.x + blockDim.x * blockIdx.x)*(adc+1)*len;
-
-	__shared__ int sdata[1024];
-
-	int ptr = 0;
-	for(int i=0; i < len; i++)
-	{
-		if(in[offset+i*(adc+1)+1] > 0)
-		{
-			memcpy(in+offset+ptr*(adc+1), in+offset+i*(adc+1),5*sizeof(float) );
-
-			ptr ++;
-		}
-	}
-
-	sdata[threadIdx.x] = ptr;
-
-	__syncthreads();
-
-	
-	if(threadIdx.x == 0)
-	{
-		offset = blockDim.x * blockIdx.x*len*(adc+1);
-		ptr = 0;
-		for(int i=0; i < 1024; i++)
-		{
-			memcpy(in + offset +ptr, in+offset+i*len*(adc+1), sdata[i]*5*sizeof(float));
-			ptr += sdata[i]*5;
-		}
-		length[blockIdx.x+2] = ptr/5;
-		length[1] = (adc+1)*len;
-	}
-	
-}
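Host-side, the unpacking the deleted `load` kernel performs is compact: every 32-bit input word carries two 12-bit ADC samples (bits 0-11 and 12-23), and each sample gets the two-point calibration (raw - offset) * gain. A NumPy sketch with hypothetical words and a scalar calibration (the kernel uses a per-ADC offset and gain):

import numpy as np

def unpack_words(words, offset, gain):
    # bits 0-11: first sample, bits 12-23: second sample of the same ADC
    s0 = (words & 0xFFF).astype(np.float32)
    s1 = ((words >> 12) & 0xFFF).astype(np.float32)
    # two-point calibration as in the `load` kernel
    return (s0 - offset) * gain, (s1 - offset) * gain

words = np.array([0x00855821, 0x00866832], dtype=np.uint32)  # hypothetical raw words
print(unpack_words(words, offset=2048.0, gain=1.0))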

+ 0 - 217
KCG/extensions/PeakReconstruction/levmar.c

@@ -1,217 +0,0 @@
-#define TOL 1e-30 /* smallest value allowed in cholesky_decomp() */
-__device__ float gauss(float x, float* param)
-{
-   return param[2]*exp(-0.5*pow((x-param[1])/param[0], 2));
-}
-
-__device__ void gaussgrad(float x, float* param, float*g)
-{
-   float ex = exp(-0.5*pow((x-param[1])/param[0], 2));
-   g[0] = param[2]* ex *pow(x-param[1],2) /pow(param[0],3);
-   g[1] = param[2]* ex * (x-param[1]) /pow(param[0],2);
-   g[2] = ex;
-}
-
-/* calculate the error function (chi-squared) */
-__device__ float error_func(float *par, int ny, float *y)//, float *dysq)
-{ 
-   int x;
-   float res, e=0;
-
-   for (x=0; x<ny; x++) {
-      res = gauss(x, par) - y[x];
-      //if (dysq)  /* weighted least-squares */
-      //   e += res*res/dysq[x];
-      //else
-         e += res*res;
-   }
-   return e;
-}
-
-/* solve the equation Ax=b for a symmetric positive-definite matrix A,
-    using the Cholesky decomposition A=LL^T.  The matrix L is passed in "l".
-    Elements above the diagonal are ignored.
-*/
-__device__ void solve_axb_cholesky(int n, float l[3][3], float* x, float* b)
-{
-   int i,j;
-   float sum;
-
-   /* solve L*y = b for y (where x[] is used to store y) */
-
-   for (i=0; i<n; i++) 
-   {
-      sum = 0;
-      for (j=0; j<i; j++)
-      {
-         sum += l[i][j] * x[j];
-      }
-      x[i] = (b[i] - sum)/l[i][i];      
-   }
-
-   /* solve L^T*x = y for x (where x[] is used to store both y and x) */
-
-   for (i=n-1; i>=0; i--) 
-   {
-      sum = 0;
-      for (j=i+1; j<n; j++)
-      {
-         sum += l[j][i] * x[j];
-      }
-      x[i] = (x[i] - sum)/l[i][i];      
-   }
-}
-
-
-/* This function takes a symmetric, positive-definite matrix "a" and returns
-    its (lower-triangular) Cholesky factor in "l".  Elements above the
-    diagonal are neither used nor modified.  The same array may be passed
-    as both l and a, in which case the decomposition is performed in place.
-*/
-__device__ int cholesky_decomp(int n, float l[3][3], float a[3][3])
-{
-   int i,j,k;
-   float sum;
-
-   for (i=0; i<n; i++) 
-   {
-      for (j=0; j<i; j++) 
-      {
-         sum = 0;
-         for (k=0; k<j; k++)
-         {
-            sum += l[i][k] * l[j][k];
-         }
-         l[i][j] = (a[i][j] - sum)/l[j][j];
-      }
-
-      sum = 0;
-      for(k=0; k<i; k++)
-      {
-         sum += l[i][k] * l[i][k];
-      }
-      sum = a[i][i] - sum;
-      if (sum<TOL) return 1; /* not positive-definite */
-      l[i][i] = sqrt(sum);
-   }
-   return 0;
-}
-
-/* perform least-squares minimization using the Levenberg-Marquardt
-    algorithm.  The arguments are as follows:
-
-    npar    number of parameters
-    par     array of parameters to be varied
-    ny      number of measurements to be fit
-    y       array of measurements
-    dysq    array of error in measurements, squared
-                (set dysq=NULL for unweighted least-squares)
-    func    function to be fit
-    grad    gradient of "func" with respect to the input parameters
-    fdata   pointer to any additional data required by the function
-    lmstat  pointer to the "status" structure, where minimization parameters
-                are set and the final status is returned.
-
-    Before calling levmarq, several of the parameters in lmstat must be set.
-    For default values, call levmarq_init(lmstat).
- */
-__device__ int levmarq(float *par, int ny, float *y)//, float *dysq)
-{
-   const int npar = 3;
-   int x,i,j,it,nit,ill;
-   float lambda,up,down,mult,weight,err,newerr,derr,target_derr;
-   float h[npar][npar];
-   float ch[npar][npar];
-   float g[npar], d[npar];
-   float delta[npar], newpar[npar];
-
-   
-   nit = 1000; //max_it;
-   lambda = 0.001; //
-   up = 10; //up_factor;
-   down = 0.1f; //down_factor
-   target_derr = 0.000001; //lmstat->target_derr;
-   weight = 1;
-   derr = newerr = 0; /* to avoid compiler warnings */
-
-   /* calculate the initial error ("chi-squared") */
-   err = error_func(par, ny, y);//, dysq);
-
-   /* main iteration */
-   for (it=0; it<nit; it++) 
-   {
-      /* calculate the approximation to the Hessian and the "derivative" d */
-      for (i=0; i<npar; i++) 
-      {
-         d[i] = 0;
-         for (j=0; j<=i; j++)
-         {
-            h[i][j] = 0;
-         }
-      }
-      
-      for (x=0; x<ny; x++) 
-      {
-         //if (dysq) weight = 1/dysq[x]; /* for weighted least-squares */
-         gaussgrad(x, par, g);//, fdata);
-         for (i=0; i<npar; i++) 
-         {
-            d[i] += (y[x] - gauss(x, par))*g[i]*weight;
-            for (j=0; j<=i; j++)
-            {
-               h[i][j] += g[i]*g[j]*weight;
-            }
-         }
-      }
-
-      /*  make a step "delta."  If the step is rejected, increase
-          lambda and try again */
-      mult = 1 + lambda;
-      ill = 1; /* ill-conditioned? */
-      while (ill && (it<nit)) 
-      {
-         for (i=0; i<npar; i++)
-         {
-            h[i][i] = h[i][i]*mult;
-         }
-
-         ill = cholesky_decomp(npar, ch, h);
-
-         if (!ill) 
-         {
-            solve_axb_cholesky(npar, ch, delta, d);
-            for (i=0; i<npar; i++)
-            {
-               newpar[i] = par[i] + delta[i];
-            }
-            newerr = error_func(newpar, ny, y);//, dysq);
-            derr = newerr - err;
-            ill = (derr > 0);
-         } 
-         
-         if (ill)
-         {
-            mult = (1 + lambda*up)/(1 + lambda);
-            lambda *= up;
-            it++;
-         }
-      }
-      for (i=0; i<npar; i++)
-      {
-         par[i] = newpar[i];
-      }
-      err = newerr;
-      lambda *= down;  
-
-      if ((!ill)&&(-derr<target_derr)) break;
-   }
-
-   /*
-   lmstat->final_it = it;
-   lmstat->final_err = err;
-   lmstat->final_derr = derr;
-   */
-   return it;
-}
-
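levmar.c is a device-side Levenberg-Marquardt with an in-register Cholesky solve, apparently kept as a refit fallback for peaks the log-parabola fit handles poorly (see the commented-out block in the `calculate` kernel). On the host, the same damped least-squares refit is a single SciPy call; a sketch on synthetic data (positions and true parameters are hypothetical):

import numpy as np
from scipy.optimize import curve_fit

def gauss(x, sigma, mu, A):
    return A * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

# four baseline-subtracted samples from a synthetic peak
datax = np.array([3.0, 11.0, 16.0, 28.0])
samples = gauss(datax, 8.0, 15.0, 1100.0)

# curve_fit wraps the same Levenberg-Marquardt idea levmarq() implements
popt, _ = curve_fit(gauss, datax, samples, p0=(10.0, 14.0, 1000.0))
print(popt)  # approximately [8. 15. 1100.]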