12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709 |
- import logging
- import time
- import os
- import math
- import numpy as np
- from numpy.polynomial.polynomial import polyval
- import h5py
- import traceback
- try:
- #Compatibility Python3 and Python 2
- from .CalibrationHandle import theCalibration
- from .constants import KARA
- except:
- from CalibrationHandle import theCalibration
- from constants import KARA
- HEADER_SIZE_BYTES = 32
- RAW_FILE = 1
- RAW_FILE_NPY = 2
- TIMESCAN = 3
class DataSet(object):
    """
    Most useful functions:
        train(adc, frm, to, calibrate)
        heatmap(adc, frm, to)
        fft(adc, frm, to, turnbyturn, mean)
        readFromLog()

    ! Be aware: parameter ADC is in the range 0 to 7.
    """
    def __init__(self, filename=None, decodedData=None, rawData=None, stringData=None, delays=None, shiftFMC2=0, tRev=KARA.trev, bunchesPerTurn=KARA.h, calibrationFile=""):
        """
        Initialise the dataset.

        Use exactly one of the data sources:
        :param filename: string of the raw file to open. If available, the meta
            information is read automatically from the log file.
        :param decodedData: array of already decoded data
        :param rawData: file descriptor (e.g. np.memmap) or array of raw data
        :param stringData: raw data given as a string (e.g. from bif._bif_read_data)
        :param delays: dict containing the delay settings. Default is read from
            the log file or, if not present, {'c330':0, 'c25':0, 'c25b':4, 'f':[0,0,0,0,0,0,0,0]}
        :param shiftFMC2: compensate bucket mismatch between channels 1-4 and 5-8
            (can later be done by setShiftFMC2)
        :param tRev: revolution time, default KARA.trev
        :param bunchesPerTurn: harmonic number, default KARA.h
        :param calibrationFile: calibration to use. By default "calibration.hdf"
            in the same directory as ``filename`` is used.

        :return: -
        """
        self.fileName = filename
        if filename is not None:
            self.path = os.path.dirname(filename)
        self.type = 0
        self.header = None
        self.adcNumber = 4
        self.adcSwap = False  # compatibility for old data
        self.log = None
        self.stepper = None
        self.invert = False
        self.datax = None
        self.isAcquisition = False
        self.skippedTurns = 0

        self.tRev = tRev
        self.bunchesPerTurn = bunchesPerTurn
        self.shiftFMC2 = shiftFMC2

        # Decoded data and the sample window it currently covers.
        self.array = []
        self.frm = 0
        self.to = 0
        self.v = False  # verbose flag
        # Caches for heatmap() and fft() results.
        self._heatmaps = {}
        self._ffts = {}
        self._fftsnobunching = False
        self._f = 0
        self._t = 1

        # Delay settings: coarse 330 ps / 25 ps chips and per-ADC fine delays.
        self.c330 = 0
        self.c25 = 0
        self.c25b = 4
        self.f = []

        self.dataRead = False
        self.noFile = False
        self.calibId = "current"
        if calibrationFile != "":
            self.calibId = theCalibration.openFile(calibrationFile)
        else:
            if filename is not None:
                # Default: look for "calibration.hdf" next to the data file.
                self.calibId = theCalibration.openFile(os.path.join(self.path, 'calibration.hdf'))
            else:
                print('calibrationFile not defined - no calibration possible')
        if delays is not None:
            try:
                self.c330 = delays['c330']
                self.c25 = delays['c25']
                self.c25b = delays['c25b']
                self.f = delays['f']
            except:
                # NOTE(review): malformed delay dicts are silently ignored.
                pass
        # Exactly one data source is used, in the priority order below.
        if decodedData is not None:
            self.array = decodedData
            # Row layout is [adc0 ... adcN, bunch_number]; hence len-1 ADCs.
            self.adcNumber = len(decodedData[1]) - 1
            self.dataRead = True
            self.noFile = True
            print('load decodedData with ADCs', self.adcNumber)
        elif stringData is not None:
            logging.vinfo("Read data directly from device.")
            logging.vinfo("Read %i bytes of data" % len(stringData))
            rawData = np.fromstring(stringData, dtype=np.uint32)
            self.loadFromRawData(rawData)
            self.dataRead = True
            self.noFile = True
        elif rawData is not None:
            self.loadFromRawData(rawData)
            self.dataRead = True
            self.noFile = True
        elif filename is not None:
            if not os.path.isfile(self.fileName):
                print('file {} does not exist'.format(self.fileName))
                return
            else:
                # Decoding is deferred until first access; only meta data here.
                self.getFromLog()
        else:
            print('DataSet nothing given!')
            return
-
- def setShiftFMC2(self, shiftFMC2):
- self.shiftFMC2 = shiftFMC2
- self.to = 0 #force new decode
- def setDelays(self, delays):
- try:
- self.c330 = delays['c330']
- self.c25 = delays['c25']
- self.c25b = delays['c25b']
- self.f = np.array([float(v) for v in delays['f'][1:-1].split(',')])
- except:
- traceback.print_exc()
- pass
- def fineDelays(self):
- #print(self.header)
- if len(self.f):
- return self.f
- if self.header:
- return self.header['fine_delay_adc']
- else:
- return None
- def getCalibratedDelay(self):
- if self.datax is not None:
- return self.datax*1e-12
-
- out = []
- for i,v in enumerate(self.f):
- out.append(theCalibration.calibrateX(i, self.c330, self.c25, v, self.c25b))
-
- return np.array(out)
    def numOfBunches(self):
        """Number of bunch samples (rows) in the decoded data array."""
        return self.array.shape[0]
    def numOfTurns(self):
        """Number of turns contained in the decoded data.

        NOTE(review): this is true division, so under Python 3 the result is a
        float and may be fractional if the data holds partial turns — confirm
        whether integer division is intended.
        """
        return self.numOfBunches() / self.bunchesPerTurn
    def heatmap(self, adc=1, frm=0, to=-1, bunch_frm=0, bunch_to=-1):
        """
        Build (and cache) a bunch-vs-turn matrix for one ADC.

        :param adc: ADC index in [0, adcNumber-1]; a list uses its first element
        :param frm: first turn of the returned view
        :param to: last turn of the returned view
        :param bunch_frm: first bunch of the returned view
        :param bunch_to: last bunch of the returned view
        :return: 2D array of shape (bunches, turns), sliced to the window
        """
        # Ensure enough samples are decoded; True means new data was read.
        newone = self.loadFromFile(0, to*self.bunchesPerTurn)
        if isinstance(adc, list):
            adc = adc[0]
        if not 0 <= adc <= self.adcNumber:
            raise ValueError('adc must be in [0,{:}]'.format(self.adcNumber-1))
        # Use whole turns only so the reshape below is valid.
        l = len(self.array)
        l = (l//self.bunchesPerTurn)*self.bunchesPerTurn
        if not adc in self._heatmaps or newone:
            # One row per bunch, one column per turn.
            heatmap = self.array[:l,adc].reshape(-1, self.bunchesPerTurn).transpose()
            self._heatmaps[adc] = heatmap
        return self._heatmaps[adc][bunch_frm:bunch_to, frm:to]
    def fft(self, adc=0, frm=0, to=-1, drop_first_bin=True, nobunching=False):
        """
        Compute (and cache) FFTs of the selected ADC data.

        :param adc: single int in [0:7] or list.
        :param frm: first turn to transform
        :param to: last turn to transform
        :param drop_first_bin: drop the DC bin from the result
        :param nobunching: False - generate img with fft for every bunch;
            True - generate one fft without splitting into bunches
        :return: if param adc is a list it returns a list with length 8 in
            which the requested elements contain the array with the fft
        """
        if nobunching:
            # The 1D (train) FFT is only implemented in the list code path.
            if not isinstance(adc, list):
                adc=[adc]
        if self._fftsnobunching != nobunching:
            # Mode changed: cached spectra are no longer comparable.
            self._fftsnobunching = nobunching
            self._ffts = {}

        if isinstance(adc, list):
            out = [[],[],[],[], [],[],[],[]]
            for i in adc:
                # Reuse a cached FFT when it covers exactly the same range.
                if i in self._ffts:
                    if self._ffts[i][0] == frm and self._ffts[i][1] == to:
                        out[i]=self._ffts[i][2]
                        continue
                if self._fftsnobunching:
                    # One FFT over the whole (1D) bunch train.
                    heatmap = self.train(i,frm,to)
                else:
                    heatmap = self.heatmap(i, frm, to)
                if drop_first_bin:
                    # 1D results drop the first element, 2D the first column.
                    if self._fftsnobunching:
                        self._ffts[i] = [frm, to, self._fft(heatmap, self._fftsnobunching)[1:] ]
                    else:
                        self._ffts[i] = [frm, to, self._fft(heatmap, self._fftsnobunching)[:, 1:] ]
                else:
                    self._ffts[i] = [frm, to, self._fft(heatmap, self._fftsnobunching)]
                out[i]=self._ffts[i][2]
            return out
        else:
            # Single adc: computed directly, no caching.
            heatmap = self.heatmap(adc, frm, to)
            if drop_first_bin:
                return self._fft(heatmap)[:, 1:]
            else:
                return self._fft(heatmap)
- def fftMaxFreq(self):
- mult=1.0
- if self._fftsnobunching:
- mult = 184.0
- return ((self.numOfTurns()* mult) // 2 + 1) * self.fftFreqDist()
-
- def fftFreqDist(self):
- mult=1.0
- #if self._fftsnobunching:
- # mult = 1.0/184.0
- return 1.0/(self.numOfTurns() * (self.skippedTurns + 1) * self.tRev * mult)
    def train(self, adc=0, frm=0, to=-1, calibrate=False):
        """
        Return the raw bunch train of one or several ADCs.

        :param adc: single int in [0:7] or list. If adc is a list (e.g. [1,3])
            it returns a list of length 8 with the requested elements filled.
        :param frm: first sample
        :param to: last sample (-1 = end)
        :param calibrate: apply the y calibration to the returned values
        :return: 1D array for a single adc, otherwise np.array of length 8
        """
        self.loadFromFile(frm, to)
        # loadFromFile may decode a larger window (self.frm/self.to); map the
        # requested range into the decoded array. The last column holds the
        # bunch number and is stripped here.
        if to != self.to:
            to -= self.frm
            pdata = self.array[frm-self.frm:to, :-1]
        else:
            pdata = self.array[frm-self.frm:, :-1]
        if isinstance(adc, list):
            data = [[],[],[],[], [],[],[],[]]
            for item in range(len(data)):
                if item in adc:
                    if calibrate:
                        data[item] = theCalibration.calibrateY(pdata[:,item], item, self.calibId)
                    else:
                        data[item] = pdata[:,item]
                else:
                    # Unrequested channels are zero-filled to keep the shape.
                    data[item] = np.zeros(len(pdata))
            return np.array(data)

        if calibrate:
            return theCalibration.calibrateY(pdata[:,adc], adc, self.calibId)
        return pdata[:,adc]
- def combined(self, adc=0, frm=0, to=-1, calibrate=False, turnbyturn=False, mean=False, workingChannels=[0,1,2,3,4,5,6,7]):
- """
- generates one array with all adc
- :param adc: select the adc that will be return solo
- :param turnbyturn: default False. if set to one it repeats the x value with the maximum bunchnumber. so to overlay the turns
- :param mean: only used with turnbyturn set to True. If set to 1 it calculates the mean of all used turns
- :param working_channels: to dissable channels in the Combined data (List)
- :return: 2D List [0] contains X data and Y data of all. [1] only for selected adc
- """
- if len(workingChannels) < 2:
- raise ValueError('working_channels must have at least 2 channels; {}'.format(workingChannels))
- if turnbyturn:
- if to != -1:
- to = to//184*184
- selector = [0,1,2,3]
- if self.adcNumber > 4:
- #selector = [0,1,2,3,6,7] #currently not all chanels are working
- selector = workingChannels
- array = self.train(adc=selector, frm=frm,to=to, calibrate=calibrate).T
- array = array[:, np.array(selector)]
- if isinstance(adc, list):
- adc = adc[0]
- finedelays = np.array(self.fineDelays())
- if finedelays is None:
- finedelays = np.array([0,0,0,0])
- if self.adcNumber >4:
- finedelays = np.repeat(finedelays, 2)
- if calibrate:
- for i in range(self.adcNumber):
- finedelays[i] = (theCalibration.calibrateX(i, self.c330, self.c25, finedelays[i], self.c25b, self.calibId) - theCalibration.calibrateX(0, self.c330, self.c25, 0, self.c25b, self.calibId) )*1e12
- else:
- finedelays = finedelays*3
- if self.datax is not None:
- finedelays = self.datax - np.min(self.datax)
- finedelays = finedelays/100.0
- if not turnbyturn:
- a = np.array(np.reshape(np.repeat(np.arange(0, len(array)), len(selector)),-1),dtype=np.float)
- else:
- a = np.array(np.reshape(np.tile(np.repeat(np.arange(0, 184), len(selector)), len(array)//184),-1),dtype=np.float)
-
- b = np.reshape(np.repeat([finedelays[np.array(selector)]], len(array),0), -1)
- orig_xs = a+b
- # Remove bunch number and flatten array
- array = array.reshape((-1, 1))
- array = array.flatten()
- if turnbyturn and mean:
- array = np.mean(array.reshape((-1, 184, len(selector))),0)
- orig_xs = np.mean(orig_xs.reshape((-1, 184, len(selector))),0)
-
- array = array.reshape(-1)
- orig_xs = orig_xs.reshape(-1)
- #print(adc)
- ret = [np.array([orig_xs, array])]
- if adc not in selector:
- ret.append(np.array([0,0]))
- return ret
- if len(selector) == 6:
- if adc > 4:
- adc -= 2
- while adc not in selector:
- adc -= 1
- if adc < 0:
- adc = self.adcNumber
- ret.append(np.array([orig_xs[adc::len(selector)], array[adc::len(selector)]]))
- #print(ret)
- return ret
- def loadFromRawData(self, rawData):
- print('loadfromrawdata')
- #self.printData(rawData[:32+32])
- #print('-------------------')
- headerInfo = self.dataHasHeader(rawData)
- if True in headerInfo:
- #logging.vinfo("Header detected.")
- # We read words of 4 bytes each
- self.header = self.parseHeader(rawData, headerInfo)
- spliceWords = HEADER_SIZE_BYTES // 4
- rawData = rawData[spliceWords:]
- else:
- #logging.vinfo("No Header detected.")
- self.header = None
- self.array = self.decodeData(rawData)
- self.dataRead=True
- def printData(self, data):
- try:
- for line in range(len(data)//4):
- print('0x{:08x}: 0x{:08x} 0x{:08x} 0x{:08x} 0x{:08x}'.format(line*16, data[line*4+0], data[line*4+1], data[line*4+2], data[line*4+3]))
- except:
- traceback.print_exc()
    def loadFromFile(self, frm, to):
        """
        Ensure the sample range [frm, to] is decoded into self.array.

        On first use the file is opened (np.memmap for .out raw files,
        np.load for .npy files) and header/log meta data is read. The data
        is (re-)decoded whenever the requested range falls outside the
        currently decoded window [self.frm, self.to].

        :param frm: first requested sample
        :param to: last requested sample; -1 means "to the end of the file"
        :return: True if new data was decoded, False otherwise
        """
        if self.noFile:
            return False
        if self.dataRead == False:
            # First access: open the file.
            if '.npy' in self.fileName:
                self.type = RAW_FILE_NPY
                self.array = np.load(self.fileName)
            elif '.out' in self.fileName:
                self.type = RAW_FILE
                self.fp = np.memmap(self.fileName, dtype='uint32')

                headerInfo = self.dataHasHeader(self.fp)
                if True in headerInfo:
                    # We read words of 4 bytes each; skip the header words.
                    self.header = self.parseHeader(self.fp, headerInfo)
                    spliceWords = HEADER_SIZE_BYTES // 4
                    self.fp = self.fp[spliceWords:]
                else:
                    # No header: fall back to the log file for meta data.
                    self.header = None
                    self.getFromLog()
                self.adcNumber = self.dataAdcCount(self.fp)
            self.dataRead=True
        if to == -1:
            # "To the end": decode once and remember via self.to == -1.
            if self.to != to:
                if self.type == RAW_FILE:
                    # Each word holds 2 samples of one ADC -> frm*adcNumber/2 words.
                    self.array = self.decodeData(self.fp[frm*self.adcNumber//2:])
                self.to = to
                return True
            return False
        # Decode a margin of 3 turns beyond the requested end.
        tto = to+184*3
        if self.frm > frm or self.to < tto:
            if self.type == RAW_FILE:
                self.array = self.decodeData(self.fp[frm*self.adcNumber//2:tto*self.adcNumber//2])

            self.frm = frm
            self.to = tto
            return True
        return False
    def decodeData(self, data):
        """
        Decode raw uint32 words into an array of shape (samples, adcNumber+1).

        Each 32-bit word carries two consecutive 12-bit samples of one ADC
        ("low" in bits 0-11, "high" in bits 12-23) plus a bunch counter in
        the top bits; words of all ADCs are interleaved. The last column of
        the result holds the reconstructed bunch number.

        :param data: uint32 raw data words
        :return: uint16 array, one row per sample, last column = bunch number
        """
        self.adcNumber = self.dataAdcCount(data)
        try:
            # 0xDEADDEAD marks the end of valid data; drop everything behind it.
            end = np.where(data==0xDEADDEAD)[0][0]
            data = data[:end]
        except Exception as e:
            # No end marker present: use the whole buffer.
            pass
        # Make sure we read a multiple of adcNumber words.
        data = data[:self.adcNumber * int((math.floor(data.size // self.adcNumber)))]
        bunch_low = data & 0xfff
        bunch_high = np.right_shift(data, 12) & 0xfff
        bunch_number = np.right_shift(data, 24) & 0xfff
        bunch_low = bunch_low.reshape(-1, self.adcNumber)
        bunch_high = bunch_high.reshape(-1, self.adcNumber)
        if self.invert:
            # Mirror the samples around the 12-bit midpoint (2048).
            bunch_high = 2048 - (bunch_high-2048)
            bunch_low = 2048 - (bunch_low-2048)
        # Interleave low/high samples row-wise: even rows low, odd rows high.
        result = np.empty((bunch_low.shape[0] + bunch_high.shape[0], self.adcNumber+1), dtype=np.uint16)
        result[0::2,:self.adcNumber] = bunch_low
        result[1::2,:self.adcNumber] = bunch_high
        # The "high" sample belongs to the following bunch -> counter + 1.
        result[0::2, self.adcNumber] = bunch_number[::self.adcNumber]
        result[1::2, self.adcNumber] = bunch_number[::self.adcNumber] + 1

        if self.shiftFMC2:
            # Shift channels 0-3 against 4-7 to compensate a bucket mismatch
            # between the two FMC cards; rows are trimmed accordingly.
            if self.v: print('shift FMC2 by ', self.shiftFMC2)
            tmp = []
            for i in range(self.adcNumber+1):
                if self.shiftFMC2 > 0:
                    if i < 4:
                        tmp.append(result[self.shiftFMC2:,i])
                    else:
                        tmp.append(result[:-self.shiftFMC2,i])
                else:
                    shift = self.shiftFMC2*(-1)
                    if i < 4:
                        tmp.append(result[:-shift:,i])
                    else:
                        tmp.append(result[shift:,i])

            result = np.array(tmp, dtype=np.uint16).T
        # Truncate to whole turns.
        result = result[:int(self.bunchesPerTurn * (math.floor(result.shape[0] // self.bunchesPerTurn))), :]
        if self.v: print('decode_data ', result.shape)
        return result
- def dataAdcCount(self, data):
- bunch_number = np.right_shift(data, 24) & 0xfff
- ctr = 0
- b0 = bunch_number[0]
- for i in range(10):
- if b0 == bunch_number[i]:
- ctr += 1
- else:
- break
- if ctr < 4:
- ctr = 4
- if self.v: print('ADC number ', ctr)
- if ctr < 4:
- ctr = 4
- #return 8
- return ctr
- def dataHasHeader(self, data):
- possible_header = data[0:HEADER_SIZE_BYTES//4]
- kapture2 = (possible_header[0] & 0xFF000000) != 0
- back = possible_header[-1] & 0xF8888888 == 0xF8888888
- front = possible_header[0] & 0xF8888888 == 0xF8888888
- if self.v: print("has head", (front, back, kapture2))
- return (front, back, kapture2)
    def parseHeader(self, data, header_info, verbose=False):
        """
        Parse the file header into a dict of settings.

        Not supported for KAPTURE-2 (the field asserts below then fail and a
        partial or empty dict is returned).

        :param data: raw uint32 words starting with the header
        :param header_info: (front, back, kapture2) flags from dataHasHeader()
        :param verbose: print the parsed fields
        :return: dict with the decoded header fields (possibly incomplete)
        """
        global HEADER_SIZE_BYTES
        if header_info[2] == True:
            # KAPTURE-2 uses a larger header; adjust the module-wide size.
            HEADER_SIZE_BYTES = 64
        header = data[0:HEADER_SIZE_BYTES//4]
        parsed = dict()
        if verbose: print('parsing header')
        try:
            # Marker words identify the header and its orientation.
            assert header[0] == 0xf8888888 or header[7] == 0xf8888888, 'Field 8 is saved for data'
            assert header[1] == 0xf7777777 or header[6] == 0xf7777777, 'Field 7 is saved for data'
            assert header[2] == 0xf6666666 or header[5] == 0xf6666666, 'Field 6 is saved for data'
            # Markers at the front mean the header is stored in reversed order.
            if header[0] == 0xf8888888 and header[1] == 0xf7777777 and header[2] == 0xf6666666:
                header = header[::-1]
            # TODO: what kind of timestamp is this? counts with appr. 25MHz up?!
            parsed['timestamp'] = header[4]
            assert header[3] >> 8 == 0, 'Highest 6 x 4 bits of field 3 is supposed to be zero'
            parsed['delay_adc4'] = header[3] & 0xf
            parsed['delay_adc3'] = header[3] >> 4 & 0xf
            assert header[2] >> 16 == 0, 'Highest 4 x 4 bits of field 2 is supposed to be zero'
            parsed['delay_adc2'] = header[2] & 0xf
            parsed['delay_adc1'] = header[2] >> 4 & 0xf
            parsed['delay_th'] = header[2] >> 8 & 0xf
            parsed['delay_fpga'] = header[2] >> 12 & 0xf
            # One fine-delay byte per ADC packed into one word.
            parsed['fine_delay_adc'] = np.array([header[1] & 0xff,
                                                 header[1] >> 8 & 0xff,
                                                 header[1] >> 16 & 0xff,
                                                 header[1] >> 24 & 0xff])
            assert header[0] >> 28 == 0xF, 'Highest 4 bits of field 0 is supposed to be 0xF'
            parsed['skip_turns'] = header[0] & 0xfffffff
            if verbose: print(parsed)
        except Exception as e:
            # Best effort: return whatever fields could be parsed so far.
            pass
        return parsed
- def readLogfile(self):
- """
- return: dict with all information present in the Logfile
- """
- if self.noFile:
- return None
- logFile = os.path.join(os.path.dirname(self.fileName), 'Measurement_board_6028.log')
- if not os.path.isfile(logFile):
- return None
- log = self._readLogfile(logFile)
- file = os.path.basename(self.fileName)
- for entry in log:
- try:
- if entry['Filename'] == file:
- return entry
- except:
- pass
- return None
- def _readLogfile(self, file):
- defaultKeys = ['Number of Turns', 'Number of Skipped Turns', 'T/H Delay', '25ps Delay', 'ADC 1 Delay', 'ADC 2 Delay', 'ADC 3 Delay', 'ADC 4 Delay', 'ADC Delays', "25ps Delay 2", "T/H Delay 2", 'Stage Step']
- out = []
- with open(file,'r') as f:
- log = f.read().split('---')[1:]
- for entry in log:
- tmp={}
- ptr = 0
- for item in defaultKeys:
- tmp[item] = '?'
- if item == '25ps Delay 2':
- tmp[item] = '4'
- for item in entry.split('\n')[1:]:
- t = item.split(':')
- if len(t) == 1:
- tmp[str(ptr)] = t[0].strip('\t')
- ptr+=1
- else:
- tmp[str(t[0]).strip('\t')] = t[1].strip(' ')
- out.append(tmp)
- return out
    def getFromLog(self):
        """
        Populate delay and meta settings from the measurement log file.

        :return: True if a log entry was found and fully parsed, else False
        """
        print('getFromLog')
        self.log = self.readLogfile()
        if self.log == None:
            print('no Log found')
            return False
        try:
            self.isAcquisition=False
            try:
                # Unnamed log lines are stored under numeric string keys.
                if "Acquisition" in self.log['0']:
                    self.isAcquisition = True
                elif "Acquisition" in self.log['1']:
                    self.isAcquisition = True
            except:
                pass
            try:
                self.skippedTurns = int(self.log['Number of Skipped Turns'])
            except:
                pass
            self.c330 = int(self.log['T/H Delay'])
            self.c25 = int(self.log['25ps Delay'])
            # "ADC Delays" is stored as a string "[a, b, ...]".
            self.f = np.array([float(v) for v in self.log['ADC Delays'][1:-1].split(',')])
            try:
                self.datax = np.array([float(v) for v in self.log['datax'][1:-1].split(',')])
            except:
                self.datax = None
                pass
            self.adcNumber = len(self.f)
            # Files recorded before this unix timestamp (encoded in the file
            # name) have the fine delays of channels 1-4 and 5-8 swapped.
            file = self.log['Filename'].split('_')[-1]
            self.swap_adc = float(file[:-4]) < 1543859487
            if self.swap_adc and self.adcNumber > 4:
                if self.v: print('swapping ADC', self.log['Filename'])
                t = np.array(self.f[4:])
                self.f[4:] = self.f[:4]
                self.f[:4] = t
            self.c25b = int(self.log['25ps Delay 2'])
            try:
                self.stepper = int(self.log['Stage Step'])
            except:
                self.stepper = None
            try:
                self.workingChannels = np.array([float(v) for v in self.log['Working Channels'][1:-1].split(',')])
            except:
                self.workingChannels = None

            try:
                self.shiftFMC2 = int(self.log['shiftFMC2'])
            except:
                pass
            return True
        except:
            # Missing or malformed entries: keep the defaults and report failure.
            pass
        return False
- def _pad_array(self, array):
- #return array
- height, width = array.shape
- pwidth = int(2**np.floor(np.log2(width)))
- padded = np.zeros((height, pwidth))
- padded[:, :width] = array[:, :pwidth]
- return padded
- def _fft(self, array, is1D=False):
- start = time.time()
- print('fft', array.shape)
- if is1D:
- freqs = np.fft.rfft(array)
- else:
- freqs = np.fft.rfft(self._pad_array(array))
- #logging.debug("np fft: {} s".format(time.time() - start))
- print('FFT done {:.2f} s'.format(time.time() - start))
- return freqs
|