123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947 |
- import logging
- import time
- import os
- import math
- import numpy as np
- from numpy.polynomial.polynomial import polyval
- import h5py
- import traceback
- try:
- #Compatibility Python3 and Python 2
- from .CalibrationHandle import theCalibration
- from .constants import KARA
- from . import board
- from .board import available_boards
- except:
- from CalibrationHandle import theCalibration
- from constants import KARA
- #FIXME: This is device dependent and needs to be in the config file!!
- HEADER_SIZE_BYTES = 32
- #FIXME: Python Dictionaries? Never heard of them...
- RAW_FILE = 1
- RAW_FILE_NPY = 2
- TIMESCAN = 3
- class DataSet(object):
- """
- Most usefull functions:
- train(adc,frm,to,calibrate)
- heatmap(adc, frm,to)
- fft(adc,frm,to,trunbyturn,mean)
- readFromLog()
- ! Be aware: parameter ADC is in range of 0 to 7
- """
- def __init__(self, filename=None, decodedData=None, rawData=None, stringData=None, delays=None, shiftFMC2=0, tRev=KARA.trev, bunchesPerTurn=KARA.h, calibrationFile=""):
- """
- Initialise the dataset
- use one of:
- :param filename: string of the rawfile to open. If available it reads automatically the meta information from the Logfile
- :param decodedData: array of already decoded Data
- :param rawData: filedescriptor (eg. np.memmap) or array of rawdata
- :param stringData: handling rawdata given as string (eg. from bif._bif_read_data)
- :param delays: dict containing the delaysettings. default is read from LogFile or if not present {'c330':0, 'c25':0, 'c25b':4, 'f':[0,0,0,0,0,0,0,0]}
- :param shiftFMC2: compensate Bucketmissmatch between channel 1-4 and 5-8 (can later be done by setShiftFMC2)
- :param tRev: default KARA.trev
- :param bunchesPerTrun: default KARA.h
- :param calibrationFile: define the to use calibration. by default it looks for "calibration.hdf" in the same dic as the filename
- :return: -
- """
- self.fileName = filename
- if filename is not None:
- self.path = os.path.dirname(filename)
- self.type = 0
- self.header = None
- self.adcNumber = 4
- self.adcSwap = False #compatibility for old data
- self.log = None
- self.stepper = None
- self.invert = False
- self.datax = None
- self.isAcquisition=False
- self.skippedTurns = 0
- self.tRev = tRev
- self.bunchesPerTurn = bunchesPerTurn
- self.shiftFMC2 = shiftFMC2
- self.array = []
- self.frm = 0
- self.to = 0
- self.v = False
- self._heatmaps = {}
- self._ffts = {}
- self._fftsnobunching = False
- self._f = 0
- self._t = 1
- self.c330 = 0
- self.c25 = 0
- self.c25b = 4
- self.f = []
- self.dataRead = False
- self.noFile = False
- self.calibId = "current"
- if calibrationFile != "":
- self.calibId = theCalibration.openFile(calibrationFile)
- else:
- if filename is not None:
- self.calibId = theCalibration.openFile(os.path.join(self.path,'calibration.hdf'))
- else:
- print('calibrationFile not defined - no calibration possible')
- if delays is not None:
- try:
- self.c330 = delays['c330']
- self.c25 = delays['c25']
- self.c25b = delays['c25b']
- self.f = delays['f']
- except:
- pass
- if decodedData is not None:
- self.array = decodedData
- self.adcNumber = len(decodedData[1])-1
- self.dataRead = True
- self.noFile = True
- print('load decodedData with ADCs', self.adcNumber)
- elif stringData is not None:
- logging.vinfo("Read data directly from device.")
- logging.vinfo("Read %i bytes of data" % len(stringData))
- rawData = np.fromstring(stringData, dtype=np.uint32)
- self.loadFromRawData(rawData)
- self.dataRead = True
- self.noFile = True
- elif rawData is not None:
- self.loadFromRawData(rawData)
- self.dataRead = True
- self.noFile = True
- elif filename is not None:
- if not os.path.isfile(self.fileName):
- print('file {} does not exist'.format(self.fileName))
- return
- else:
- self.getFromLog()
- else:
- print('DataSet nothing given!')
- return
- def setShiftFMC2(self, shiftFMC2):
- self.shiftFMC2 = shiftFMC2
- self.to = 0 #force new decode
- def setDelays(self, delays):
- try:
- self.c330 = delays['c330']
- self.c25 = delays['c25']
- self.c25b = delays['c25b']
- self.f = np.array([float(v) for v in delays['f'][1:-1].split(',')])
- except:
- traceback.print_exc()
- pass
- def fineDelays(self):
- #FIXME: THIS IS A TRIAGE!
- # This is a bandaid patch to make peak reconstruction at least *somewhat*
- # working again. We will only use the finedelays from current
- # configuration. This will inevitably be incorrect, if data is stored to
- # harddrive and then opened again in a new session with different settings
- # and configuration. Or on a different KAPTURE system alltogether. So we
- # will eventually need to think about a way to couple calibration,
- # configuration and measurment data together into one 'container' so it
- # can be re-used and moved between system boundaries, without losing
- # details.
- # if len(self.f):
- # return self.f
- # if self.header:
- # return self.header['fine_delay_adc']
- # else:
- # return None
- adcdelays_ps = [0 for i in range(self.adcNumber)]
- #FIXME: I am just *guessing* that available_boards[0] is the right one,
- # since support for multiple boards has been phazed out a while ago, but
- # still lingers in the codebase as a remnant. So [0] is a good guess, for
- # now.
- board_config = board.get_board_config(available_boards[0])
- bunches_per_turn = board_config.get("bunches_per_turn")
- time_between_bunches_s = self.tRev / bunches_per_turn
- #Convert seconds to picosecods
- time_between_bunches_ps = time_between_bunches_s * (1000**4)
- finedelay_factor = board_config.get("chip_delay_factor")
- #TRIAGE:
- #FMC Banks are currently (18.12.2020) switched, so the slots for the
- #ADC delay slots 1,2,3,4 are for ADCs 5,6,7,8 and vice versa
- fdelays = board_config.get("chip_delay")
- finedelays = fdelays[:4] + fdelays[4:]
-
- finedelays = [i * finedelay_factor for i in finedelays]
- #FIXME:
- #NOTE:
- # Michele Caselle requested, that the 'dead space' between each
- # individual bunch be removed. This means, we do not calculate the actual
- # time in ps between each bunch, based on the machine geometry and
- # configuration, but just assume that the next bunch starts exactly 3ps
- # (the smallest possible fine-delay) after the last bunch has ended.
- # Therefore, we just assume that the time between bunches is at max the
- # highest fine-delay we con configure + 3
- time_between_bunches_ps = 3
- finedelay_max = board_config.get("chip_delay_max")
- time_between_bunches_ps += finedelay_max * finedelay_factor
- #FIXME: We need a better way to understand how the ADCs are distributed
- # across the FMC connectors. Currently, I have to know that we have 8 ADCs
- # distributed as 4 ADCs onto 2 FMCs, and get the delays from the board
- # config accordingly. Not good. We should find a way to do this
- # programmatically.
- fmc1_delay = board_config.get("delay_330_th") * board_config.get("delay_330_factor")
- fmc1_delay += board_config.get("delay_25_th") * board_config.get("delay_25_factor")
- fmc2_delay = board_config.get("delay_330_th_2") * board_config.get("delay_330_factor")
- fmc2_delay += board_config.get("delay_25_th_2") * board_config.get("delay_25_factor")
- adcdelays_ps[:4] = [i + fmc1_delay for i in finedelays[:4]]
- adcdelays_ps[4:] = [i + fmc2_delay for i in finedelays[4:]]
- #NOTE:
- # The Track-and-Hold (TH) timings do not have an impact on the actual
- # distribution of data across the time domain! The THs only "latches" an
- # analogue value at a certain point in time and holds it, so the ADCs
- # have time to convert the analogue value into a digital one.
- adcdelays_ps = finedelays
- #Express the delays as fractions of full timing distance between bunches
- adcdelays_ps = [float(i) / time_between_bunches_ps for i in adcdelays_ps]
- #print("###############################")
- #print(finedelays)
- #print(adcdelays_ps)
- #print(board_config.dump())
- #print("###############################")
- return adcdelays_ps
- def getCalibratedDelay(self):
- if self.datax is not None:
- return self.datax*1e-12
- out = []
- for i,v in enumerate(self.f):
- out.append(theCalibration.calibrateX(i, self.c330, self.c25, v, self.c25b))
- return np.array(out)
- def numOfBunches(self):
- return self.array.shape[0]
- def numOfTurns(self):
- return self.numOfBunches() / self.bunchesPerTurn
- def heatmap(self, adc=1, frm=0, to=-1, bunch_frm=0, bunch_to=-1):
- newone = self.loadFromFile(0, to*self.bunchesPerTurn)
- if isinstance(adc, list):
- adc = adc[0]
- if not 0 <= adc <= self.adcNumber:
- raise ValueError('adc must be in [0,{:}]'.format(self.adcNumber-1))
- l = len(self.array)
- l = (l//self.bunchesPerTurn)*self.bunchesPerTurn
- if not adc in self._heatmaps or newone:
- heatmap = self.array[:l,adc].reshape(-1, self.bunchesPerTurn).transpose()
- self._heatmaps[adc] = heatmap
- return self._heatmaps[adc][bunch_frm:bunch_to, frm:to]
- def fft(self, adc=0, frm=0, to=-1, drop_first_bin=True, nobunching=False):
- """
- :param adc: single int in [0:7] or list.
- :param nobunching: False - generate img with fft for every bunch; True - generate one fft without splitting into bunches
- :return: if param adc is a list it returns a list with lenght 8 in which the requested elements containing the array with the fft
- """
- if nobunching:
- if not isinstance(adc, list):
- adc=[adc]
- if self._fftsnobunching != nobunching:
- self._fftsnobunching = nobunching
- self._ffts = {}
- if isinstance(adc, list):
- #print('list', adc)
- out = [[],[],[],[], [],[],[],[]]
- for i in adc:
- if i in self._ffts:
- if self._ffts[i][0] == frm and self._ffts[i][1] == to:
- #print('use stored FFT')
- out[i]=self._ffts[i][2]
- continue
- if self._fftsnobunching:
- heatmap = self.train(i,frm,to)
- else:
- heatmap = self.heatmap(i, frm, to)
- if drop_first_bin:
- if self._fftsnobunching:
- self._ffts[i] = [frm, to, self._fft(heatmap, self._fftsnobunching)[1:] ]
- else:
- self._ffts[i] = [frm, to, self._fft(heatmap, self._fftsnobunching)[:, 1:] ]
- else:
- self._ffts[i] = [frm, to, self._fft(heatmap, self._fftsnobunching)]
- out[i]=self._ffts[i][2]
- return out
- else:
- #print('single', adc)
- heatmap = self.heatmap(adc, frm, to)
- if drop_first_bin:
- return self._fft(heatmap)[:, 1:]
- else:
- return self._fft(heatmap)
- def fftMaxFreq(self):
- mult=1.0
- if self._fftsnobunching:
- mult = 184.0
- return ((self.numOfTurns()* mult) // 2 + 1) * self.fftFreqDist()
- def fftFreqDist(self):
- mult=1.0
- #if self._fftsnobunching:
- # mult = 1.0/184.0
- return 1.0/(self.numOfTurns() * (self.skippedTurns + 1) * self.tRev * mult)
- def train(self, adc=0, frm=0, to=-1, calibrate=False):
- #FIXME: This method seems pretty superfluous (with maybe the exception of
- # the calibration stuff).
- # Essentially all it does is just truncating data for a second time, and
- # reorders data so we access ADC data as 'data[adc][bunch]' rather than
- # 'data[:,adc][bunch]'. What's the point? Really just to save TWO characters
- # of coding? (:,)
- # Also, returning different data structures depending on the given
- # parameters is VERY DANGEROUS!!
- # If we give 'adc' as a single integer, it returns a 1-Dimensional array
- # (basically, a list) and if we give 'adc' as a list, it reutrns a
- # 2-dimensional array?
- # This is basically guaranteed to sooner or later cause trouble down the
- # line...
- """
- params: adc: single int in [0:7] or list. if adc is a list (eg. [1,3]) it returns a list of length 8 with the requested elements filled
- """
- self.loadFromFile(frm, to)
- #FIXME: Why are there two truncation parameters?
- #One passed in from the function call and one from the DatSet object?
- if to != self.to:
- to -= self.frm
- pdata = self.array[frm-self.frm:to, :-1]
- else:
- pdata = self.array[frm-self.frm:, :-1]
- #print("train", adc)
- #print(pdata)
- if isinstance(adc, list):
- #FIXME: What happens if we have more than 8 ADCs in the future?
- #This should NOT be hardcoded!!!
- data = [[],[],[],[], [],[],[],[]]
- for item in range(len(data)):
- if item in adc:
- if calibrate:
- data[item] = theCalibration.calibrateY(pdata[:,item], item, self.calibId)
- else:
- data[item] = pdata[:,item]
- else:
- data[item] = np.zeros(len(pdata))
- #print('train', len(data))
- #Note: Data is now stored as
- #[[ADC 1: Bunch1, Bunch2, Bunch3 ... Bunch n]
- # [ADC 2: Bunch1, Bunch2, Bunch3 ... Bunch n]
- # [ADC 3: Bunch1, Bunch2, Bunch3 ... Bunch n]
- # ...
- # [ADC m: Bunch1, Bunch2, Bunch3 ... Bunch n]]
- return np.array(data)
- if calibrate:
- return theCalibration.calibrateY(pdata[:,adc], adc, self.calibId)
- #FIXME: Missing range checking for ADC
- #This should ideally happen at the BEGINNING of the function
- return pdata[:,adc]
- def combined(self, adc=0, frm=0, to=-1, calibrate=False, turnbyturn=False, mean=False, workingChannels=[0,1,2,3,4,5,6,7]):
- """
- generates one array with all adc
- :param adc: select the adc that will be return solo
- :param turnbyturn: default False. if set to one it repeats the x value with the maximum bunchnumber. so to overlay the turns
- :param mean: only used with turnbyturn set to True. If set to 1 it calculates the mean of all used turns
- :param working_channels: to dissable channels in the Combined data (List)
- :return: 2D List [0] contains X data and Y data of all. [1] only for selected adc
- """
- if len(workingChannels) < 2:
- #FIXME: This should not be an exception, but rather an expectation
- #Error handling could be improved
- raise ValueError('working_channels must have at least 2 channels; {}'.format(workingChannels))
- if turnbyturn:
- if to != -1:
- to = to//184*184
- selector = [0,1,2,3]
- if self.adcNumber > 4:
- #selector = [0,1,2,3,6,7] #currently not all chanels are working
- selector = workingChannels
- #FIXME: WHY are we using .train() to re-arrange our data to be in
- # ADC-Bunches order, just to transpose it back into Bunch-ADC order, which
- # is EXACTLY what the raw data was to begin with...?
- array = self.train(adc=selector, frm=frm,to=to, calibrate=calibrate).T
- array = array[:, np.array(selector)]
- #At this point, 'array' is basically just a truncated version of the
- #raw-data in 'self.array' without the bunch-numbers
- if isinstance(adc, list):
- adc = adc[0]
- finedelays = np.array(self.fineDelays())
- if finedelays is None:
- finedelays = np.array([0,0,0,0])
- if self.adcNumber >4:
- #FIXME: Highly dangerous! What happens if we will have more than
- #8 ADCs in the future?
- finedelays = np.repeat(finedelays, 2)
- if calibrate:
- for i in range(self.adcNumber):
- finedelays[i] = (theCalibration.calibrateX(i, self.c330, self.c25, finedelays[i], self.c25b, self.calibId) - theCalibration.calibrateX(0, self.c330, self.c25, 0, self.c25b, self.calibId) )*1e12
- else:
- #FIXME:
- #WHAT DOES THIS RANDOM x3 DO!?
- #finedelays = finedelays*3
- pass
- #FIXME: Why? What purpose does this serve?
- #Why do we divide by 100!?
- #Disabled for now, until we understand why this was even implemented
- # if self.datax is not None:
- # finedelays = self.datax - np.min(self.datax)
- #finedelays = finedelays/100.0
- if not turnbyturn:
- #'array' should have numBunches many entries. So 'a' should now be
- # [0,1,2,3,4 ... NumBunches]
- a = np.arange(0, len(array))
- #Now 'a' should be numADC*numBunches long and internally repeat the
- #same each element of the previous array len(selector)-times in a row
- #Think [0,0,0,0, 1,1,1,1, 2,2,2,2 ... len(array)], if len(selector)
- #would be 4
- a = np.repeat(a, len(selector))
- #But ... a already IS 1-dimensional? Whay reshape it with -1 AGAIN?
- #a = np.reshape(a, -1)
- #And finally, we turn a into a np.float type array...
- a = np.array(a, dtype=np.float)
- else:
- a = np.array(np.reshape(np.tile(np.repeat(np.arange(0, 184), len(selector)), len(array)//184),-1),dtype=np.float)
- b = np.reshape(np.repeat([finedelays[np.array(selector)]], len(array),0), -1)
- orig_xs = a+b
- # 'a' is a list of all the bunch-bumbers (basically X-Coordinates) which
- # the individual datapoints from the ADCs belong to.
- # 'b' is a list of all fine-delays for all the ADCs, repeated for numBunches times.
- # This means 'a+b' is X-Coordinates with respect to the individual
- # delays of the ADCs. The "calibrated" X-Coordinates, so to say
- # Flatten array
- array = array.flatten()
- # Okay, so if I understand this correctly, 'array' should now be
- # basically just one long vector of values with all the the ADCs in
- # sequence.
- # Basically:
- # [ADC 1 Bunch1, ADC 1 Bunch2, ADC 1 Bunch3 ... ADC 1 Bunch n,
- # ADC 2 Bunch1, ADC 2 Bunch2, ADC 2 Bunch3 ... ADC 2 Bunch n,
- # ...
- # ADC M Bunch1, ADC M Bunch2, ADC M Bunch3 ... ADC M Bunch n]
- if turnbyturn and mean:
- array = np.mean(array.reshape((-1, 184, len(selector))),0)
- orig_xs = np.mean(orig_xs.reshape((-1, 184, len(selector))),0)
- array = array.reshape(-1)
- orig_xs = orig_xs.reshape(-1)
- #print(adc)
- ret = [np.array([orig_xs, array])]
- if adc not in selector:
- ret.append(np.array([0,0]))
- return ret
- #FIXME: I assume the check for ==6 is because of the 'Working channels'
- # thing that used to be in a previous version?
- if len(selector) == 6:
- if adc > 4:
- adc -= 2
- while adc not in selector:
- adc -= 1
- if adc < 0:
- adc = self.adcNumber
- ret.append(np.array([orig_xs[adc::len(selector)], array[adc::len(selector)]]))
- # Okay, so what comes out of this is an array with 2x2xnumBunches size.
- # [0][0] contains 'orig_xs', telling which element belongs to which X level
- # [0][1] contains the flattened ADC array, giving the Y levels for each element
- # [1][0] and [1][1] are the same, but for only 1 specific ADC, so the
- # plotting can color that one ADC differently. Which is completely
- # nonesensical, because we could have just pulled that data out [0] either
- # way...
- return ret
- def loadFromRawData(self, rawData):
- print('loadfromrawdata')
- #self.printData(rawData[:32+32])
- #print('-------------------')
- headerInfo = self.dataHasHeader(rawData)
- if True in headerInfo:
- #logging.vinfo("Header detected.")
- # We read words of 4 bytes each
- self.header = self.parseHeader(rawData, headerInfo)
- spliceWords = HEADER_SIZE_BYTES // 4
- rawData = rawData[spliceWords:]
- else:
- #logging.vinfo("No Header detected.")
- self.header = None
- self.array = self.decodeData(rawData)
- self.dataRead=True
- def printData(self, data):
- try:
- for line in range(len(data)//4):
- print('0x{:08x}: 0x{:08x} 0x{:08x} 0x{:08x} 0x{:08x}'.format(line*16, data[line*4+0], data[line*4+1], data[line*4+2], data[line*4+3]))
- except:
- traceback.print_exc()
- def loadFromFile(self, frm, to):
- if self.noFile:
- return False
- if self.dataRead == False:
- if '.npy' in self.fileName:
- self.type = RAW_FILE_NPY
- self.array = np.load(self.fileName)
- elif '.out' in self.fileName:
- self.type = RAW_FILE
- self.fp = np.memmap(self.fileName, dtype='uint32')
- #print('loadfromfile')
- #self.printdata(self.fp[:32+32])
- #print('-------------------')
- headerInfo = self.dataHasHeader(self.fp)
- if True in headerInfo:
- #logging.vinfo("Header detected.")
- # We read words of 4 bytes each
- self.header = self.parseHeader(self.fp, headerInfo)
- #if self.header == dict():
- # self.getFromLog()
- spliceWords = HEADER_SIZE_BYTES // 4
- self.fp = self.fp[spliceWords:]
- else:
- #logging.vinfo("No Header detected.")
- self.header = None
- self.getFromLog()
- self.adcNumber = self.dataAdcCount(self.fp)
- self.dataRead=True
- if to == -1:
- if self.to != to:
- if self.type == RAW_FILE:
- self.array = self.decodeData(self.fp[frm*self.adcNumber//2:])
- self.to = to
- return True
- return False
- tto = to+184*3
- if self.frm > frm or self.to < tto:
- if self.type == RAW_FILE:
- self.array = self.decodeData(self.fp[frm*self.adcNumber//2:tto*self.adcNumber//2])
- self.frm = frm
- self.to = tto
- return True
- return False
- def decodeData(self, data):
- #This method assumes any potential header data has already been removed
- self.adcNumber = self.dataAdcCount(data)
- try:
- end = np.where(data==0xDEADDEAD)[0][0]
- data = data[:end]
- except Exception as e:
- #FIXME: No error handling!?
- #Is not finding a filling pattern actually an exception?
- #Shouldn't this be EXPECTED?
- #print('decode_data', e)
- pass
- #data = data[np.where(data != 0xDEADDEAD)] # This is the new filling
- #print('len data', len(data))
- # Make sure we read multiple of adcNumber
- data = data[:self.adcNumber * int((math.floor(data.size // self.adcNumber)))]
- #print('len data', len(data))
- bunch_low = data & 0xfff
- bunch_high = np.right_shift(data, 12) & 0xfff
- #FIXME: Same as with the adcNumber thing. Why does the mask use 12 bits,
- # if we shift all but 8 bits to the right?
- bunch_number = np.right_shift(data, 24) & 0xfff
- bunch_low = bunch_low.reshape(-1, self.adcNumber)
- bunch_high = bunch_high.reshape(-1, self.adcNumber)
- if self.invert:
- #FIXME: What's the logic behind this math...?
- #Isn't this the same as 2x2048 - bunch ?
- bunch_high = 2048 - (bunch_high-2048)
- bunch_low = 2048 - (bunch_low-2048)
- result = np.empty((bunch_low.shape[0] + bunch_high.shape[0], self.adcNumber+1), dtype=np.uint16)
- result[0::2,:self.adcNumber] = bunch_low
- result[1::2,:self.adcNumber] = bunch_high
- result[0::2, self.adcNumber] = bunch_number[::self.adcNumber]
- result[1::2, self.adcNumber] = bunch_number[::self.adcNumber] + 1
- #result = result[:int(self.bunchesPerTurn * (math.floor(result.shape[0] // self.bunchesPerTurn))), :]
- if self.shiftFMC2:
- if self.v: print('shift FMC2 by ', self.shiftFMC2)
- #print('decode_data ', result.shape)
- tmp = []
- for i in range(self.adcNumber+1):
- if self.shiftFMC2 > 0:
- if i < 4:
- tmp.append(result[self.shiftFMC2:,i])
- else:
- tmp.append(result[:-self.shiftFMC2,i])
- else:
- shift = self.shiftFMC2*(-1)
- if i < 4:
- tmp.append(result[:-shift:,i])
- else:
- tmp.append(result[shift:,i])
- result = np.array(tmp, dtype=np.uint16).T
- result = result[:int(self.bunchesPerTurn * (math.floor(result.shape[0] // self.bunchesPerTurn))), :]
- if self.v: print('decode_data ', result.shape)
- #print(result)
- return result
- def dataAdcCount(self, data):
- #FIXME: Shift-Right would also clone any sign-bits on the very far left
- # end of the data repeatedly, when shifting to the right. This might
- # produce incorrect results, since we shift all but 8 bits off the data,
- # but then check for 12 bits (0xFFF), meaning we might catch up to 4
- # 'cloned' sign bits in the mask
- #Additionally, why do we even shift in the first place...?
- #We are using a mask anyway, why not just make the mask use the correct
- #bits...? And why do we apply the shift and the mask to the ENTIRE
- #DATASET, when all we end up checking is just the first 10 entries...?
- bunch_number = np.right_shift(data, 24) & 0xfff
- ctr = 0
- b0 = bunch_number[0]
- for i in range(10):
- if b0 == bunch_number[i]:
- ctr += 1
- else:
- break
- if ctr < 4:
- ctr = 4
- if self.v: print('ADC number ', ctr)
- #FIXME: Where is the case with 8 ADCs?
- #What about any other number of ADCs?
- if ctr < 4:
- ctr = 4
- #return 8
- return ctr
- def dataHasHeader(self, data):
- possible_header = data[0:HEADER_SIZE_BYTES//4]
- kapture2 = (possible_header[0] & 0xFF000000) != 0
- back = possible_header[-1] & 0xF8888888 == 0xF8888888
- front = possible_header[0] & 0xF8888888 == 0xF8888888
- if self.v: print("has head", (front, back, kapture2))
- return (front, back, kapture2)
- def parseHeader(self, data, header_info, verbose=False):
- """
- Not supported for KAPTURE-2
- """
- #FIXME: Good to know, but what prevents me from wrongly using it anyway?
- global HEADER_SIZE_BYTES
- if header_info[2] == True:
- HEADER_SIZE_BYTES = 64
- header = data[0:HEADER_SIZE_BYTES//4]
- parsed = dict()
- if verbose: print('parsing header')
- # print([format(b, '#08X') for b in header])
- try:
- assert header[0] == 0xf8888888 or header[7] == 0xf8888888, 'Field 8 is saved for data'
- assert header[1] == 0xf7777777 or header[6] == 0xf7777777, 'Field 7 is saved for data'
- assert header[2] == 0xf6666666 or header[5] == 0xf6666666, 'Field 6 is saved for data'
- if header[0] == 0xf8888888 and header[1] == 0xf7777777 and header[2] == 0xf6666666:
- header = header[::-1]
- # TODO: what kind of timestamp is this? counts with appr. 25MHz up?!
- parsed['timestamp'] = header[4]
- assert header[3] >> 8 == 0, 'Highest 6 x 4 bits of field 3 is supposed to be zero'
- parsed['delay_adc4'] = header[3] & 0xf
- parsed['delay_adc3'] = header[3] >> 4 & 0xf
- assert header[2] >> 16 == 0, 'Highest 4 x 4 bits of field 2 is supposed to be zero'
- parsed['delay_adc2'] = header[2] & 0xf
- parsed['delay_adc1'] = header[2] >> 4 & 0xf
- parsed['delay_th'] = header[2] >> 8 & 0xf
- parsed['delay_fpga'] = header[2] >> 12 & 0xf
- parsed['fine_delay_adc'] = np.array([header[1] & 0xff,
- header[1] >> 8 & 0xff,
- header[1] >> 16 & 0xff,
- header[1] >> 24 & 0xff])
- assert header[0] >> 28 == 0xF, 'Highest 4 bits of field 0 is supposed to be 0xF'
- parsed['skip_turns'] = header[0] & 0xfffffff
- if verbose: print(parsed)
- except Exception as e:
- #FIXME: So... if the decoding from header breaks, we just return a
- #broken 'parsed' object!?
- pass
- #traceback.print_exc()
- return parsed
- def readLogfile(self):
- """
- return: dict with all information present in the Logfile
- """
- if self.noFile:
- return None
- #FIXME: Why is the name of the Log File hardcoded!?
- logFile = os.path.join(os.path.dirname(self.fileName), 'Measurement_board_6028.log')
- if not os.path.isfile(logFile):
- return None
- log = self._readLogfile(logFile)
- file = os.path.basename(self.fileName)
- for entry in log:
- try:
- if entry['Filename'] == file:
- return entry
- except:
- pass
- return None
- #FIXME: Why is this split into one private and one public method?
- def _readLogfile(self, file):
- defaultKeys = ['Number of Turns', 'Number of Skipped Turns', 'T/H Delay', '25ps Delay', 'ADC 1 Delay', 'ADC 2 Delay', 'ADC 3 Delay', 'ADC 4 Delay', 'ADC Delays', "25ps Delay 2", "T/H Delay 2", 'Stage Step']
- out = []
- with open(file,'r') as f:
- log = f.read().split('---')[1:]
- for entry in log:
- tmp={}
- ptr = 0
- for item in defaultKeys:
- tmp[item] = '?'
- if item == '25ps Delay 2':
- tmp[item] = '4'
- for item in entry.split('\n')[1:]:
- t = item.split(':')
- if len(t) == 1:
- tmp[str(ptr)] = t[0].strip('\t')
- ptr+=1
- else:
- tmp[str(t[0]).strip('\t')] = t[1].strip(' ')
- out.append(tmp)
- return out
- def getFromLog(self):
- """
- used to get information from the Logfile.
- """
- print('getFromLog')
- self.log = self.readLogfile()
- if self.log == None:
- print('no Log found')
- return False
- try:
- self.isAcquisition=False
- try:
- if "Acquisition" in self.log['0']:
- self.isAcquisition = True
- elif "Acquisition" in self.log['1']:
- self.isAcquisition = True
- except:
- pass
- try:
- self.skippedTurns = int(self.log['Number of Skipped Turns'])
- except:
- pass
- self.c330 = int(self.log['T/H Delay'])
- self.c25 = int(self.log['25ps Delay'])
- self.f = np.array([float(v) for v in self.log['ADC Delays'][1:-1].split(',')])
- try:
- self.datax = np.array([float(v) for v in self.log['datax'][1:-1].split(',')])
- except:
- self.datax = None
- pass
- self.adcNumber = len(self.f)
- file = self.log['Filename'].split('_')[-1]
- self.swap_adc = float(file[:-4]) < 1543859487
- if self.swap_adc and self.adcNumber > 4:
- if self.v: print('swapping ADC', self.log['Filename'])
- t = np.array(self.f[4:])
- self.f[4:] = self.f[:4]
- self.f[:4] = t
- self.c25b = int(self.log['25ps Delay 2'])
- try:
- self.stepper = int(self.log['Stage Step'])
- except:
- self.stepper = None
- try:
- self.workingChannels = np.array([float(v) for v in self.log['Working Channels'][1:-1].split(',')])
- except:
- self.workingChannels = None
- try:
- self.shiftFMC2 = int(self.log['shiftFMC2'])
- except:
- pass
- #print('get good')
- return True
- except:
- #print('getFromLog ', self.fileName)
- #traceback.print_exc()
- pass
- return False
- def _pad_array(self, array):
- #return array
- height, width = array.shape
- pwidth = int(2**np.floor(np.log2(width)))
- padded = np.zeros((height, pwidth))
- padded[:, :width] = array[:, :pwidth]
- return padded
- def _fft(self, array, is1D=False):
- start = time.time()
- print('fft', array.shape)
- if is1D:
- freqs = np.fft.rfft(array)
- else:
- freqs = np.fft.rfft(self._pad_array(array))
- #logging.debug("np fft: {} s".format(time.time() - start))
- print('FFT done {:.2f} s'.format(time.time() - start))
- return freqs
|