|
@@ -12,18 +12,23 @@ try:
|
|
|
#Compatibility Python3 and Python 2
|
|
|
from .CalibrationHandle import theCalibration
|
|
|
from .constants import KARA
|
|
|
+ from . import board
|
|
|
+ from .board import available_boards
|
|
|
except:
|
|
|
from CalibrationHandle import theCalibration
|
|
|
from constants import KARA
|
|
|
|
|
|
|
|
|
+#FIXME: This is device dependent and needs to be in the config file!!
|
|
|
HEADER_SIZE_BYTES = 32
|
|
|
|
|
|
|
|
|
+#FIXME: Python Dictionaries? Never heard of them...
|
|
|
RAW_FILE = 1
|
|
|
RAW_FILE_NPY = 2
|
|
|
TIMESCAN = 3
|
|
|
|
|
|
+
|
|
|
class DataSet(object):
|
|
|
"""
|
|
|
Most usefull functions:
|
|
@@ -33,7 +38,7 @@ class DataSet(object):
|
|
|
readFromLog()
|
|
|
! Be aware: parameter ADC is in range of 0 to 7
|
|
|
"""
|
|
|
- def __init__(self, filename=None, decodedData=None, rawData=None, stringData=None, delays=None, shiftFMC2=0, tRev=KARA.trev, bunchesPerTurn=KARA.h, calibrationFile=""):
|
|
|
+ def __init__(self, filename=None, decodedData=None, rawData=None, stringData=None, delays=None, shiftFMC2=0, tRev=KARA.trev, bunchesPerTurn=KARA.h, calibrationFile=""):
|
|
|
"""
|
|
|
Initialise the dataset
|
|
|
use one of:
|
|
@@ -46,7 +51,7 @@ class DataSet(object):
|
|
|
:param tRev: default KARA.trev
|
|
|
:param bunchesPerTrun: default KARA.h
|
|
|
:param calibrationFile: define the to use calibration. by default it looks for "calibration.hdf" in the same dic as the filename
|
|
|
-
|
|
|
+
|
|
|
:return: -
|
|
|
"""
|
|
|
self.fileName = filename
|
|
@@ -63,12 +68,12 @@ class DataSet(object):
|
|
|
self.datax = None
|
|
|
self.isAcquisition=False
|
|
|
self.skippedTurns = 0
|
|
|
-
|
|
|
-
|
|
|
+
|
|
|
+
|
|
|
self.tRev = tRev
|
|
|
self.bunchesPerTurn = bunchesPerTurn
|
|
|
self.shiftFMC2 = shiftFMC2
|
|
|
-
|
|
|
+
|
|
|
self.array = []
|
|
|
self.frm = 0
|
|
|
self.to = 0
|
|
@@ -80,12 +85,12 @@ class DataSet(object):
|
|
|
self._fftsnobunching = False
|
|
|
self._f = 0
|
|
|
self._t = 1
|
|
|
-
|
|
|
+
|
|
|
self.c330 = 0
|
|
|
self.c25 = 0
|
|
|
self.c25b = 4
|
|
|
self.f = []
|
|
|
-
|
|
|
+
|
|
|
self.dataRead = False
|
|
|
self.noFile = False
|
|
|
self.calibId = "current"
|
|
@@ -129,14 +134,14 @@ class DataSet(object):
|
|
|
elif filename is not None:
|
|
|
if not os.path.isfile(self.fileName):
|
|
|
print('file {} does not exist'.format(self.fileName))
|
|
|
- return
|
|
|
+ return
|
|
|
else:
|
|
|
self.getFromLog()
|
|
|
else:
|
|
|
print('DataSet nothing given!')
|
|
|
- return
|
|
|
-
|
|
|
-
|
|
|
+ return
|
|
|
+
|
|
|
+
|
|
|
|
|
|
def setShiftFMC2(self, shiftFMC2):
|
|
|
self.shiftFMC2 = shiftFMC2
|
|
@@ -154,23 +159,77 @@ class DataSet(object):
|
|
|
|
|
|
|
|
|
def fineDelays(self):
|
|
|
- #print(self.header)
|
|
|
- if len(self.f):
|
|
|
- return self.f
|
|
|
|
|
|
- if self.header:
|
|
|
- return self.header['fine_delay_adc']
|
|
|
- else:
|
|
|
- return None
|
|
|
+ #FIXME: THIS IS A TRIAGE!
|
|
|
+ # This is a bandaid patch to make peak reconstruction at least *somewhat*
|
|
|
+ # working again. We will only use the finedelays from current
|
|
|
+ # configuration. This will inevitably be incorrect, if data is stored to
|
|
|
+ # harddrive and then opened again in a new session with different settings
|
|
|
+ # and configuration. Or on a different KAPTURE system altogether. So we
|
|
|
+ # will eventually need to think about a way to couple calibration,
|
|
|
+ # configuration and measurement data together into one 'container' so it
|
|
|
+ # can be re-used and moved between system boundaries, without losing
|
|
|
+ # details.
|
|
|
+
|
|
|
+ # if len(self.f):
|
|
|
+ # return self.f
|
|
|
+
|
|
|
+ # if self.header:
|
|
|
+ # return self.header['fine_delay_adc']
|
|
|
+ # else:
|
|
|
+ # return None
|
|
|
+
|
|
|
+ adcdelays_ps = [0 for i in range(self.adcNumber)]
|
|
|
+
|
|
|
+ #FIXME: I am just *guessing* that available_boards[0] is the right one,
|
|
|
+ # since support for multiple boards has been phased out a while ago, but
|
|
|
+ # still lingers in the codebase as a remnant. So [0] is a good guess, for
|
|
|
+ # now.
|
|
|
+ board_config = board.get_board_config(available_boards[0])
|
|
|
+ bunches_per_turn = board_config.get("bunches_per_turn")
|
|
|
+ time_between_bunches_s = self.tRev / bunches_per_turn
|
|
|
+
|
|
|
+ #Convert seconds to picoseconds
|
|
|
+ time_between_bunches_ps = time_between_bunches_s * (1000**4)
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+ finedelays = board_config.get("chip_delay")
|
|
|
+ finedelay_factor = board_config.get("chip_delay_factor")
|
|
|
+ finedelays = [i*finedelay_factor for i in finedelays]
|
|
|
+
|
|
|
+
|
|
|
+ #FIXME: We need a better way to understand how the ADCs are distributed
|
|
|
+ # across the FMC connectors. Currently, I have to know that we have 8 ADCs
|
|
|
+ # distributed as 4 ADCs onto 2 FMCs, and get the delays from the board
|
|
|
+ # config accordingly. Not good. We should find a way to do this
|
|
|
+ # programmatically.
|
|
|
+
|
|
|
+ fmc1_delay = board_config.get("delay_330_th") * board_config.get("delay_330_factor")
|
|
|
+ fmc1_delay += board_config.get("delay_25_th") * board_config.get("delay_25_factor")
|
|
|
+
|
|
|
+ fmc2_delay = board_config.get("delay_330_th_2") * board_config.get("delay_330_factor")
|
|
|
+ fmc2_delay += board_config.get("delay_25_th_2") * board_config.get("delay_25_factor")
|
|
|
+
|
|
|
+ adcdelays_ps[:4] = [i + fmc1_delay for i in finedelays[:4]]
|
|
|
+ adcdelays_ps[4:] = [i + fmc2_delay for i in finedelays[4:]]
|
|
|
+
|
|
|
+
|
|
|
+ #Express the delays as fractions of full bunches
|
|
|
+ adcdelays_ps = [i / time_between_bunches_ps for i in adcdelays_ps]
|
|
|
+
|
|
|
+ return adcdelays_ps
|
|
|
+
|
|
|
+
|
|
|
|
|
|
def getCalibratedDelay(self):
|
|
|
if self.datax is not None:
|
|
|
return self.datax*1e-12
|
|
|
-
|
|
|
+
|
|
|
out = []
|
|
|
for i,v in enumerate(self.f):
|
|
|
out.append(theCalibration.calibrateX(i, self.c330, self.c25, v, self.c25b))
|
|
|
-
|
|
|
+
|
|
|
return np.array(out)
|
|
|
|
|
|
|
|
@@ -206,7 +265,7 @@ class DataSet(object):
|
|
|
if self._fftsnobunching != nobunching:
|
|
|
self._fftsnobunching = nobunching
|
|
|
self._ffts = {}
|
|
|
-
|
|
|
+
|
|
|
if isinstance(adc, list):
|
|
|
#print('list', adc)
|
|
|
out = [[],[],[],[], [],[],[],[]]
|
|
@@ -242,19 +301,38 @@ class DataSet(object):
|
|
|
mult=1.0
|
|
|
if self._fftsnobunching:
|
|
|
mult = 184.0
|
|
|
- return ((self.numOfTurns()* mult) // 2 + 1) * self.fftFreqDist()
|
|
|
-
|
|
|
+ return ((self.numOfTurns()* mult) // 2 + 1) * self.fftFreqDist()
|
|
|
+
|
|
|
def fftFreqDist(self):
|
|
|
mult=1.0
|
|
|
#if self._fftsnobunching:
|
|
|
# mult = 1.0/184.0
|
|
|
return 1.0/(self.numOfTurns() * (self.skippedTurns + 1) * self.tRev * mult)
|
|
|
|
|
|
+
|
|
|
+
|
|
|
def train(self, adc=0, frm=0, to=-1, calibrate=False):
|
|
|
+ #FIXME: This method seems pretty superfluous (with maybe the exception of
|
|
|
+ # the calibration stuff).
|
|
|
+ # Essentially all it does is just truncating data for a second time, and
|
|
|
+ # reorders data so we access ADC data as 'data[adc][bunch]' rather than
|
|
|
+ # 'data[:,adc][bunch]'. What's the point? Really just to save TWO characters
|
|
|
+ # of coding? (:,)
|
|
|
+ # Also, returning different data structures depending on the given
|
|
|
+ # parameters is VERY DANGEROUS!!
|
|
|
+ # If we give 'adc' as a single integer, it returns a 1-Dimensional array
|
|
|
+ # (basically, a list) and if we give 'adc' as a list, it returns a
|
|
|
+ # 2-dimensional array?
|
|
|
+ # This is basically guaranteed to sooner or later cause trouble down the
|
|
|
+ # line...
|
|
|
+
|
|
|
"""
|
|
|
params: adc: single int in [0:7] or list. if adc is a list (eg. [1,3]) it returns a list of length 8 with the requested elements filled
|
|
|
"""
|
|
|
self.loadFromFile(frm, to)
|
|
|
+
|
|
|
+ #FIXME: Why are there two truncation parameters?
|
|
|
+ #One passed in from the function call and one from the DataSet object?
|
|
|
if to != self.to:
|
|
|
to -= self.frm
|
|
|
pdata = self.array[frm-self.frm:to, :-1]
|
|
@@ -263,6 +341,8 @@ class DataSet(object):
|
|
|
#print("train", adc)
|
|
|
#print(pdata)
|
|
|
if isinstance(adc, list):
|
|
|
+ #FIXME: What happens if we have more than 8 ADCs in the future?
|
|
|
+ #This should NOT be hardcoded!!!
|
|
|
data = [[],[],[],[], [],[],[],[]]
|
|
|
for item in range(len(data)):
|
|
|
if item in adc:
|
|
@@ -273,12 +353,24 @@ class DataSet(object):
|
|
|
else:
|
|
|
data[item] = np.zeros(len(pdata))
|
|
|
#print('train', len(data))
|
|
|
+
|
|
|
+ #Note: Data is now stored as
|
|
|
+ #[[ADC 1: Bunch1, Bunch2, Bunch3 ... Bunch n]
|
|
|
+ # [ADC 2: Bunch1, Bunch2, Bunch3 ... Bunch n]
|
|
|
+ # [ADC 3: Bunch1, Bunch2, Bunch3 ... Bunch n]
|
|
|
+ # ...
|
|
|
+ # [ADC m: Bunch1, Bunch2, Bunch3 ... Bunch n]]
|
|
|
return np.array(data)
|
|
|
-
|
|
|
+
|
|
|
+
|
|
|
if calibrate:
|
|
|
return theCalibration.calibrateY(pdata[:,adc], adc, self.calibId)
|
|
|
+
|
|
|
+ #FIXME: Missing range checking for ADC
|
|
|
+ #This should ideally happen at the BEGINNING of the function
|
|
|
return pdata[:,adc]
|
|
|
|
|
|
+
|
|
|
def combined(self, adc=0, frm=0, to=-1, calibrate=False, turnbyturn=False, mean=False, workingChannels=[0,1,2,3,4,5,6,7]):
|
|
|
"""
|
|
|
generates one array with all adc
|
|
@@ -289,7 +381,10 @@ class DataSet(object):
|
|
|
|
|
|
:return: 2D List [0] contains X data and Y data of all. [1] only for selected adc
|
|
|
"""
|
|
|
+
|
|
|
if len(workingChannels) < 2:
|
|
|
+ #FIXME: This should not be an exception, but rather an expectation
|
|
|
+ #Error handling could be improved
|
|
|
raise ValueError('working_channels must have at least 2 channels; {}'.format(workingChannels))
|
|
|
|
|
|
if turnbyturn:
|
|
@@ -300,47 +395,107 @@ class DataSet(object):
|
|
|
#selector = [0,1,2,3,6,7] #currently not all chanels are working
|
|
|
selector = workingChannels
|
|
|
|
|
|
+
|
|
|
+ #FIXME: WHY are we using .train() to re-arrange our data to be in
|
|
|
+ # ADC-Bunches order, just to transpose it back into Bunch-ADC order, which
|
|
|
+ # is EXACTLY what the raw data was to begin with...?
|
|
|
array = self.train(adc=selector, frm=frm,to=to, calibrate=calibrate).T
|
|
|
array = array[:, np.array(selector)]
|
|
|
|
|
|
+ #At this point, 'array' is basically just a truncated version of the
|
|
|
+ #raw-data in 'self.array' without the bunch-numbers
|
|
|
+
|
|
|
+
|
|
|
if isinstance(adc, list):
|
|
|
adc = adc[0]
|
|
|
|
|
|
finedelays = np.array(self.fineDelays())
|
|
|
- if finedelays is None:
|
|
|
+ if finedelays is None:
|
|
|
finedelays = np.array([0,0,0,0])
|
|
|
if self.adcNumber >4:
|
|
|
+ #FIXME: Highly dangerous! What happens if we will have more than
|
|
|
+ #8 ADCs in the future?
|
|
|
finedelays = np.repeat(finedelays, 2)
|
|
|
|
|
|
+
|
|
|
if calibrate:
|
|
|
for i in range(self.adcNumber):
|
|
|
finedelays[i] = (theCalibration.calibrateX(i, self.c330, self.c25, finedelays[i], self.c25b, self.calibId) - theCalibration.calibrateX(0, self.c330, self.c25, 0, self.c25b, self.calibId) )*1e12
|
|
|
else:
|
|
|
finedelays = finedelays*3
|
|
|
|
|
|
+
|
|
|
+
|
|
|
+ #FIXME: Why? What purpose does this serve?
|
|
|
+ #Why do we divide by 100!?
|
|
|
if self.datax is not None:
|
|
|
finedelays = self.datax - np.min(self.datax)
|
|
|
finedelays = finedelays/100.0
|
|
|
|
|
|
+
|
|
|
+
|
|
|
if not turnbyturn:
|
|
|
- a = np.array(np.reshape(np.repeat(np.arange(0, len(array)), len(selector)),-1),dtype=np.float)
|
|
|
+ #FIXME: How about we nest ALL the calls! Genius!! (NOT!!)
|
|
|
+ a = np.array(
|
|
|
+ np.reshape(
|
|
|
+ np.repeat(
|
|
|
+ np.arange(0, len(array)), len(selector)), -1), dtype=np.float)
|
|
|
+
|
|
|
+ #'array' should have numBunches many entries. So 't' should now be
|
|
|
+ # [0,1,2,3,4 ... NumBunches]
|
|
|
+ t = np.arange(0, len(array))
|
|
|
+
|
|
|
+ #Now 't' should be numADC*numBunches long and internally repeat the
|
|
|
+ #each element of the previous array len(selector)-times in a row
|
|
|
+ #Think [0,0,0,0, 1,1,1,1, 2,2,2,2 ... len(array)], if len(selector)
|
|
|
+ #would be 4
|
|
|
+ t = np.repeat(t, len(selector))
|
|
|
+
|
|
|
+ #But ... t already IS 1-dimensional? Why reshape it with -1 AGAIN?
|
|
|
+ t = np.reshape(t, -1)
|
|
|
+
|
|
|
+ #And finally, we turn t into a np.float type array...
|
|
|
+ t = np.array(t, dtype=np.float)
|
|
|
+
|
|
|
+
|
|
|
else:
|
|
|
- a = np.array(np.reshape(np.tile(np.repeat(np.arange(0, 184), len(selector)), len(array)//184),-1),dtype=np.float)
|
|
|
-
|
|
|
- b = np.reshape(np.repeat([finedelays[np.array(selector)]], len(array),0), -1)
|
|
|
- orig_xs = a+b
|
|
|
+ a = np.array(np.reshape(np.tile(np.repeat(np.arange(0, 184), len(selector)), len(array)//184),-1),dtype=np.float)
|
|
|
|
|
|
- # Remove bunch number and flatten array
|
|
|
- array = array.reshape((-1, 1))
|
|
|
+
|
|
|
+
|
|
|
+ b = np.reshape(np.repeat([finedelays[np.array(selector)]], len(array),0), -1)
|
|
|
+ orig_xs = a+b
|
|
|
+
|
|
|
+ # 'a' is a list of all the bunch-numbers (basically X-Coordinates) which
|
|
|
+ # the individual datapoints from the ADCs belong to.
|
|
|
+ # 'b' is a list of all fine-delays for all the ADCs, repeated for numBunches times.
|
|
|
+ # This means 'a+b' is X-Coordinates with respect to the individual
|
|
|
+ # delays of the ADCs. The "calibrated" X-Coordinates, so to say
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+ # Flatten array
|
|
|
array = array.flatten()
|
|
|
|
|
|
+ # Okay, so if I understand this correctly, 'array' should now be
|
|
|
+ # basically just one long string of values with all the ADCs in
|
|
|
+ # sequence.
|
|
|
+ # Basically:
|
|
|
+ # [ADC 1 Bunch1, ADC 1 Bunch2, ADC 1 Bunch3 ... ADC 1 Bunch n,
|
|
|
+ # ADC 2 Bunch1, ADC 2 Bunch2, ADC 2 Bunch3 ... ADC 2 Bunch n,
|
|
|
+ # ...
|
|
|
+ # ADC M Bunch1, ADC M Bunch2, ADC M Bunch3 ... ADC M Bunch n]
|
|
|
+
|
|
|
+
|
|
|
if turnbyturn and mean:
|
|
|
array = np.mean(array.reshape((-1, 184, len(selector))),0)
|
|
|
orig_xs = np.mean(orig_xs.reshape((-1, 184, len(selector))),0)
|
|
|
-
|
|
|
+
|
|
|
array = array.reshape(-1)
|
|
|
orig_xs = orig_xs.reshape(-1)
|
|
|
|
|
|
+
|
|
|
#print(adc)
|
|
|
ret = [np.array([orig_xs, array])]
|
|
|
|
|
@@ -358,10 +513,21 @@ class DataSet(object):
|
|
|
adc = self.adcNumber
|
|
|
ret.append(np.array([orig_xs[adc::len(selector)], array[adc::len(selector)]]))
|
|
|
|
|
|
- #print(ret)
|
|
|
+
|
|
|
+ # Okay, so what comes out of this is an array with 2x2xnumBunches size.
|
|
|
+ # [0][0] contains 'orig_xs', telling which element belongs to which X level
|
|
|
+ # [0][1] contains the flattened ADC array, giving the Y levels for each element
|
|
|
+
|
|
|
+ # [1][0] and [1][1] are the same, but for only 1 specific ADC, so the
|
|
|
+ # plotting can color that one ADC differently. Which is completely
|
|
|
+ # nonsensical, because we could have just pulled that data out of [0] either
|
|
|
+ # way...
|
|
|
+
|
|
|
+
|
|
|
return ret
|
|
|
|
|
|
|
|
|
+
|
|
|
def loadFromRawData(self, rawData):
|
|
|
print('loadfromrawdata')
|
|
|
#self.printData(rawData[:32+32])
|
|
@@ -388,7 +554,7 @@ class DataSet(object):
|
|
|
|
|
|
|
|
|
def loadFromFile(self, frm, to):
|
|
|
-
|
|
|
+
|
|
|
if self.noFile:
|
|
|
return False
|
|
|
|
|
@@ -403,7 +569,7 @@ class DataSet(object):
|
|
|
#print('loadfromfile')
|
|
|
#self.printdata(self.fp[:32+32])
|
|
|
#print('-------------------')
|
|
|
-
|
|
|
+
|
|
|
headerInfo = self.dataHasHeader(self.fp)
|
|
|
if True in headerInfo:
|
|
|
#logging.vinfo("Header detected.")
|
|
@@ -433,7 +599,7 @@ class DataSet(object):
|
|
|
if self.frm > frm or self.to < tto:
|
|
|
if self.type == RAW_FILE:
|
|
|
self.array = self.decodeData(self.fp[frm*self.adcNumber//2:tto*self.adcNumber//2])
|
|
|
-
|
|
|
+
|
|
|
self.frm = frm
|
|
|
self.to = tto
|
|
|
return True
|
|
@@ -442,13 +608,18 @@ class DataSet(object):
|
|
|
|
|
|
|
|
|
def decodeData(self, data):
|
|
|
+ #This method assumes any potential header data has already been removed
|
|
|
self.adcNumber = self.dataAdcCount(data)
|
|
|
try:
|
|
|
end = np.where(data==0xDEADDEAD)[0][0]
|
|
|
data = data[:end]
|
|
|
except Exception as e:
|
|
|
+ #FIXME: No error handling!?
|
|
|
+ #Is not finding a filling pattern actually an exception?
|
|
|
+ #Shouldn't this be EXPECTED?
|
|
|
#print('decode_data', e)
|
|
|
pass
|
|
|
+
|
|
|
#data = data[np.where(data != 0xDEADDEAD)] # This is the new filling
|
|
|
#print('len data', len(data))
|
|
|
# Make sure we read multiple of adcNumber
|
|
@@ -457,12 +628,18 @@ class DataSet(object):
|
|
|
|
|
|
bunch_low = data & 0xfff
|
|
|
bunch_high = np.right_shift(data, 12) & 0xfff
|
|
|
+
|
|
|
+ #FIXME: Same as with the adcNumber thing. Why does the mask use 12 bits,
|
|
|
+ # if we shift all but 8 bits to the right?
|
|
|
bunch_number = np.right_shift(data, 24) & 0xfff
|
|
|
|
|
|
+
|
|
|
bunch_low = bunch_low.reshape(-1, self.adcNumber)
|
|
|
bunch_high = bunch_high.reshape(-1, self.adcNumber)
|
|
|
|
|
|
if self.invert:
|
|
|
+ #FIXME: What's the logic behind this math...?
|
|
|
+ #Isn't this the same as 2x2048 - bunch ?
|
|
|
bunch_high = 2048 - (bunch_high-2048)
|
|
|
bunch_low = 2048 - (bunch_low-2048)
|
|
|
|
|
@@ -473,7 +650,7 @@ class DataSet(object):
|
|
|
result[1::2, self.adcNumber] = bunch_number[::self.adcNumber] + 1
|
|
|
|
|
|
#result = result[:int(self.bunchesPerTurn * (math.floor(result.shape[0] // self.bunchesPerTurn))), :]
|
|
|
-
|
|
|
+
|
|
|
if self.shiftFMC2:
|
|
|
if self.v: print('shift FMC2 by ', self.shiftFMC2)
|
|
|
#print('decode_data ', result.shape)
|
|
@@ -491,7 +668,7 @@ class DataSet(object):
|
|
|
else:
|
|
|
tmp.append(result[shift:,i])
|
|
|
|
|
|
-
|
|
|
+
|
|
|
result = np.array(tmp, dtype=np.uint16).T
|
|
|
|
|
|
result = result[:int(self.bunchesPerTurn * (math.floor(result.shape[0] // self.bunchesPerTurn))), :]
|
|
@@ -499,7 +676,18 @@ class DataSet(object):
|
|
|
#print(result)
|
|
|
return result
|
|
|
|
|
|
+
|
|
|
def dataAdcCount(self, data):
|
|
|
+ #FIXME: Shift-Right would also clone any sign-bits on the very far left
|
|
|
+ # end of the data repeatedly, when shifting to the right. This might
|
|
|
+ # produce incorrect results, since we shift all but 8 bits off the data,
|
|
|
+ # but then check for 12 bits (0xFFF), meaning we might catch up to 4
|
|
|
+ # 'cloned' sign bits in the mask
|
|
|
+ #Additionally, why do we even shift in the first place...?
|
|
|
+ #We are using a mask anyway, why not just make the mask use the correct
|
|
|
+ #bits...? And why do we apply the shift and the mask to the ENTIRE
|
|
|
+ #DATASET, when all we end up checking is just the first 10 entries...?
|
|
|
+
|
|
|
bunch_number = np.right_shift(data, 24) & 0xfff
|
|
|
ctr = 0
|
|
|
b0 = bunch_number[0]
|
|
@@ -511,6 +699,8 @@ class DataSet(object):
|
|
|
if ctr < 4:
|
|
|
ctr = 4
|
|
|
if self.v: print('ADC number ', ctr)
|
|
|
+ #FIXME: Where is the case with 8 ADCs?
|
|
|
+ #What about any other number of ADCs?
|
|
|
if ctr < 4:
|
|
|
ctr = 4
|
|
|
#return 8
|
|
@@ -531,6 +721,8 @@ class DataSet(object):
|
|
|
"""
|
|
|
Not supported for KAPTURE-2
|
|
|
"""
|
|
|
+ #FIXME: Good to know, but what prevents me from wrongly using it anyway?
|
|
|
+
|
|
|
global HEADER_SIZE_BYTES
|
|
|
if header_info[2] == True:
|
|
|
HEADER_SIZE_BYTES = 64
|
|
@@ -559,9 +751,9 @@ class DataSet(object):
|
|
|
parsed['delay_th'] = header[2] >> 8 & 0xf
|
|
|
parsed['delay_fpga'] = header[2] >> 12 & 0xf
|
|
|
|
|
|
- parsed['fine_delay_adc'] = np.array([header[1] & 0xff,
|
|
|
+ parsed['fine_delay_adc'] = np.array([header[1] & 0xff,
|
|
|
header[1] >> 8 & 0xff,
|
|
|
- header[1] >> 16 & 0xff,
|
|
|
+ header[1] >> 16 & 0xff,
|
|
|
header[1] >> 24 & 0xff])
|
|
|
|
|
|
assert header[0] >> 28 == 0xF, 'Highest 4 bits of field 0 is supposed to be 0xF'
|
|
@@ -569,6 +761,8 @@ class DataSet(object):
|
|
|
|
|
|
if verbose: print(parsed)
|
|
|
except Exception as e:
|
|
|
+ #FIXME: So... if the decoding from header breaks, we just return a
|
|
|
+ #broken 'parsed' object!?
|
|
|
pass
|
|
|
#traceback.print_exc()
|
|
|
return parsed
|
|
@@ -581,6 +775,7 @@ class DataSet(object):
|
|
|
if self.noFile:
|
|
|
return None
|
|
|
|
|
|
+ #FIXME: Why is the name of the Log File hardcoded!?
|
|
|
logFile = os.path.join(os.path.dirname(self.fileName), 'Measurement_board_6028.log')
|
|
|
if not os.path.isfile(logFile):
|
|
|
return None
|
|
@@ -598,6 +793,7 @@ class DataSet(object):
|
|
|
return None
|
|
|
|
|
|
|
|
|
+ #FIXME: Why is this split into one private and one public method?
|
|
|
def _readLogfile(self, file):
|
|
|
defaultKeys = ['Number of Turns', 'Number of Skipped Turns', 'T/H Delay', '25ps Delay', 'ADC 1 Delay', 'ADC 2 Delay', 'ADC 3 Delay', 'ADC 4 Delay', 'ADC Delays', "25ps Delay 2", "T/H Delay 2", 'Stage Step']
|
|
|
out = []
|
|
@@ -625,6 +821,7 @@ class DataSet(object):
|
|
|
return out
|
|
|
|
|
|
|
|
|
+
|
|
|
def getFromLog(self):
|
|
|
"""
|
|
|
used to get information from the Logfile.
|
|
@@ -635,7 +832,7 @@ class DataSet(object):
|
|
|
print('no Log found')
|
|
|
return False
|
|
|
|
|
|
- try:
|
|
|
+ try:
|
|
|
self.isAcquisition=False
|
|
|
try:
|
|
|
if "Acquisition" in self.log['0']:
|
|
@@ -643,7 +840,7 @@ class DataSet(object):
|
|
|
elif "Acquisition" in self.log['1']:
|
|
|
self.isAcquisition = True
|
|
|
except:
|
|
|
- pass
|
|
|
+ pass
|
|
|
try:
|
|
|
self.skippedTurns = int(self.log['Number of Skipped Turns'])
|
|
|
except:
|
|
@@ -651,11 +848,13 @@ class DataSet(object):
|
|
|
self.c330 = int(self.log['T/H Delay'])
|
|
|
self.c25 = int(self.log['25ps Delay'])
|
|
|
self.f = np.array([float(v) for v in self.log['ADC Delays'][1:-1].split(',')])
|
|
|
+
|
|
|
try:
|
|
|
self.datax = np.array([float(v) for v in self.log['datax'][1:-1].split(',')])
|
|
|
except:
|
|
|
self.datax = None
|
|
|
pass
|
|
|
+
|
|
|
self.adcNumber = len(self.f)
|
|
|
file = self.log['Filename'].split('_')[-1]
|
|
|
self.swap_adc = float(file[:-4]) < 1543859487
|
|
@@ -674,7 +873,7 @@ class DataSet(object):
|
|
|
self.workingChannels = np.array([float(v) for v in self.log['Working Channels'][1:-1].split(',')])
|
|
|
except:
|
|
|
self.workingChannels = None
|
|
|
-
|
|
|
+
|
|
|
try:
|
|
|
self.shiftFMC2 = int(self.log['shiftFMC2'])
|
|
|
except:
|