dataset.py

import logging
import time

import numpy as np
from numpy.polynomial.polynomial import polyval

from ... import config


def _pad_array(array):
    height, width = array.shape
    # Miriam uses floor, hence the "padding" is actually a cut down to the
    # largest power-of-two width. Will wait for a response on whether this
    # is desired ...
    pwidth = int(2**np.floor(np.log2(width)))
    padded = np.zeros((height, pwidth))
    padded[:, :width] = array[:, :pwidth]
    return padded
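
# Example (illustrative): _pad_array keeps only the largest power-of-two
# number of columns, so a (4, 1000) array is cut to (4, 512):
#
#     >>> _pad_array(np.ones((4, 1000))).shape
#     (4, 512)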
# try:
#     import reikna.cluda
#     import reikna.fft
#
#     _plans = {}
#     _in_buffers = {}
#     _out_buffers = {}
#     _api = reikna.cluda.ocl_api()
#     _thr = _api.Thread.create()
#
#     def _fft(array):
#         start = time.time()
#         padded = _pad_array(array).astype(np.complex64)
#         height, width = padded.shape
#         if width in _plans:
#             fft = _plans[width]
#             in_dev = _in_buffers[width]
#             out_dev = _out_buffers[width]
#         else:
#             fft = reikna.fft.FFT(padded, axes=(1,)).compile(_thr)
#             in_dev = _thr.to_device(padded)
#             out_dev = _thr.empty_like(in_dev)
#             _plans[width] = fft
#             _in_buffers[width] = in_dev
#             _out_buffers[width] = out_dev
#         fft(out_dev, in_dev)
#         logging.debug("GPU fft: {} s".format(time.time() - start))
#         return out_dev.get()[:, :width // 2 + 1]
#
#     logging.info("Using GPU based FFT!")
# except ImportError:
#     logging.debug("Failed to import reikna package. Falling back to Numpy FFT.")

def _fft(array):
    """Row-wise real FFT of the array, truncated to a power-of-two width."""
    start = time.time()
    freqs = np.fft.rfft(_pad_array(array))
    logging.debug("np fft: {} s".format(time.time() - start))
    return freqs
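
# Example (illustrative): each row gets its own spectrum; an (8, 1000) input
# is first truncated to 512 columns, so the result has 512 // 2 + 1 = 257 bins:
#
#     >>> _fft(np.random.rand(8, 1000)).shape
#     (8, 257)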

BUNCHES_PER_TURN = config.bunches_per_turn
HEADER_SIZE_BYTES = 32

class DataSet(object):
    def __init__(self, array, filename, header=None):
        self.filename = filename
        self.array = array
        self._heatmaps = {}
        self._ffts = {}
        self.header = header

    @property
    def skipped_turns(self):
        if self.header:
            return self.header['skipped_turns']
        else:
            return 1

    def bunch(self, number):
        return self.array[self.array[:, 4] == number]

    def num_bunches(self):
        return self.array.shape[0]

    def num_turns(self):
        return self.num_bunches() // BUNCHES_PER_TURN

    def heatmap(self, adc=1, frm=0, to=-1, bunch_frm=0, bunch_to=-1):
        if not 1 <= adc <= 4:
            raise ValueError('adc must be in [1, 4]')
        if adc not in self._heatmaps:
            heatmap = self.array[:, adc - 1].reshape(-1, BUNCHES_PER_TURN).transpose()
            self._heatmaps[adc] = heatmap
        return self._heatmaps[adc][bunch_frm:bunch_to, frm:to]
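
    # Note on the layout produced above: the reshape assumes samples are stored
    # turn by turn with the bunch index cycling fastest, so each heatmap row is
    # one bunch and each column is one turn; bunch_frm/bunch_to select rows and
    # frm/to select turns.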

    def fft(self, adc=1, frm=0, to=-1, drop_first_bin=False):
        if not 1 <= adc <= 4:
            raise ValueError('adc must be in [1, 4]')
        # if adc not in self._ffts:
        #     heatmap = self.heatmap(adc, frm, to)
        #     self._ffts[adc] = np.fft.fft2(heatmap, axes=[1])
        # return self._ffts[adc]
        heatmap = self.heatmap(adc, frm, to)
        if drop_first_bin:
            return _fft(heatmap)[:, 1:]
        else:
            return _fft(heatmap)

    def fft_max_freq(self):
        return (self.num_turns() // 2 + 1) * self.fft_freq_dist()

    def fft_freq_dist(self):
        return 1.0 / (self.num_turns() * (self.skipped_turns + 1) * config.tRev)
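
    # Worked example (illustrative, with an assumed revolution time of
    # config.tRev = 368e-9 s): for num_turns() = 10000 and skipped_turns = 0,
    # fft_freq_dist() = 1 / (10000 * 1 * 368e-9) ~ 272 Hz per bin and
    # fft_max_freq() = (10000 // 2 + 1) * 272 Hz ~ 1.36 MHz.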

    def train(self, adc=1, frm=0, to=-1, **kwargs):
        pdata = self.array[frm:to, :-1]
        return pdata[:, adc - 1]

    def follow(self, adc=1, frm=0, to=-1, bunch=0, **kwargs):
        """Follow one bunch through time."""
        # pdata = self.array[frm:to, :-1]
        pdata = self.array[bunch::BUNCHES_PER_TURN, adc - 1]
        pdata = pdata[frm:to]
        return pdata

    def combined(self, frm=0, to=-1, show_reconstructed=True):
        array = self.array[frm:to, :]
        N_s = 16
        N_p = array.shape[0]
        # Remove bunch number and flatten array
        array = array[:, :4].reshape((-1, 1))
        array = array.flatten()
        # plot original data
        orig_xs = np.arange(0, array.shape[0])
        # axis.plot(orig_xs, array, '.')
        ret = [np.array([orig_xs, array])]
        # 4 rows, one per ADC
        ys = array.reshape((4, -1), order='F')
        # Multi-fit for each column of 4 ADCs; unfortunately the xs are always
        # the same, so we have to offset them later when computing the value.
        xs = [1, 2, 3, 4]
        c = np.polynomial.polynomial.polyfit(xs, ys, 6)
        smooth_xs = np.linspace(1, 4, N_s)
        fitted_ys = polyval(smooth_xs, c, tensor=True)
        if show_reconstructed:
            # Output maxima
            ys = np.max(fitted_ys, axis=1)
            xs = 4 * ((np.argmax(fitted_ys, axis=1) + 1) / float(N_s)) + orig_xs[::4] - .5
            # axis.plot(xs, ys, '.', color="red")
            ret.append(np.array([xs, ys]))
        if False:
            # Debug output
            ys = fitted_ys.reshape((-1, 1))
            ys = ys.flatten()
            xs = np.repeat(np.arange(0, 4 * N_p, 4), N_s) + np.tile(np.linspace(0, 3, N_s), N_p)
            # axis.plot(xs, ys, '.', alpha=0.3)
            ret.append(np.array([xs, ys]))
        return ret
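

# Minimal usage sketch (illustrative; assumes config.bunches_per_turn is set
# and uses purely synthetic data): build a DataSet from an array with one
# column per ADC plus a trailing bunch-number column, then query a heatmap and
# its spectrum. Run as a module (e.g. python -m <package>.dataset) so the
# relative import of config resolves.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    turns = 256
    adcs = np.random.rand(turns * BUNCHES_PER_TURN, 4)
    bunch_numbers = np.tile(np.arange(BUNCHES_PER_TURN), turns).reshape(-1, 1)
    data = DataSet(np.hstack([adcs, bunch_numbers]), filename='synthetic')
    print(data.num_turns(), 'turns')
    # Default to=-1 slices drop the last bunch row and the last turn column.
    print(data.heatmap(adc=1).shape)   # (BUNCHES_PER_TURN - 1, turns - 1)
    print(data.fft(adc=1, drop_first_bin=True).shape)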