# io.py — HEB board data I/O: reading, header parsing and decoding of raw ADC words.
  1. import os
  2. import math
  3. import logging
  4. import numpy as np
  5. # from ...config import bunches_per_turn as BUNCHES_PER_TURN
  6. from ... import config
  7. from board import HEADER_SIZE_BYTES
  8. from dataset import DataSet
  9. import board
# Number of bunches in one accelerator turn, taken from the package config.
BUNCHES_PER_TURN = config.bunches_per_turn
  11. def is_data_consistent(dataset):
  12. if len(dataset.array) == 0:
  13. return False
  14. bunch_numbers = dataset.array[:, -1]
  15. expected = np.tile(np.arange(0, BUNCHES_PER_TURN), bunch_numbers.shape[0] / BUNCHES_PER_TURN)
  16. wrong_indices = np.argwhere(bunch_numbers != expected)
  17. if wrong_indices.shape[0] > 0:
  18. first_error = bunch_numbers.shape[0] - wrong_indices.shape[0]
  19. logging.info('Data inconsistent at offset %i'%first_error)
  20. np.savetxt('wrongdump', dataset.array[first_error - 3: first_error + 3])
  21. filling = bunch_numbers[wrong_indices[0][0]:]
  22. expected_filling = np.tile([222, 223], filling.shape[0] / 2)
  23. wrong_filling_indices = np.argwhere(filling != expected_filling)
  24. if wrong_filling_indices.shape[0] > 2: # Some times filling does not start immediately... Why? I have NO IDEA!
  25. return False
  26. else:
  27. return True
  28. else:
  29. return True
  30. def _cached_exist(filename):
  31. return os.path.exists(os.path.abspath('{}.npy'.format(filename)))
  32. def decode_data(data):
  33. # data = data[np.where(data != 0x01234567)]
  34. data = data[np.where(data != 0xDEADDEAD)] # This is the new filling
  35. # Make sure we read multiple of fours
  36. data = data[:4 * (math.floor(data.size / 4))]
  37. bunch_low = data & 0xfff
  38. bunch_high = np.right_shift(data, 12) & 0xfff
  39. bunch_number = np.right_shift(data, 24) & 0xfff
  40. bunch_low = bunch_low.reshape(-1, 4)
  41. bunch_high = bunch_high.reshape(-1, 4)
  42. result = np.empty((bunch_low.shape[0] + bunch_high.shape[0], 5), dtype=np.uint16)
  43. result[0::2,:4] = bunch_low
  44. result[1::2,:4] = bunch_high
  45. result[0::2, 4] = bunch_number[::4]
  46. result[1::2, 4] = bunch_number[::4] + 1
  47. result = result[:184 * (math.floor(result.shape[0] / 184)), :]
  48. return result
  49. def data_has_header(data):
  50. possible_header = data[0:board.HEADER_SIZE_BYTES/4]
  51. back = possible_header[-1] & 0xF8888888 == 0xF8888888
  52. front = possible_header[0] & 0xF8888888 == 0xF8888888
  53. return (front, back)
  54. def get_num_of_skipped_turns(data, header_info):
  55. header = data[0:board.HEADER_SIZE_BYTES/4]
  56. if header_info[0]:
  57. return header[-1] & 0b00111111
  58. elif header_info[1]:
  59. return header[0] & 0b00111111
  60. def parse_header(data, header_info):
  61. """
  62. Parse the Header and return the values in a dictionary
  63. :param data: the data which contains a header
  64. :return: dictionary with header entries
  65. """
  66. dic = {"skipped_turns": get_num_of_skipped_turns(data, header_info)}
  67. return dic
  68. def read_from_file(filename, force=False, header=False, cache=False):
  69. """
  70. Read data from file
  71. :param filename: file to read
  72. :param force: force reread and do not take values from cache
  73. :param header: only for backwards compatibility
  74. :param cache: save cache
  75. :return: dataset
  76. """
  77. if _cached_exist(filename) and not force:
  78. cached_filename = '{}.npy'.format(filename)
  79. logging.vinfo("Read pre-computed data from {}".format(cached_filename))
  80. return DataSet(np.load(cached_filename), filename)
  81. with open(filename, 'rb') as f:
  82. logging.vinfo("Read data from {}".format(filename))
  83. data = np.fromfile(f, dtype=np.uint32)
  84. if len(data) == 0:
  85. logging.error("File with 0b read.")
  86. return DataSet(data, filename, header)
  87. # If header is sent with the data, truncate it
  88. header_info = data_has_header(data)
  89. if True in header_info:
  90. logging.vinfo("Header detected.")
  91. # We read words of 4 bytes each
  92. header = parse_header(data, header_info)
  93. splice_words = HEADER_SIZE_BYTES / 4
  94. data = data[splice_words:]
  95. else:
  96. logging.vinfo("No Header detected.")
  97. header = None
  98. result = decode_data(data)
  99. dataset = DataSet(result, filename, header)
  100. if cache:
  101. logging.vinfo('Saving pre-computed data')
  102. np.save('{}.npy'.format(filename), result)
  103. return dataset
  104. def read_from_string(raw_data, force=False, header=False, cache=False, cache_filename="_heb_data_cache"):
  105. if _cached_exist(cache_filename) and not force:
  106. cache_file = '{}.npy'.format(cache_filename)
  107. logging.vinfo("Read pre-computed data from {}".format(cache_file))
  108. return DataSet(np.load(cache_file), cache_filename)
  109. logging.vinfo("Read data directly from device.")
  110. logging.vinfo("Read %i bytes of data" % len(raw_data))
  111. data = np.fromstring(raw_data, dtype=np.uint32)
  112. #If header is sent with the data, truncate it
  113. header_info = data_has_header(data)
  114. if True in header_info:
  115. # We read words of 4 bytes each
  116. header = parse_header(data, header_info)
  117. splice_words = HEADER_SIZE_BYTES / 4
  118. data = data[splice_words:]
  119. else:
  120. header = None
  121. result = decode_data(data)
  122. dataset = DataSet(result, "HEB Live Data", header)
  123. if cache:
  124. logging.vinfo('Saving pre-computed data')
  125. np.save('{}.npy'.format(cache_filename), result)
  126. return dataset