# io.py -- helpers for reading, decoding and caching raw HEB bunch data.
  1. import os
  2. import math
  3. import logging
  4. import numpy as np
  5. from heb import DataSet, BUNCHES_PER_TURN, HEADER_SIZE_BYTES
  6. def is_data_consistent(dataset):
  7. bunch_numbers = dataset.array[:, -1]
  8. expected = np.tile(np.arange(0, BUNCHES_PER_TURN), bunch_numbers.shape[0] / BUNCHES_PER_TURN)
  9. wrong_indices = np.argwhere(bunch_numbers != expected)
  10. if wrong_indices.shape[0] > 0:
  11. filling = bunch_numbers[wrong_indices[0][0]:]
  12. expected_filling = np.tile([0xdead], filling.shape[0])
  13. wrong_filling_indices = np.argwhere(filling != expected_filling)
  14. if wrong_filling_indices.shape[0] > 2: # Some times filling does not start immediately... Why? I have NO IDEA!
  15. return False
  16. else:
  17. return True
  18. else:
  19. return True
  20. def _cached_exist(filename):
  21. return os.path.exists(os.path.abspath('{}.npy'.format(filename)))
  22. def decode_data(data):
  23. data = data[np.where(data != 0x01234567)]
  24. # Make sure we read multiple of fours
  25. data = data[:4 * (math.floor(data.size / 4))]
  26. bunch_low = data & 0xfff
  27. bunch_high = np.right_shift(data, 12) & 0xfff
  28. bunch_number = np.right_shift(data, 24) & 0xfff
  29. bunch_low = bunch_low.reshape(-1, 4)
  30. bunch_high = bunch_high.reshape(-1, 4)
  31. result = np.empty((bunch_low.shape[0] + bunch_high.shape[0], 5), dtype=np.float)
  32. result[0::2,:4] = bunch_low
  33. result[1::2,:4] = bunch_high
  34. result[0::2, 4] = bunch_number[::4]
  35. result[1::2, 4] = bunch_number[::4] + 1
  36. result = result[:184 * (math.floor(result.shape[0] / 184)), :]
  37. return result
  38. def read_from_file(filename, force=False, header=False, cache=False):
  39. if _cached_exist(filename) and not force:
  40. cached_filename = '{}.npy'.format(filename)
  41. logging.info("Read pre-computed data from {}".format(cached_filename))
  42. return DataSet(np.load(cached_filename), filename)
  43. with open(filename, 'rb') as f:
  44. logging.info("Read data from {}".format(filename))
  45. data = np.fromfile(f, dtype=np.uint32)
  46. #If header is sent with the data, truncate it
  47. if header:
  48. # We read words of 4 bytes each
  49. splice_words = HEADER_SIZE_BYTES / 4
  50. data = data[splice_words:]
  51. result = decode_data(data)
  52. dataset = DataSet(result, filename)
  53. if cache:
  54. logging.info('Saving pre-computed data')
  55. np.save('{}.npy'.format(filename), result)
  56. return dataset
  57. def read_from_string(raw_data, force=False, header=False, cache=False, cache_filename="_heb_data_cache"):
  58. if _cached_exist(cache_filename) and not force:
  59. cache_file = '{}.npy'.format(cache_filename)
  60. logging.info("Read pre-computed data from {}".format(cache_file))
  61. return DataSet(np.load(cache_file), cache_filename)
  62. logging.info("Read data directly from device.")
  63. logging.info("Read %i bytes of data" % len(raw_data))
  64. data = np.fromstring(raw_data, dtype=np.uint32)
  65. #If header is sent with the data, truncate it
  66. if header:
  67. # We read words of 4 bytes each
  68. splice_words = HEADER_SIZE_BYTES / 4
  69. data = data[splice_words:]
  70. result = decode_data(data)
  71. dataset = DataSet(result, "HEB Live Data")
  72. if cache:
  73. logging.info('Saving pre-computed data')
  74. np.save('{}.npy'.format(cache_filename), result)
  75. return dataset