123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155 |
- import os
- import math
- import logging
- import numpy as np
- # from ...config import bunches_per_turn as BUNCHES_PER_TURN
- from ... import config
- from board import HEADER_SIZE_BYTES
- from dataset import DataSet
- import board
- BUNCHES_PER_TURN = config.bunches_per_turn
def is_data_consistent(dataset):
    """Check that the bunch-number column of *dataset* counts 0..BUNCHES_PER_TURN-1.

    The last column of ``dataset.array`` is expected to repeat the sequence
    ``0, 1, ..., BUNCHES_PER_TURN - 1`` for every turn.  On a mismatch a small
    window around the first error is dumped to the file ``wrongdump`` for
    offline inspection.

    :param dataset: DataSet whose ``array`` attribute is a 2-D numpy array;
        the bunch number is in the last column.
    :return: True if the data is consistent (or only shows the known
        benign 222/223 filling pattern), False otherwise.
    """
    if len(dataset.array) == 0:
        return False
    bunch_numbers = dataset.array[:, -1]
    # "//" — np.tile needs an integer repeat count; plain "/" would produce a
    # float under Python 3 and raise.
    expected = np.tile(np.arange(0, BUNCHES_PER_TURN),
                       bunch_numbers.shape[0] // BUNCHES_PER_TURN)
    wrong_indices = np.argwhere(bunch_numbers != expected)
    if wrong_indices.shape[0] == 0:
        return True
    first_error = bunch_numbers.shape[0] - wrong_indices.shape[0]
    logging.info('Data inconsistent at offset %i' % first_error)
    np.savetxt('wrongdump', dataset.array[first_error - 3: first_error + 3])
    filling = bunch_numbers[wrong_indices[0][0]:]
    # Truncate to an even length so the element-wise comparison below is
    # shape-compatible (modern numpy raises on mismatched shapes).
    filling = filling[:2 * (filling.shape[0] // 2)]
    expected_filling = np.tile([222, 223], filling.shape[0] // 2)
    wrong_filling_indices = np.argwhere(filling != expected_filling)
    # Some times filling does not start immediately... Why? I have NO IDEA!
    # NOTE(review): tolerance of up to 2 mismatches kept from the original.
    return wrong_filling_indices.shape[0] <= 2
- def _cached_exist(filename):
- return os.path.exists(os.path.abspath('{}.npy'.format(filename)))
def decode_data(data):
    """Decode raw 32-bit device words into a ``(N, 5)`` uint16 array.

    Each 32-bit word packs two 12-bit samples plus bunch-number bits:
    bits 0-11 the "low" sample, bits 12-23 the "high" sample, and bits 24+
    contribute to the bunch number.  Words arrive in groups of four; each
    group expands to two output rows (low row, high row) of four samples
    plus the bunch number in column 4.

    :param data: 1-D numpy array of uint32 raw words.
    :return: uint16 array of shape (N, 5), truncated to whole turns of
        BUNCHES_PER_TURN rows.
    """
    # 0xDEADDEAD is the new filler word sent by the board — drop it.
    data = data[np.where(data != 0xDEADDEAD)]
    # Keep only a multiple of four words ("//" instead of the old
    # int(4 * math.floor(... / 4)) — same result, pure integer math).
    data = data[:4 * (data.size // 4)]
    bunch_low = data & 0xfff
    bunch_high = np.right_shift(data, 12) & 0xfff
    bunch_number = np.right_shift(data, 24) & 0xfff
    bunch_low = bunch_low.reshape(-1, 4)
    bunch_high = bunch_high.reshape(-1, 4)
    result = np.empty((bunch_low.shape[0] + bunch_high.shape[0], 5),
                      dtype=np.uint16)
    # Interleave: even rows carry the low samples, odd rows the high ones.
    result[0::2, :4] = bunch_low
    result[1::2, :4] = bunch_high
    result[0::2, 4] = bunch_number[::4]
    result[1::2, 4] = bunch_number[::4] + 1
    # Truncate to an integral number of turns.
    result = result[:BUNCHES_PER_TURN * (result.shape[0] // BUNCHES_PER_TURN), :]
    return result
def data_has_header(data):
    """Check whether the first header-sized chunk of *data* carries a header marker.

    :param data: 1-D numpy array of uint32 words.
    :return: tuple ``(front, back)`` — True where the first / last word of the
        possible header matches the ``0xF8888888`` marker mask.
    """
    # "//" — a float slice bound raises TypeError under Python 3.
    possible_header = data[0:board.HEADER_SIZE_BYTES // 4]
    # Parentheses are required: "==" binds tighter than "&", so the original
    # "x & 0xF8888888 == 0xF8888888" evaluated "x & True" — i.e. it tested
    # the lowest bit instead of the header marker mask.
    back = (possible_header[-1] & 0xF8888888) == 0xF8888888
    front = (possible_header[0] & 0xF8888888) == 0xF8888888
    return (front, back)
def get_num_of_skipped_turns(data, header_info):
    """Extract the skipped-turns counter from the header words.

    :param data: 1-D numpy array of uint32 words starting with the header.
    :param header_info: ``(front, back)`` tuple as returned by
        ``data_has_header``; selects which end of the header holds the value.
    :return: number of skipped turns (lowest 6 bits of the marker word), or
        None if neither flag is set.
    """
    # "//" — a float slice bound raises TypeError under Python 3.
    header = data[0:board.HEADER_SIZE_BYTES // 4]
    if header_info[0]:
        return header[-1] & 0b00111111
    elif header_info[1]:
        return header[0] & 0b00111111
    # Explicit fall-through: no header marker found.
    return None
def parse_header(data, header_info):
    """
    Parse the Header and return the values in a dictionary
    :param data: the data which contains a header
    :return: dictionary with header entries
    """
    skipped = get_num_of_skipped_turns(data, header_info)
    return {"skipped_turns": skipped}
def read_from_file(filename, force=False, header=False, cache=False):
    """
    Read data from file
    :param filename: file to read
    :param force: force reread and do not take values from cache
    :param header: only for backwards compatibility
    :param cache: save cache
    :return: dataset
    """
    if _cached_exist(filename) and not force:
        cached_filename = '{}.npy'.format(filename)
        logging.vinfo("Read pre-computed data from {}".format(cached_filename))
        return DataSet(np.load(cached_filename), filename)
    with open(filename, 'rb') as f:
        logging.vinfo("Read data from {}".format(filename))
        data = np.fromfile(f, dtype=np.uint32)
    if len(data) == 0:
        logging.error("File with 0b read.")
        return DataSet(data, filename, header)
    # If header is sent with the data, truncate it
    header_info = data_has_header(data)
    if True in header_info:
        logging.vinfo("Header detected.")
        # We read words of 4 bytes each; "//" keeps the slice bound an int
        # (a float bound raises TypeError under Python 3).
        header = parse_header(data, header_info)
        splice_words = HEADER_SIZE_BYTES // 4
        data = data[splice_words:]
    else:
        logging.vinfo("No Header detected.")
        header = None
    result = decode_data(data)
    dataset = DataSet(result, filename, header)
    if cache:
        logging.vinfo('Saving pre-computed data')
        np.save('{}.npy'.format(filename), result)
    return dataset
def read_from_string(raw_data, force=False, header=False, cache=False,
                     cache_filename="_heb_data_cache"):
    """
    Read data directly from a raw byte string delivered by the device.
    :param raw_data: raw bytes containing uint32 words (optionally headed)
    :param force: force reread and do not take values from cache
    :param header: only for backwards compatibility
    :param cache: save cache
    :param cache_filename: base name for the .npy cache file
    :return: dataset
    """
    if _cached_exist(cache_filename) and not force:
        cache_file = '{}.npy'.format(cache_filename)
        logging.vinfo("Read pre-computed data from {}".format(cache_file))
        return DataSet(np.load(cache_file), cache_filename)
    logging.vinfo("Read data directly from device.")
    logging.vinfo("Read %i bytes of data" % len(raw_data))
    # np.fromstring is deprecated (removed in numpy >= 1.24); frombuffer is
    # the drop-in replacement for binary input.
    data = np.frombuffer(raw_data, dtype=np.uint32)
    # If header is sent with the data, truncate it
    header_info = data_has_header(data)
    if True in header_info:
        # We read words of 4 bytes each; "//" keeps the slice bound an int
        # (a float bound raises TypeError under Python 3).
        header = parse_header(data, header_info)
        splice_words = HEADER_SIZE_BYTES // 4
        data = data[splice_words:]
    else:
        header = None
    result = decode_data(data)
    dataset = DataSet(result, "HEB Live Data", header)
    if cache:
        logging.vinfo('Saving pre-computed data')
        np.save('{}.npy'.format(cache_filename), result)
    return dataset
|