def data_generator(data_file, index_list, batch_size=1, binary=True):
    """Endlessly yield ``(x, y)`` training batches from an HDF5-style table.

    Parameters
    ----------
    data_file : object
        Open data handle; assumed PyTables-like, exposing ``root.data`` and
        ``root.truth`` indexable by subject index — TODO confirm against caller.
    index_list : list of int
        Subject indices to draw from. NOTE: shuffled in place each epoch,
        so the caller's list is mutated.
    batch_size : int, optional
        Number of subjects per yielded batch (default 1).
    binary : bool, optional
        When True, collapse all positive truth labels to 1 so the target is
        a binary foreground/background mask.

    Yields
    ------
    (numpy.ndarray, numpy.ndarray)
        Arrays of ``batch_size`` stacked data and truth samples.
    """
    while True:
        # New epoch: re-shuffle the subject order (mutates index_list in place).
        shuffle(index_list)
        x_list = list()
        y_list = list()
        # Edge case: the last len(index_list) % batch_size subjects of an epoch
        # never fill a complete batch and are dropped (flooring behavior).
        for index in index_list:
            x_list.append(data_file.root.data[index])
            y_list.append(data_file.root.truth[index])
            if len(x_list) == batch_size:
                x = np.asarray(x_list)
                y = np.asarray(y_list)
                # Reset accumulators before mutating/yielding the batch arrays.
                x_list = list()
                y_list = list()
                if binary:
                    # Collapse multi-class labels to a binary mask.
                    y[y > 0] = 1
                yield x, y