classdef DecodeTensor < handle
methods(Static) %higher level functions, collecting data
function dispatch(dispatch_index, padded, distance_cutoff)
%Dispatching decoding as a function of number of cells using
%default options
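%
% Example (a sketch; assumes the sessions listed in
% default_datasets are available under linear_track_path):
%   DecodeTensor.dispatch(1);           % session 1, default options
%   DecodeTensor.dispatch(1, true, 15); % padded events, 15 px cell-distance cutoff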
if ~exist('padded', 'var')
padded = false;
end
if ~exist('distance_cutoff', 'var')
distance_cutoff = 0;
end
[source_path, mouse_name] = DecodeTensor.default_datasets(dispatch_index);
opt = DecodeTensor.default_opt;
if padded
opt.neural_data_type = 'FST_padded';
end
opt.restrict_cell_distance = distance_cutoff; %e.g. 15
DecodeTensor.decode_series(source_path, mouse_name, opt);
end
function dispatch_filt(dispatch_index, data_type)
%index is from 1 to 107
d = DecodeTensor.cons_filt(dispatch_index, true);
opt = DecodeTensor.default_opt;
if exist('data_type', 'var')
opt.neural_data_type = data_type;
end
DecodeTensor.decode_series(d{1}, d{2}, opt);
end
function dispatch_datasize_filt(dispatch_index, data_type)
%index is from 1 to 107
d = DecodeTensor.cons_filt(dispatch_index, true);
opt = DecodeTensor.default_opt;
if exist('data_type', 'var')
opt.neural_data_type = data_type;
end
DecodeTensor.decode_datasize_series(d{1}, d{2}, opt);
end
function decode_series(source_path, mouse_id, opt)
%%Decoding performance as a function of number of neurons
% in three settings: unshuffled, shuffled, and diagonal
% The shuffled setting is when the training and testing sets
% are both shuffled.
% The diagonal setting is when the learning model is
% insensitive to correlations (e.g. naive Bayes models, or any
% model where the training data is trial shuffled prior to
% learning)
% saves records into a file containing the following fields:
% Mouse (representing mouse ID)
% Setting (representing either 'unshuffled', 'shuffled', or
% 'diagonal')
% NumNeurons (number of neurons used for decoding)
% DataSize (number of trials [of each direction] used for
% decoding)
% MeanErrors (mean absolute error of decoding)
% MSE (Mean squared error of decoding)
%
% These records are later entered into a SQLite database;
% they are not entered within this function so that multiple
% instances of this function can run in parallel.
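%
% Example (a sketch; session paths as returned by default_datasets):
%   [sp, mid] = DecodeTensor.default_datasets(1);
%   DecodeTensor.decode_series(sp, mid, DecodeTensor.default_opt);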
%table_name = 'decoding';
%field_names = {'Mouse', 'SessionID', 'Setting', 'NumNeurons', 'DataSize', 'MinDist', 'MeanErrors', 'MSE', 'SampleID'};
%Load the data tensor and the direction of motion labels for
%each trial.
session_id = regexp(source_path, '_([0-9]|&)+-', 'match');
session_id = session_id{1}(2:end-1);
if opt.restrict_cell_distance == 0
[data_tensor, tr_dir] = DecodeTensor.tensor_loader(source_path, mouse_id, opt);
num_neurons = size(data_tensor, 1);
else
[data_tensor, tr_dir, cell_coords] = DecodeTensor.tensor_loader(source_path, mouse_id, opt);
%f_dmat = @(v) sqrt((v(:,1)-v(:,1)').^2 + (v(:,2)-v(:,2)').^ 2);
%d_mat = f_dmat(cell_coords);
%remainers = cell_distance_filter(d_mat, opt.restrict_cell_distance);
remainers = ising_distance_constraint(cell_coords, opt.restrict_cell_distance);
num_neurons = sum(remainers);
fprintf('restricting cell distance to %d px, %d / %d cells remain\n', ...
opt.restrict_cell_distance, num_neurons, length(remainers));
data_tensor = data_tensor(remainers,:,:);
end
%If the caller requests a limit on the number of trials used for
%decoding, first check that enough trials are available (return
%if not); otherwise use the maximal number of trials such that
%both directions of motion have an equal number.
if opt.restrict_trials > 0
num_trials = opt.restrict_trials;
max_trials = min(sum(tr_dir == 1), sum(tr_dir == -1));
if num_trials > max_trials
return;
end
else
num_trials = min(sum(tr_dir == 1), sum(tr_dir == -1));
end
%Load the SVM ensembles, the ordinary one, and the one that
%shuffles training data before learning to serve as the
%diagonal decoder.
%These are in the form of a struct with fields called
%'train' and 'test', to be used as follows
%(when classifying from data X to labels y):
% model = alg.train(X, y)
% y_prediction = alg.test(model, X)
%(in actual usage the training and testing data should be
%different)
alg = my_algs('ecoclin');
alg_diag = my_algs('ecoclin', 'shuf');
%The neuron ensemble sizes to decode from are defined to be
% [1, d, 2*d, 3*d, ..., n*d, MAX_NEURONS]
% or [1, d:d:MAX_NEURONS, MAX_NEURONS]
% where d represents opt.d_neurons, the increment in the number
% of neurons
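% (e.g. with opt.d_neurons = 10 and 437 recorded cells the
% series is [1 10 20 ... 430 437])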
neuron_series = opt.d_neurons:opt.d_neurons:num_neurons;
if neuron_series(1) ~= 1
neuron_series = [1 neuron_series];
end
if neuron_series(end) ~= num_neurons
neuron_series = [neuron_series num_neurons];
end
%Collecting records from unshuffled, shuffled & diagonal
%decoding. The records are saved and later inputted to a SQLite
%database.
db_queue = cell(numel(neuron_series),3);
for i = numel(neuron_series):-1:1
sample_id = randi(2^16);
n_neu = neuron_series(i);
err_res = DecodeTensor.decode_all(data_tensor, tr_dir, opt.bin_width, alg, n_neu, num_trials);
%[mean_err, MSE] = DecodeTensor.decode_tensor(data_tensor, tr_dir, opt.bin_width, alg, false,...
% n_neu, num_trials);
db_queue{i,1} = ...
{mouse_id, session_id, 'unshuffled', n_neu, num_trials, opt.restrict_cell_distance, err_res.mean_err.unshuffled, err_res.MSE.unshuffled, sample_id};
fprintf('n_neu=%d\tmean_err = %.2f\n', n_neu, err_res.mean_err.unshuffled);
%[mean_err_s, MSE_s] = DecodeTensor.decode_tensor(data_tensor, tr_dir, opt.bin_width, alg, true,...
% n_neu, num_trials);
db_queue{i,2} = ...
{mouse_id, session_id, 'shuffled', n_neu, num_trials, opt.restrict_cell_distance, err_res.mean_err.shuffled, err_res.MSE.shuffled, sample_id};
fprintf('n_neu=%d\tmean_err_s = %.2f\n', n_neu, err_res.mean_err.shuffled);
%[mean_err_d, MSE_d] = DecodeTensor.decode_tensor(data_tensor, tr_dir, opt.bin_width, alg_diag, false,...
% n_neu, num_trials);
db_queue{i,3} = ...
{mouse_id, session_id, 'diagonal', n_neu, num_trials, opt.restrict_cell_distance, err_res.mean_err.diagonal, err_res.MSE.diagonal, sample_id};
fprintf('n_neu=%d\tmean_err_d = %.2f\n\n', n_neu, err_res.mean_err.diagonal);
end
if ~exist('records', 'dir')
mkdir('records');
end
save(sprintf('records/decoding_record_%s.mat', timestring), 'db_queue');
end
function opt = default_opt
%%Default options to use for analysis
% opt.total_length = total length of the track (118cm)
% opt.cutoff_p = percentile at which the length of the track in
% pixels is determined. (5%ile)
% opt.samp_freq = frame sampling frequency (20Hz)
% opt.v_thresh = throw away frames slower than this speed
% (4cm/s)
% opt.n_bins = number of spatial bins to create (20)
% opt.d_neurons = step size in the number of neurons when
% decoding as a function of ensemble size (10)
% opt.restrict_trials = only use this many trials (for each
% direction of motion); -1 means use all available
% (156 leaves 3 eligible mice)
% opt.neural_data_type = e.g. 'rawTraces', 'rawProb' or
% 'spikeDeconv' ('events_transients' by default)
% opt.bin_width = width of a place bin in cm (by default this
% is total length / number of bins)
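%
% Example (a sketch): start from the defaults and override fields
%   opt = DecodeTensor.default_opt;
%   opt.neural_data_type = 'rawTraces';
%   opt.restrict_trials = 156;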
opt.total_length = 118; %cm
opt.cutoff_p = 5; %percentile
opt.samp_freq = 20; %Hz
opt.v_thresh = 4; %cm/s
opt.n_bins = 20; % was temporarily 50
opt.leeway_frac = 1/20;
opt.d_neurons = 10;%30;
opt.restrict_trials = -1;
opt.neural_data_type = 'events_transients';
opt.bin_width = opt.total_length/opt.n_bins;
%extra
opt.d_trials = 10;
opt.first_half = false;
opt.pad_seconds = -1;%0.8;
opt.discard_incomplete_trials = true;% was true
opt.restrict_cell_distance = 0;
opt.interactive = false;
end
function [source_path, mouse_name, session_id] = default_datasets(index, pref)
if ~exist('pref', 'var')
pref = DecodeTensor.linear_track_path;
end
%These are the paths to selected recorded sessions from 10 mice
source_paths = {...
fullfile(pref, 'Mouse2023/Mouse-2023-20150316-linear-track/Mouse-2023-20150316_091246-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2024/Mouse-2024-20150311-linear-track/Mouse-2024-20150311_073912-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2028/Mouse-2028-20150327-linear-track/Mouse-2028-20150327_105544-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2012/Mouse-2012-20141217-linear-track/Mouse-2012-20141217_104915-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2019/Mouse-2019-20150331-linear-track/Mouse-2019-20150331_125138-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2022/Mouse-2022-20150326-linear-track/Mouse-2022-20150326_093722-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2026/Mouse-2026-20150305-linear-track/Mouse-2026-20150305_115921-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2021/Mouse-2021-20150326-linear-track/Mouse-2021-20150326_085536-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2025/Mouse-2025-20150313-linear-track/Mouse-2025-20150313_103035-linear-track-TracesAndEvents.mat'),...
fullfile(pref, 'Mouse2029/Mouse-2029-20150321-linear-track/Mouse-2029-20150321_122709-linear-track-TracesAndEvents.mat'),...
};
%These are the corresponding mouse ID's
mouse_names = {'Mouse2023', 'Mouse2024', 'Mouse2028', 'Mouse2012',...
'Mouse2019', 'Mouse2022', 'Mouse2026', 'Mouse2021', 'Mouse2025', 'Mouse2029'};
session_ids = {'091246', '073912', '105544', '104915', '125138', '093722', '115921', '085536', '103035', '122709'};
source_path = source_paths{index};
mouse_name = mouse_names{index};
session_id = session_ids{index};
end
function id = qid(indices)
for j = 1:numel(indices)
index = indices(j);
[~,~,id{j}] = DecodeTensor.default_datasets(index);
end
end
function [source_path, mouse_name] = default_datasets_OLD(index)
%These are the paths to selected recorded sessions from 8 mice
source_paths = {...
'../linear_track/Mouse2010/Mouse-2010-20141125-linear-track/Mouse-2010-20141125_000156-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2012/Mouse-2012-20150114-linear-track/Mouse-2012-20150114_140933-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2019/Mouse-2019-20150311-linear-track/Mouse-2019-20150311_101049-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2022/Mouse-2022-20150326-linear-track/Mouse-2022-20150326_093722-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2023/Mouse-2023-20150326-linear-track/Mouse-2023-20150326_101329-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2024/Mouse-2024-20150311-linear-track/Mouse-2024-20150311_073912-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2026/Mouse-2026-20150303-linear-track/Mouse-2026-20150303_095846-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2028/Mouse-2028-20150327-linear-track/Mouse-2028-20150327_105544-linear-track-TracesAndEvents.mat'...
'../linear_track/Mouse2025/Mouse-2025-20150303-linear-track/Mouse-2025-20150303_091715-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2011/Mouse-2011-20150209-linear-track/Mouse-2011-20150209_092502-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2029/Mouse-2029-20150311-linear-track/Mouse-2029-20150311_131945-linear-track-TracesAndEvents.mat',...
'../linear_track/Mouse2021/Mouse-2021-20150326-linear-track/Mouse-2021-20150326_085536-linear-track-TracesAndEvents.mat',...
};
%These are the corresponding mouse ID's
mouse_names = {'Mouse2010', 'Mouse2012', 'Mouse2019', 'Mouse2022',...
'Mouse2023', 'Mouse2024', 'Mouse2026', 'Mouse2028', 'Mouse2025', 'Mouse2011', 'Mouse2029', 'Mouse2021'};
source_path = source_paths{index};
mouse_name = mouse_names{index};
end
function [T, d, cell_coords] = tensor_loader(source_path, mouse_id, opt)
%%Load data tensor from file, from path and name of the mouse
% uses opt struct for options
% opt.neural_data_type = 'rawTraces' OR 'rawProb' OR
% 'spikeDeconv' (among others handled below)
% opt.n_bins = number of bins to use, e.g. 20
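%
% Example (a sketch; assumes the session file is on disk):
%   opt = DecodeTensor.default_opt;
%   [sp, mid] = DecodeTensor.default_datasets(1);
%   [T, d] = DecodeTensor.tensor_loader(sp, mid, opt);
%   %T is (neurons x bins x trials), d is the per-trial direction (+1/-1)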
load(source_path);
[~,w_] = fileparts(source_path);
if strcmp(w_, 'Mouse-2026-20150329_115639-linear-track-TracesAndEvents')
% special_74_cut_350 = true;
% tracesEvents.position = tracesEvents.position(351:end,:);
% tracesEvents.velocity = tracesEvents.velocity(351:end,:);
% tracesEvents.rawProb = tracesEvents.rawProb(351:end,:);
% tracesEvents.rawTraces = tracesEvents.rawTraces(351:end,:);
% tracesEvents.zTraces = tracesEvents.zTraces(351:end,:);
% tracesEvents.zProb = tracesEvents.zProb(351:end,:);
% tracesEvents.tresholdEvents = tracesEvents.tresholdEvents(351:end,:);
% tracesEvents.spikeDeconvTrace = tracesEvents.spikeDeconvTrace(351:end,:);
% tracesEvents.spikeDeconv = tracesEvents.spikeDeconv(351:end,:);
% tracesEvents.spikeML = tracesEvents.spikeML(351:end,:);
F = fields(tracesEvents);
for f_i = 1:numel(F)
tracesEvents.(F{f_i}) = tracesEvents.(F{f_i})(351:end,:);
end
end
track_coord = tracesEvents.position(:,1);
if any(strcmp(opt.neural_data_type, {'FST_events', 'FST_filled', 'FST_padded', 'IED', 'WED', 'HD', 'HD_gamma'}))
fieldname = 'rawTraces';
elseif strcmp(opt.neural_data_type, 'binary_spikes')
fieldname = 'HD_spikes';
else
fieldname = opt.neural_data_type;
end
if isfield(tracesEvents, fieldname)
X = tracesEvents.(fieldname);
else
error('No such field %s. Options are rawTraces, rawProb, spikeDeconv, FST_events, FST_filled, FST_padded', fieldname);
end
%if strcmp(mouse_id, 'Mouse2022')
% track_coord = track_coord(91:end);
% X = X(91:end,:);
%end
if strcmp(opt.neural_data_type, 'FST_events')
X = Utils.event_detection(X);
end
if strcmp(opt.neural_data_type, 'FST_filled')
X = Utils.event_detection(X, true);
end
if strcmp(opt.neural_data_type, 'FST_padded')
X = Utils.event_detection(X);
if opt.pad_seconds < 0
opt.pad_seconds = 0.5;
end
X_bin_full_padded = conv2(X, ones(opt.pad_seconds*20,1), 'full');
X_bin_full_padded = X_bin_full_padded(1:size(X,1),:);
X = X_bin_full_padded;
end
if strcmp(opt.neural_data_type, 'IED')
X = iterative_event_detection(X);
end
if strcmp(opt.neural_data_type, 'WED')
if opt.pad_seconds < 0
opt.pad_seconds = 0.5;
end
if isfield(opt, 'WED_base')
fprintf('Using saved WED\n');
X_ = conv2(opt.WED_base, ones(round(opt.pad_seconds*20),1), 'full');
X_ = X_(1:size(X,1),:);
X = X_;
else
X = wavelet_event_detection(X, 'z_val', 1, 'Progress', true, 'fps', 20, 'OutType', 'onset', 'KernelWidth', max(1,round(opt.pad_seconds*20)));
end
end
if strcmp(opt.neural_data_type, 'HD') || strcmp(opt.neural_data_type, 'HD_gamma')
use_default_paddings = opt.pad_seconds < 0;
if strcmp(opt.neural_data_type, 'HD')
if use_default_paddings
opt.pad_seconds = 0.5;
end
transient_profile = ones(round(opt.pad_seconds*20),1);
else
if use_default_paddings
opt.pad_seconds = 1.5;
end
T = opt.pad_seconds;
Fs = 20;
gamma_sample_times = linspace(0,7.5,T * Fs + 1);
gamma_sample_times = gamma_sample_times(2:end);
gamma = @(t) t.*exp(1-t);
transient_profile = gamma(gamma_sample_times);
transient_profile = transient_profile(:);
end
if isfield(opt, 'WED_base')
fprintf('Using saved WED\n');
X_ = conv2(opt.WED_base, transient_profile, 'full');
X_ = X_(1:size(X,1),:);
X = X_;
else
X = hyperdetect(X, 'z_val', 3, 'Progress', false, 'OutType', 'onset');
X_ = conv2(X, transient_profile, 'full');
X_ = X_(1:size(X,1),:);
X = X_;
end
X = X ./ (std(X) + eps);
end
if strcmp(opt.neural_data_type, 'binary_spikes')
X = X ~= 0;
end
if opt.first_half
total_length = numel(track_coord);
half_length = floor(total_length/2);
track_coord = track_coord(1:half_length);
X = X(1:half_length,:);
end
[~, ~, tr_s, tr_e, tr_dir, tr_bins, ~] = DecodeTensor.new_sel(track_coord, opt);
data_tensor = DecodeTensor.construct_tensor(X, tr_bins, opt.n_bins, tr_s, tr_e);
T = data_tensor; d = tr_dir;
if nargout == 3
cell_coords = tracesEvents.cellAnatomicLocat;
assert(size(cell_coords,1) == size(T,1), 'mismatch between cell coord numbers and number of traces');
end
end
function results_table = adjacent_metrics(data_tensor, tr_dir, num_neurons, num_trials, no_decoding)
if ~exist('no_decoding', 'var')
no_decoding = false;
end
[data_tensor, tr_dir] = DecodeTensor.cut_tensor(data_tensor, tr_dir, num_neurons, num_trials);
[T1, d1, T2, d2, ~] = DecodeTensor.holdout_half(data_tensor, tr_dir);
T1s = DecodeTensor.shuffle_tensor(T1, d1);
T2s = DecodeTensor.shuffle_tensor(T2, d2);
[N, K, T] = size(T1);
results_table = cell2table(repmat({zeros(1,(K-1)*2)}, 3,6),...
'VariableNames', {'se', 'ce', 'ue', 'sr', 'cr', 'ur'},...
'RowNames', {'f', 'd', 'm'});
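%Row and column codes (as inferred from the usage below):
%rows 'f'/'d'/'m' project onto the trained decoder direction, the
%shuffle-trained (diagonal) decoder direction, and the normalized
%mean-difference direction; in the column codes the first letter is
%'s'ignal / 'c'orrelated noise / 'u'ncorrelated (shuffled) noise,
%and the second letter is t'r'ain half vs t'e'st half.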
n_ize = @(x) x./(norm(x) + eps);
ndir = @(x, S) dot(x, S * x(:));
alg = my_algs('linsvm');
for k = 1:K-1
for d_i = 1:2
dirs = [-1 1];
direction = dirs(d_i);
X1_neg = squeeze(T1(:, k, d1==direction))';
X1_pos = squeeze(T1(:, k+1, d1==direction))';
X1 = [X1_neg ; X1_pos];
X1s_neg = squeeze(T1s(:, k, d1==direction))';
X1s_pos = squeeze(T1s(:, k+1, d1==direction))';
X1s = [X1s_neg ; X1s_pos];
ks1 = [-ones(sum(d1==direction),1) ; ones(sum(d1==direction),1)];
X2_neg = squeeze(T2(:, k, d2==direction))';
X2_pos = squeeze(T2(:, k+1, d2==direction))';
%X2 = [X2_neg ; X2_pos];
X2s_neg = squeeze(T2s(:, k, d2==direction))';
X2s_pos = squeeze(T2s(:, k+1, d2==direction))';
%X2s = [X2s_neg ; X2s_pos];
%ks2 = [-ones(sum(d2==direction),1) ; ones(sum(d2==direction),1)];
if ~no_decoding
model = alg.train(X1, ks1);
model_d = alg.train(X1s, ks1);
end
model_mu = mean(X1_pos, 'omitnan') - mean(X1_neg, 'omitnan');
if ~no_decoding
w_normed = n_ize(model.Beta);
wd_normed = n_ize(model_d.Beta);
else
w_normed = zeros(size(model_mu));
wd_normed = w_normed;
end
dmu_normed = n_ize(model_mu)';
for code_c = results_table.Properties.VariableNames
code = code_c{1};
switch code(2)
case 'r'
X_neg = X1_neg;
X_pos = X1_pos;
Xs_neg = X1s_neg;
Xs_pos = X1s_pos;
case 'e'
X_neg = X2_neg;
X_pos = X2_pos;
Xs_neg = X2s_neg;
Xs_pos = X2s_pos;
otherwise
error('only options are r and e for train and test');
end
%my_dmu = mean(X_pos) - mean(X_neg);
%my_sigma = (cov(X_pos) + cov(X_neg))/2;
%my_sigma_s = (cov(Xs_pos) + cov(Xs_neg))/2;
switch code(1)
case 's'
my_dmu = mean(X_pos, 'omitnan') - mean(X_neg, 'omitnan');
results_table{'f', code}(2*(k-1)+d_i) = dot(w_normed, my_dmu).^2;
results_table{'d', code}(2*(k-1)+d_i) = dot(wd_normed, my_dmu).^2;
results_table{'m', code}(2*(k-1)+d_i) = dot(dmu_normed, my_dmu).^2;
case 'c'
my_sigma = (cov(X_pos, 'omitrows') + cov(X_neg, 'omitrows'))/2;
results_table{'f', code}(2*(k-1)+d_i) = ndir(w_normed, my_sigma);
results_table{'d', code}(2*(k-1)+d_i) = ndir(wd_normed, my_sigma);
results_table{'m', code}(2*(k-1)+d_i) = ndir(dmu_normed, my_sigma);
case 'u'
my_sigma_s = (cov(Xs_pos, 'omitrows') + cov(Xs_neg, 'omitrows'))/2;
results_table{'f', code}(2*(k-1)+d_i) = ndir(w_normed, my_sigma_s);
results_table{'d', code}(2*(k-1)+d_i) = ndir(wd_normed, my_sigma_s);
results_table{'m', code}(2*(k-1)+d_i) = ndir(dmu_normed, my_sigma_s);
otherwise
error('only options are s,c, and u for signal, correlated noise, uncorrelated noise');
end
end
end
end
end
function [noise_var, noise_var_s, noise_var_d, m2, m2_s, m2_d, mean_dist2]...
= adjacent_decoder_noise(data_tensor, tr_dir, num_neurons, num_trials)
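%%Noise and signal along cross-validated decoder directions for
% adjacent place-bin pairs: noise_var/m2 are the variance and
% squared mean separation of held-out activity projected onto the
% decoder trained on the other half (unshuffled data), noise_var_s/m2_s
% the same for shuffled data and shuffle-trained decoders, and
% noise_var_d/m2_d for unshuffled data projected onto shuffle-trained
% (diagonal) decoders; mean_dist2 is the squared distance between
% the mean population vectors of adjacent bins.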
[data_tensor, tr_dir] = DecodeTensor.cut_tensor(data_tensor, tr_dir, num_neurons, num_trials);
[T1, d1, T2, d2, ~] = DecodeTensor.holdout_half(data_tensor, tr_dir);
T1s = DecodeTensor.shuffle_tensor(T1, d1);
T2s = DecodeTensor.shuffle_tensor(T2, d2);
[N, K, T] = size(T1);
[noise_var, noise_var_s, noise_var_d, m2, m2_s, m2_d, mean_dist2] = deal(zeros(K-1, 2));
%progressbar('Place', 'Direction', 'Setting', 'Division');
alg = my_algs('linsvm');
for k = 1:K-1
for d_i = 1:2
dirs = [-1 1];
direction = dirs(d_i);
X1_neg = squeeze(T1(:, k, d1==direction))';
X1_pos = squeeze(T1(:, k+1, d1==direction))';
X1 = [X1_neg ; X1_pos];
X1s_neg = squeeze(T1s(:, k, d1==direction))';
X1s_pos = squeeze(T1s(:, k+1, d1==direction))';
X1s = [X1s_neg ; X1s_pos];
ks1 = [-ones(sum(d1==direction),1) ; ones(sum(d1==direction),1)];
X2_neg = squeeze(T2(:, k, d2==direction))';
X2_pos = squeeze(T2(:, k+1, d2==direction))';
X2 = [X2_neg ; X2_pos];
X2s_neg = squeeze(T2s(:, k, d2==direction))';
X2s_pos = squeeze(T2s(:, k+1, d2==direction))';
X2s = [X2s_neg ; X2s_pos];
ks2 = [-ones(sum(d2==direction),1) ; ones(sum(d2==direction),1)];
model1 = alg.train(X1, ks1);
%progressbar([], [], [], 1/2);
model2 = alg.train(X2, ks2);
%progressbar([], [], [], 2/2);
%progressbar([], [], 1/2);
model1s = alg.train(X1s, ks1);
%progressbar([], [], [], 1/2);
model2s = alg.train(X2s, ks2);
%progressbar([], [], [], 2/2);
%progressbar([], [], 2/2);
beta1_normed = model1.Beta ./ (norm(model1.Beta) + eps);
beta2_normed = model2.Beta ./ (norm(model2.Beta) + eps);
beta1s_normed = model1s.Beta ./ (norm(model1s.Beta) + eps);
beta2s_normed = model2s.Beta ./ (norm(model2s.Beta) + eps);
sc2_neg = X2_neg * beta1_normed;
sc2_pos = X2_pos * beta1_normed;
sc1_neg = X1_neg * beta2_normed;
sc1_pos = X1_pos * beta2_normed;
sc2s_neg = X2s_neg * beta1s_normed;
sc2s_pos = X2s_pos * beta1s_normed;
sc1s_neg = X1s_neg * beta2s_normed;
sc1s_pos = X1s_pos * beta2s_normed;
sc2d_neg = X2_neg * beta1s_normed;
sc2d_pos = X2_pos * beta1s_normed;
sc1d_neg = X1_neg * beta2s_normed;
sc1d_pos = X1_pos * beta2s_normed;
%if any(isnan(sc2_neg(:))) || any(isnan(sc2_pos(:))) || any(isnan(sc1_neg(:))) || any(isnan(sc2_pos(:)))
% keyboard;
%end
noise_var(k, d_i) = ...
mean([var(sc2_neg) var(sc2_pos)...
var(sc1_neg) var(sc1_pos)]);
m2(k, d_i) = mean([(mean(sc2_neg) - mean(sc2_pos)).^2,...
(mean(sc1_neg) - mean(sc1_pos)).^2]);
noise_var_s(k, d_i) = ...
mean([var(sc2s_neg) var(sc2s_pos)...
var(sc1s_neg) var(sc1s_pos)]);
m2_s(k, d_i) = mean([(mean(sc2s_neg) - mean(sc2s_pos)).^2,...
(mean(sc1s_neg) - mean(sc1s_pos)).^2]);
noise_var_d(k, d_i) = ...
mean([var(sc2d_neg) var(sc2d_pos)...
var(sc1d_neg) var(sc1d_pos)]);
m2_d(k, d_i) = mean([(mean(sc2d_neg) - mean(sc2d_pos)).^2,...
(mean(sc1d_neg) - mean(sc1d_pos)).^2]);
mean_dist2(k, d_i) = norm(mean([X1_neg;X2_neg]) - mean([X1_pos;X2_pos])).^2;
%progressbar([], d_i/2);
end
%progressbar(k/(K-1));
end
end
function err_res = decode_all(data_tensor, tr_dir, binsize, alg, num_neurons, num_trials, no_dir_shuf)
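%%Decode from the tensor in all three settings at once
% (unshuffled, shuffled, diagonal): split the trials in half,
% train on one half and test on the other, and average the two
% folds. Returns mean absolute error (cm) and MSE (cm^2) per
% setting in err_res.mean_err.* and err_res.MSE.*.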
if ~exist('no_dir_shuf', 'var')
no_dir_shuf = false;
end
[data_tensor, tr_dir] = DecodeTensor.cut_tensor(data_tensor, tr_dir, num_neurons, num_trials);
[T1, d1, T2, d2, ~] = DecodeTensor.holdout_half(data_tensor, tr_dir);
%T1s = DecodeTensor.shuffle_tensor(T1, d1);
%T2s = DecodeTensor.shuffle_tensor(T2, d2);
%no_dir_shuf = true;
if no_dir_shuf
d1 = d1.^2;
d2 = d2.^2;
end
[sup_X1, sup_ks1] = DecodeTensor.tensor2dataset(T1, d1);
[sup_X2, sup_ks2] = DecodeTensor.tensor2dataset(T2, d2);
%[sup_X1s, sup_ks1s] = DecodeTensor.tensor2dataset(T1s, d1);
sup_X1s = shuffle(sup_X1, sup_ks1); sup_ks1s = sup_ks1;
%[sup_X2s, sup_ks2s] = DecodeTensor.tensor2dataset(T2s, d2);
sup_X2s = shuffle(sup_X2, sup_ks2); sup_ks2s = sup_ks2;
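%Setting conventions follow decode_series: 'unshuffled' trains and
%tests on the original data, 'shuffled' trains and tests on
%shuffled data, and 'diagonal' trains on shuffled data but tests
%on the original (unshuffled) data.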
mean_err_func = @(ks, ps) mean(abs(ceil(ks/2) - ceil(ps/2))) * binsize;
MSE_func = @(ks, ps) mean((ceil(ks/2) - ceil(ps/2)).^2) * binsize.^2;
model1 = alg.train(sup_X1, sup_ks1);
model2 = alg.train(sup_X2, sup_ks2);
model1s = alg.train(sup_X1s, sup_ks1s);
model2s = alg.train(sup_X2s, sup_ks2s);
sup_ps2 = alg.test(model1, sup_X2);
sup_ps2s = alg.test(model1s, sup_X2s);
sup_ps2d = alg.test(model1s, sup_X2);
sup_ps1 = alg.test(model2, sup_X1);
sup_ps1s = alg.test(model2s, sup_X1s);
sup_ps1d = alg.test(model2s, sup_X1);
mean_err2 = mean_err_func(sup_ks2, sup_ps2);
mean_err2s = mean_err_func(sup_ks2, sup_ps2s);
mean_err2d = mean_err_func(sup_ks2, sup_ps2d);
mean_err1 = mean_err_func(sup_ks1, sup_ps1);
mean_err1s = mean_err_func(sup_ks1, sup_ps1s);
mean_err1d = mean_err_func(sup_ks1, sup_ps1d);
MSE2 = MSE_func(sup_ks2, sup_ps2);
MSE2s = MSE_func(sup_ks2, sup_ps2s);
MSE2d = MSE_func(sup_ks2, sup_ps2d);
MSE1 = MSE_func(sup_ks1, sup_ps1);
MSE1s = MSE_func(sup_ks1, sup_ps1s);
MSE1d = MSE_func(sup_ks1, sup_ps1d);
err_res.mean_err.unshuffled = mean([mean_err1 mean_err2]);
err_res.mean_err.shuffled = mean([mean_err1s mean_err2s]);
err_res.mean_err.diagonal = mean([mean_err1d mean_err2d]);
err_res.MSE.unshuffled = mean([MSE1 MSE2]);
err_res.MSE.shuffled = mean([MSE1s MSE2s]);
err_res.MSE.diagonal = mean([MSE1d MSE2d]);
end
function [mean_err, MSE, ps, ks, model, stats] = decode_tensor(data_tensor, tr_dir,...
binsize, alg, shuf, num_neurons, num_trials, pls_dims)
%%Running decoding on data tensor given decoding algorithm,
% number of neurons, number of trials to use (per direction of
% motion), and whether or not to shuffle the data after
% splitting the tensor in half along the trial dimension.
% After splitting the tensor in half, report errors as the
% average error of training on one half and testing on the
% other and vice versa. Also reports the predicted bins (each
% prediction comes from the model trained on the half-tensor
% that the predicted trial does not belong to).
%
%Source: DecodeTensor.decode_tensor : (data tensor, ...
% trial direction, size of one bin in cm, algorithm struct, ...
% whether to shuffle, number of neurons to use for decoding, ...
% number of trials [of each direction] to use for decoding) →
% (mean decoding error, mean squared decoding error, ...
% predicted bins, correct bins)
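%
% Example (a sketch; assumes T and d come from tensor_loader and
% the session has at least 100 cells and 50 trials per direction):
%   alg = my_algs('ecoclin');
%   [me, mse] = DecodeTensor.decode_tensor(T, d, opt.bin_width,...
%       alg, false, 100, 50); % 100 neurons, 50 trials/direction, unshuffled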
if ~exist('pls_dims', 'var')
pls_dims = -1;
stats = [];
elseif pls_dims < 0
stats = [];
end
%Cutting down the data to a requested number of neurons &
%number of trials
[data_tensor, tr_dir] = DecodeTensor.cut_tensor(data_tensor, tr_dir, num_neurons, num_trials);
[~, n_bins, ~] = size(data_tensor);
%Dividing the data in half (T1, T2) data tensors and (d1, d2)
%trial labels, labeling 1 for rightward trial and -1 for
%leftward trial.
[T1, d1, T2, d2, division] = DecodeTensor.holdout_half(data_tensor, tr_dir);
if shuf
T1 = DecodeTensor.shuffle_tensor(T1, d1);
T2 = DecodeTensor.shuffle_tensor(T2, d2);
end
%Converting tensors to data matrix for supervised learning
%problem
[sup_X1, sup_ks1] = DecodeTensor.tensor2dataset(T1, d1);%%%blorgg
[sup_X2, sup_ks2] = DecodeTensor.tensor2dataset(T2, d2);%%%blorgg
if pls_dims > 0
len1 = numel(sup_ks1); len2 = numel(sup_ks2);
X = [sup_X1 ; sup_X2]; ks = [sup_ks1 ; sup_ks2];
%ks_place = ceil(ks/2); ks_dir = mod(ks,2);
[XS, stats, origin] = Utils.pls_short(X, [ceil(ks/2), mod(ks,2)], pls_dims);
X_r = XS(:,1:pls_dims);
sup_X1 = X_r(1:len1,:);
sup_X2 = X_r(len1+1:len1+len2,:);
end
%Error measurement functions. @(k)ceil(k/2) throws away
%direction information.
mean_err_func = @(ks, ps) mean(abs(ceil(ks/2) - ceil(ps/2))) * binsize;
MSE_func = @(ks, ps) mean((ceil(ks/2) - ceil(ps/2)).^2) * binsize.^2;
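%(e.g. place-direction bins 13 and 14 are both place bin 7:
%ceil(13/2) = ceil(14/2) = 7)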
%Train on first half
model = alg.train(sup_X1, sup_ks1);
%Test on second half
sup_ps2 = alg.test(model, sup_X2);
%Errors on second half
mean_err2 = mean_err_func(sup_ks2, sup_ps2);
MSE2 = MSE_func(sup_ks2, sup_ps2);
%Train on second half
model = alg.train(sup_X2, sup_ks2);
%Test on first half
sup_ps1 = alg.test(model, sup_X1);
%Errors on first half
mean_err1 = mean_err_func(sup_ks1, sup_ps1);
MSE1 = MSE_func(sup_ks1, sup_ps1);
%Average first and second half errors
mean_err = mean([mean_err1 mean_err2]);
MSE = mean([MSE1 MSE2]);
%ps( repmat(division,1,n_bins)) = sup_ps1;
ps( reshape(repmat(division, n_bins, 1), [], 1)) = sup_ps1;
%ps(~repmat(division,1,n_bins)) = sup_ps2;
ps(~reshape(repmat(division, n_bins, 1), [], 1)) = sup_ps2;
%ks( repmat(division,1,n_bins)) = sup_ks1;
ks( reshape(repmat(division, n_bins, 1), [], 1)) = sup_ks1;
%ks(~repmat(division,1,n_bins)) = sup_ks2;
ks(~reshape(repmat(division, n_bins, 1), [], 1)) = sup_ks2;
if shuf
data_tensor = DecodeTensor.shuffle_tensor(data_tensor, tr_dir);
end
[tot_X, tot_ks] = DecodeTensor.tensor2dataset(data_tensor, tr_dir);
assert(isequal(ks(:), tot_ks(:)));%%Sanity check
if pls_dims > 0
%len1 = numel(sup_ks1); len2 = numel(sup_ks2);
%X = [sup_X1 ; sup_X2]; ks = [sup_ks1 ; sup_ks2];
%ks_place = ceil(ks/2); ks_dir = mod(ks,2);
[XS, stats, origin] = Utils.pls_short(tot_X, [ceil(tot_ks/2), mod(tot_ks,2)], pls_dims);
X_r = XS(:,1:pls_dims);
tot_X = X_r;
%sup_X1 = X_r(1:len1,:);
%sup_X2 = X_r(len1+1:len1+len2,:);
end
if nargout > 4
model = alg.train(tot_X, tot_ks);
end
end
end
methods(Static) %functions involving decoding pipeline decisions
function [cpp, vel, trial_start, trial_end,...
trial_direction, track_bins, track_dir_bins] = new_sel_ends(XY, opt)
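%%Alternative trial selection: a trial is a contiguous traversal
% of the middle of the track (opt.ends cm trimmed from each side
% of a 120 cm track) during which the speed stays above
% opt.v_thresh in one direction; trials that do not visit every
% bin are discarded. See new_sel below for the main pipeline.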
opt.total_length = 120;
opt.ends = 3.5;
pix_coord = XY(:,1);
pix_bottom = prctile(pix_coord, opt.cutoff_p);
pix_top = prctile(pix_coord, 100-opt.cutoff_p);
cm_coord = (pix_coord - pix_bottom)./(pix_top - pix_bottom) .* opt.total_length;
cpp = opt.total_length ./ (pix_top - pix_bottom);
vel = [0; diff(cm_coord)] .* opt.samp_freq;
mid_start = opt.ends;
mid_end = opt.total_length - opt.ends;
track_bins = ceil(opt.n_bins.*(cm_coord - mid_start)./(mid_end - mid_start));
track_bins(track_bins < 1) = 0;
track_bins(track_bins > opt.n_bins) = opt.n_bins + 1;
track_dir_bins = -(sign(vel)==1) + 2.*track_bins;
trial_candidates = (cm_coord > mid_start) & (cm_coord < mid_end);
tc_diff = diff(trial_candidates);
trial_start = find(tc_diff > 0);
trial_end = find(tc_diff < 0);
n_tc = numel(trial_end);
trial_start = trial_start(1:n_tc);
trial_direction = zeros(1,n_tc);
has_all_bins = false(1,n_tc);
for i = 1:n_tc
if all(vel(trial_start(i):trial_end(i)) > opt.v_thresh)
trial_direction(i) = 1;
elseif all(vel(trial_start(i):trial_end(i)) < -opt.v_thresh)
trial_direction(i) = -1;
end
tr_bins = track_bins(trial_start(i):trial_end(i));
has_all_bins(i) = all(sum(tr_bins(:) == (1:opt.n_bins))~=0);
end
fprintf('Just velocity: %d, thrown out due to not all bins: %d\n', sum(trial_direction~=0), sum((trial_direction~=0)&~has_all_bins));
vel_filt = (trial_direction ~= 0) & has_all_bins;
trial_start = trial_start(vel_filt);
trial_end = trial_end(vel_filt);
trial_direction = trial_direction(vel_filt);
end
function [cpp, vel, trial_start, trial_end,...
trial_direction, track_bins, track_dir_bins] = new_sel(XY, opt)
%%Selecting trials:
% The length of the track that is accessible to the mouse is
% taken to be 118cm.
%
% The frame sampling frequency is taken to be 20Hz.
%
% The relationship between camera tracking pixels and cm on
% the track is determined such that 118cm corresponds to
% the range of the track coordinate in pixels
% (difference between 95%ile and 5%ile instead
% of absolute range is used). The cm/pix value is ~0.18 cm/pix.
%
% Velocity is calculated by prepending 0 to the diff of
% the track coordinate, then multiplying by the cm/pix
% conversion factor and by the frame sampling frequency.
%
% Trials are defined as contiguous durations where the speed
% is greater than a threshold value (4cm/s). Trials that do
% not satisfy the condition that they start at the bottom 5%
% of the track and end at the top 5% of the track
% (or vice versa) are removed. The leeway fraction is
% opt.leeway_frac (1/20 by default, i.e. the inverse of the
% number of bins). The full
% track length is defined as the range between the 5%ile and
% 95%ile of the track coordinate.
%
%
%%Dividing into bins:
% The part of the track within 5%ile and 95%ile of the track
% coordinate is divided into 20 equally spaced bins.
% Each sample is assigned a bin based on its track coordinate
% and direction of motion. There are two sets of bins, one for
% each direction of motion. Both sets of bins are interleaved
% (rightward bins assigned odd numbers,
% leftward bins assigned even numbers) from 1 to 40.
% Track values that are beyond 5%ile and 95%ile are clipped
% to be within the range for the purpose
% of calculating bin numbers.
%
% Finally discard all trials that do not contain at least one
% sample from each bin. This can occur during fast motion, or
% if the number of place bins chosen is too high.
%
% opt is the struct of options
% opt.total_length = total length of the track (118cm)
% opt.cutoff_p = percentile at which the length of the track in
% pixels is determined. (5%ile)
% opt.samp_freq = frame sampling frequency (20Hz)
% opt.v_thresh = throw away frames slower than this speed
% (4cm/s)
% opt.n_bins = number of spatial bins to create (20)
%
% Source: DecodeTensor.new_sel : (track coordinate, opt) →
% (cm per pixel, velocity, trial starting frames, ...
% trial ending frames, trial direction, 1:n_bins bin trace, ...
% 1:2*n_bins bin/direction trace)
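%
% Example (a sketch; track_coord as loaded in tensor_loader):
%   opt = DecodeTensor.default_opt;
%   [cpp, vel, tr_s, tr_e, tr_dir, tr_bins, ~] = ...
%       DecodeTensor.new_sel(track_coord, opt);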
total_length = opt.total_length; %cm
cutoff_p = opt.cutoff_p; %percentile
samp_freq = opt.samp_freq; %Hz
v_thresh = opt.v_thresh; %cm/s
n_bins = opt.n_bins;
%leeway_frac = 1/n_bins;
leeway_frac = opt.leeway_frac;
track_coord = XY(:,1);
%define the track distance in pixels to be between 5 and 95
%percentiles
track_range = (prctile(track_coord, 100-cutoff_p) -...
prctile(track_coord, cutoff_p));
%identify centimeters per pixel
cpp = total_length / track_range;
%calculation of velocity in cm/s
vel = [0; diff(track_coord)] .* cpp .* samp_freq;
if opt.interactive
figure;
time_coord = (1:numel(track_coord))./samp_freq;
position_coord = cpp.*(track_coord - prctile(track_coord, cutoff_p));
lgn(1) = plot(time_coord, position_coord, '-ok');
l_ = refline(0, 0); l_.Color = 'k'; l_.LineStyle = ':';
l_ = refline(0, total_length); l_.Color = 'k'; l_.LineStyle = ':';
ylim([0 total_length]);
xlim([1300 1500]);
xlabel 'Time (s)'
ylabel 'Position (cm)'
%pause;
end
%select only frames above the threshold velocity (4cm/s by
%default)
fast_frames = abs(vel) > v_thresh;
%define trial start and end times as contiguous fast frames
trial_start = find(diff(fast_frames) == 1);
trial_end = find(diff(fast_frames) == -1);
trial_start = trial_start(1:numel(trial_end));
if opt.interactive
hold on;
position_coord_non_trials_only = position_coord;
for tr_i = 1:numel(trial_start)
position_coord_non_trials_only(trial_start(tr_i):trial_end(tr_i)) = nan;
end
lgn(2) = plot(time_coord, position_coord_non_trials_only, '-or');
num_trials_total = numel(trial_start);
title(sprintf('Trials found: %d (>%.2f cm/s)', num_trials_total, v_thresh));
%pause;
end
%filter out the trials that don't start at one end of the
%track and end at the other end of the track
sc = track_coord(trial_start); ec = track_coord(trial_end);
b = prctile(track_coord, cutoff_p) + track_range*leeway_frac;
t = prctile(track_coord, 100-cutoff_p) - track_range*leeway_frac;
regular_trial = ((sc < b) & (ec > t)) | ((ec < b) & (sc > t));
trial_start = trial_start(regular_trial);
trial_end = trial_end(regular_trial);
if opt.interactive
position_coord_irregular_trials_only = position_coord;
position_coord_irregular_trials_only(~isnan(position_coord_non_trials_only)) = nan;
for tr_i = 1:numel(trial_start)
position_coord_irregular_trials_only(trial_start(tr_i):trial_end(tr_i)) = nan;
end
lgn(3) = plot(time_coord, position_coord_irregular_trials_only, '-og');
num_trials_regular = numel(trial_start);
title(sprintf('Total trials: %d, Regular trials: %d', num_trials_total, num_trials_regular));
%title ''
for b_i = 1:n_bins
l_ = refline(0, total_length*b_i/n_bins); l_.Color = 'k'; l_.LineStyle = ':';
end
legend(lgn, 'Used trials (fast part starts & ends at edge bins)', 'Not trials', 'Irregular trials (fast part starts or ends at a non-edge bin)');
pause;
end
%determine the direction of motion for each trial
trial_direction((track_coord(trial_start) < b) & (track_coord(trial_end) > t)) = 1;
trial_direction((track_coord(trial_end) < b) & (track_coord(trial_start) > t)) = -1;
%function for converting pixel valued position to place bin index
binner = @(y, n_bins)...
ceil(n_bins.*(y - prctile(track_coord, cutoff_p))./track_range);
%add in direction information, rightward motion encoded as odd
%place-direction bin index, and leftward motion encoded as even
%place-direction bin index
add_in_direction = @(bins, vel) -(sign(vel)==1) + 2.*bins;
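%(e.g. place bin 7 becomes bin 13 when moving rightward
%(vel > 0) and bin 14 when moving leftward)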
track_bins = binner(track_coord, n_bins);
%ensure that the bin index values are clamped to the proper
%bounds
track_bins(track_bins < 1) = 1;
track_bins(track_bins > n_bins) = n_bins;
track_dir_bins = add_in_direction(track_bins, vel);
%if a trial does not contain a sample from all bins, discard
%the trial
if opt.discard_incomplete_trials
keep_trial = true(size(trial_start));
for tr_i = 1:numel(trial_start)
bins_present = track_bins(trial_start(tr_i):trial_end(tr_i));
for b = 1:n_bins
if sum(bins_present == b) == 0
keep_trial(tr_i) = false;
end
end
end
trial_start = trial_start(keep_trial);
trial_end = trial_end(keep_trial);
trial_direction = trial_direction(keep_trial);
else
keep_trial = true(size(trial_start));
end
%fprintf('Total trials: %d\tThrowing away %d\tkeeping %d\n', numel(keep_trial), sum(~keep_trial), sum(keep_trial));
end
function [data_tensor, counts_mat] = construct_tensor(X, tr_bins, n_bins, tr_s, tr_e)
%%Constructing data tensor:
% The data is transformed into a tensor with three axes:
% neurons, bins, and trials. For each trial, all the samples
% with an associated bin are averaged and entered
% into the tensor. The neural traces are calculated from
% the filters produced by CellMax ('rawTraces' field).
%
% Source: DecodeTensor.construct_tensor :
% (raw traces, 1:n_bins bin trace, n_bins, ...
% trial starting frames, trial ending frames) →
% (data tensor, counts matrix [how many samples were averaged])
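%
% Example (a sketch; X, track_bins, tr_s, tr_e as produced by
% tensor_loader / new_sel):
%   [T, counts] = DecodeTensor.construct_tensor(X, track_bins,...
%       opt.n_bins, tr_s, tr_e);
%   %T is (n_neurons x n_bins x n_trials); counts(b, t) is the
%   %number of frames averaged into bin b of trial t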
[~, n_neurons] = size(X);
n_trials = length(tr_s);
data_tensor = zeros(n_neurons, n_bins, n_trials);
counts_mat = zeros(n_bins, n_trials);
for tr_i = 1:n_trials
trial_slice = tr_s(tr_i):tr_e(tr_i);
trial_data = X(trial_slice, :);
trial_bins = tr_bins(trial_slice);
for b = 1:n_bins
data_tensor(:, b, tr_i) =...
mean(trial_data(trial_bins == b,:),1);
counts_mat(b, tr_i) = size(trial_data(trial_bins == b,:),1);
end
end
%assert(all(counts_mat(:)~=0), 'some trial(s) have zero samples in a place bin, consider using fewer place bins');
end