init
		
							
								
								
									
47  PDToolkit/@PDTrial/PDTrial.m  Normal file
							| @@ -0,0 +1,47 @@ | ||||
| classdef PDTrial | ||||
|     properties | ||||
|         session | ||||
|          | ||||
|         time | ||||
|         data | ||||
|          | ||||
|         trial_start | ||||
|         trial_end         | ||||
|          | ||||
|         baseline_onset | ||||
|         baseline_offset | ||||
|         baseline | ||||
|          | ||||
|         stimulus_onset | ||||
|         stimulus_offset | ||||
|                  | ||||
|         markers | ||||
|         labels | ||||
|          | ||||
|         quality | ||||
|         valid | ||||
|         type | ||||
|         stats %% q1, q2,q3, fit, etc. | ||||
|          | ||||
|         blink_count | ||||
|         missing_data_count | ||||
|         blinks | ||||
|         % required for plotting | ||||
|         settings | ||||
|          | ||||
|         % pupil deconvolution | ||||
|         deconvolution | ||||
|     end | ||||
|      | ||||
|     methods  | ||||
|         function[obj] = PDTrial(varargin) | ||||
|             data = struct('raw', [],  'interpolated', [],'filtered',[], 'logtransformed',[],'baseline',[]); | ||||
|             eye  = struct('uncorrected', data,'baseline_corrected', data); | ||||
|             obj.data = struct('left', eye,'right', eye); | ||||
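|             % The nested layout gives, for example: | ||||
|             %   obj.data.left.uncorrected.raw              - raw left-eye samples | ||||
|             %   obj.data.right.baseline_corrected.filtered - filtered, baseline-corrected right-eye trace | ||||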
|              | ||||
|         end | ||||
|     end | ||||
|      | ||||
| end | ||||
|  | ||||
|          | ||||
							
								
								
									
66  PDToolkit/@PDTrial/calculate_statistics.m  Normal file
							| @@ -0,0 +1,66 @@ | ||||
| function[trials] = calculate_statistics(trials) | ||||
|  | ||||
| item_struct = struct('time', [], 'value', []); | ||||
| stats_struct = struct('max', item_struct, ... | ||||
|     'mean', [], ... | ||||
|     'median', [], ... | ||||
|     'mode', [], ... | ||||
|     'std', [], ... | ||||
|     'q1', [],... | ||||
|     'q2', [],... | ||||
|     'q3', [],... | ||||
|     'auc', []); | ||||
| for t = 1:length(trials) | ||||
|     trial = trials(t); | ||||
|     eyes = fieldnames(trial.data); | ||||
|      | ||||
|     for e = 1:length(eyes) | ||||
|         try             | ||||
|             eye = eyes{e}; | ||||
|             trial.stats.(eye) = stats_struct; | ||||
|              | ||||
|             signal = trial.data.(eye).baseline_corrected.filtered; | ||||
|              | ||||
|             if isempty(signal) | ||||
|                 continue; | ||||
|             end | ||||
|              | ||||
|             % only relevant after stimulus onset | ||||
|             trial_after_so  = signal(trial.time > trial.stimulus_onset); | ||||
|             trial_before_so = signal(trial.time <= trial.stimulus_onset); | ||||
|              | ||||
|             % calculate stats from trial trace | ||||
|             max_after_so = max(trial_after_so); | ||||
|             max_ind = length(trial_before_so) + find(trial_after_so==max_after_so); | ||||
|             delta_t = trial.time(max_ind(1)) - trial.stimulus_onset; | ||||
|              | ||||
|             mean_after_so = mean(trial_after_so); | ||||
|             std_after_so =  std(trial_after_so); | ||||
|             mode_after_so = mode(trial_after_so); | ||||
|             median_after_so = median(trial_after_so); | ||||
|              | ||||
|             q1 = prctile(trial_after_so, 25); | ||||
|             q2 = median_after_so; | ||||
|             q3 = prctile(trial_after_so, 75); | ||||
|              | ||||
|             % store values in trial | ||||
|             trial.stats.(eye).max.value = max_after_so; | ||||
|             trial.stats.(eye).max.time = delta_t; | ||||
|             trial.stats.(eye).median = median_after_so; | ||||
|             trial.stats.(eye).mean = mean_after_so; | ||||
|             trial.stats.(eye).std = std_after_so; | ||||
|             trial.stats.(eye).mode = mode_after_so; | ||||
|             trial.stats.(eye).q1 = q1; | ||||
|             trial.stats.(eye).q2 = q2; | ||||
|             trial.stats.(eye).q3 = q3; | ||||
|             trial.stats.(eye).auc = trapz(trial_after_so); | ||||
|              | ||||
|         catch | ||||
|             warning('No statistics generated for trial %d', t); | ||||
|         end | ||||
|     end | ||||
|     % store trial back in array | ||||
|     trials(t) = trial; | ||||
| end | ||||
|  | ||||
| end | ||||
							
								
								
									
28  PDToolkit/@PDTrial/correct_for_baseline.m  Normal file
							| @@ -0,0 +1,28 @@ | ||||
| function[obj] = correct_for_baseline(obj, settings) | ||||
| %% correction for baseline measure | ||||
| %  The actual baseline is calculated in getBaseline | ||||
| eyes = fieldnames(obj.data); | ||||
| for e = 1:length(eyes) | ||||
|     eye = eyes{e}; | ||||
|      | ||||
|     signals = fieldnames(obj.data.(eye).uncorrected); | ||||
|  | ||||
|     baseline = getBaseline(obj, settings); | ||||
|     obj.baseline = baseline; | ||||
|      | ||||
|     for i = 1:length(signals) | ||||
|      | ||||
|         signal = obj.data.(eye).uncorrected.(signals{i}); | ||||
|  | ||||
|         if (~isnan(baseline)) | ||||
|             %% apply the correction | ||||
|             corrected_signal = (signal - baseline) / baseline;  | ||||
|         else | ||||
|             %% no correction is applied | ||||
|             corrected_signal = signal; | ||||
|         end | ||||
|      | ||||
|         obj.data.(eye).baseline_corrected.(signals{i}) = corrected_signal; | ||||
|     end | ||||
| end | ||||
|  | ||||
							
								
								
									
32  PDToolkit/@PDTrial/deconvolve.m  Normal file
							| @@ -0,0 +1,32 @@ | ||||
| function[obj] = deconvolve(obj, settings) | ||||
|  | ||||
| %% Perform pupil deconvolution based on the methods | ||||
| %  described in Wierda et al PNAS 2012 | ||||
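| %  In outline (see evaluate_model.m, stick_model.m and prf_convolve.m): the | ||||
| %  baseline-corrected, interpolated trace is modelled as a linear trend plus | ||||
| %  one "attentional pulse" per label, convolved with the pupil response | ||||
| %  function; fminsearch then estimates the slope and pulse amplitudes by | ||||
| %  minimising the squared residual. | ||||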
|  | ||||
| o=optimset; | ||||
| o.MaxFunEvals = 10000; | ||||
| o.MaxIter = 10000; | ||||
|  | ||||
| fs = 50; % Hz downsampling | ||||
|  | ||||
| y = obj.data.baseline_corrected.interpolated; | ||||
|  | ||||
| t_orig = obj.time; | ||||
|  | ||||
| nt = round((t_orig(end) - t_orig(1)) / (1000/fs)); | ||||
| t_ds = linspace(t_orig(1), t_orig(end), nt); | ||||
| yq = interp1(t_orig, y, t_ds); | ||||
|  | ||||
|  | ||||
| obj.deconvolution.time  = t_ds; | ||||
| obj.deconvolution.input = yq; | ||||
|  | ||||
| init_params = [ 1 zeros(1,length(obj.labels)-1) ]; | ||||
| init_slope = (obj.deconvolution.input(end) -obj.deconvolution.input(1)) / length(t_ds); | ||||
|  | ||||
| %% perform search with initial params: slope = overall trend estimate; first pulse weight 1, remaining pulses 0 | ||||
|  | ||||
| final_params = fminsearch(@obj.evaluate_model, [init_slope init_params], o); | ||||
|  | ||||
| obj.deconvolution.params = final_params; | ||||
| obj.deconvolution.output = obj.prf_convolve(obj.stick_model(final_params(2:end)),final_params(1)); | ||||
							
								
								
									
2  PDToolkit/@PDTrial/detect_blinks.m  Normal file
							| @@ -0,0 +1,2 @@ | ||||
| function[obj] = detect_blinks(obj) | ||||
| %% placeholder - blink detection and removal are currently implemented in remove_blinks.m | ||||
|  | ||||
							
								
								
									
22  PDToolkit/@PDTrial/display_and_log.m  Normal file
							| @@ -0,0 +1,22 @@ | ||||
| function[obj] = display_and_log(obj, varargin) | ||||
|  | ||||
| if nargin >= 3 | ||||
|     text = varargin{1}; | ||||
|     truncate = varargin{2}; | ||||
| elseif nargin == 2 | ||||
|     text = varargin{1}; | ||||
|     truncate = 0; | ||||
| else | ||||
|     text = ''; | ||||
|     truncate = 0; | ||||
| end | ||||
|  | ||||
| code = 'a+'; | ||||
| if truncate | ||||
|     code = 'w+'; | ||||
| end | ||||
|  | ||||
| display(sprintf(text)); | ||||
| fp = fopen('information.txt',code); | ||||
| text = [text '\n']; | ||||
| fprintf(fp,  text); | ||||
| fclose(fp); | ||||
							
								
								
									
12  PDToolkit/@PDTrial/evaluate_model.m  Normal file
							| @@ -0,0 +1,12 @@ | ||||
| function[residual] = evaluate_model(obj, varargin) | ||||
|  | ||||
| y = obj.deconvolution.input'; | ||||
|  | ||||
| slope = varargin{1}(1); | ||||
| params = varargin{1}(2:end); | ||||
|  | ||||
| x = obj.prf_convolve(obj.stick_model(params),slope); | ||||
|  | ||||
| plot(y, 'k'); hold on; plot(x, 'r-'); drawnow; hold off; % live view: data (black) vs current model fit (red) | ||||
|  | ||||
| residual = sum((y - x).^2); | ||||
							
								
								
									
67  PDToolkit/@PDTrial/getBaseline.m  Normal file
							| @@ -0,0 +1,67 @@ | ||||
| function[baseline] = getBaseline(trial, settings) | ||||
| %% Based on baseline interval samples, this routine  | ||||
| %  determines a baseline value that can be subsequently | ||||
| %  used to correct the raw signal | ||||
| % | ||||
| %  In this routine, currently 6 methods are available: | ||||
| %  1) no correction | ||||
| %  2) mean value across baseline samples | ||||
| %  3) minimum value of all baseline samples | ||||
| %  4) the n-th percentile value of the baseline samples | ||||
| %  5) offset value of a constant fit to all baseline samples | ||||
| %  6) a fixed point within the baseline window | ||||
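| % | ||||
| %  For example (field names as used in the switch below), | ||||
| %  settings.BaselineCorrection = 4 with settings.BaselineCorrectionPercentile = 10 | ||||
| %  uses the 10th percentile of the baseline-window samples as the baseline value. | ||||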
|  | ||||
| baseline=NaN; | ||||
|  | ||||
| if isempty(trial.baseline_onset) | ||||
|     trial.baseline_onset = trial.trial_start; | ||||
| end | ||||
|  | ||||
| if isempty(trial.baseline_offset)  | ||||
|      | ||||
|     if isempty(trial.stimulus_onset) | ||||
|         warning('Stimulus onset has not been set! Bailing out...'); | ||||
|         return; | ||||
|     end | ||||
|      | ||||
|     trial.baseline_offset = trial.stimulus_onset; | ||||
| end | ||||
|  | ||||
| eyes = fieldnames(trial.data); | ||||
|  | ||||
| for e = 1:length(eyes) | ||||
|     eye = eyes{e}; | ||||
|     signal = trial.data.(eye).uncorrected.interpolated; | ||||
|      | ||||
|     if isempty(signal) | ||||
|         baseline_signal = NaN; | ||||
|     else | ||||
|         baseline_signal = trial.data.(eye).uncorrected.interpolated(intersect(find(trial.time >= trial.baseline_onset),... | ||||
|                                                                 find(trial.time < trial.baseline_offset))); | ||||
|     end | ||||
| end | ||||
|  | ||||
|  | ||||
| switch settings.BaselineCorrection | ||||
|      | ||||
|     case 1  | ||||
|         baseline = NaN; % No correction | ||||
|     case 2 % Mean value  | ||||
|         baseline = nanmean(baseline_signal); | ||||
|          | ||||
|     case 3 % Min-value between baseline | ||||
|         baseline = min(baseline_signal); | ||||
|          | ||||
|     case 4 % Percentile  | ||||
|           | ||||
|         percentage = settings.BaselineCorrectionPercentile; | ||||
|         perc_val   = prctile(baseline_signal, percentage); | ||||
|          | ||||
|         baseline = perc_val; %mean(baseline_signal(find(baseline_signal <= perc_val))); | ||||
|     case 5 % Offset (constant fit to the baseline samples) | ||||
|         P = polyfit(1:length(baseline_signal), baseline_signal, 0); | ||||
|         baseline = P(1); | ||||
|     case 6 % Fixed point | ||||
|         baseline = baseline_signal(end); % assumed: the last baseline sample before stimulus onset | ||||
| end | ||||
| end | ||||
|  | ||||
							
								
								
									
43  PDToolkit/@PDTrial/getIndexForEvent.m  Normal file
							| @@ -0,0 +1,43 @@ | ||||
| function[index] = getIndexForEvent(objs, event) | ||||
|  | ||||
| ton_codings  = {'trial onset','trial on', 'start trial', 'trial start', 'trial_start'}; | ||||
| toff_codings = {'trial offset','trial off', 'end trial', 'trial end', 'trial_end'}; | ||||
| son_codings  = {'stimulus onset','stimulus on', 'soa', 'stim on', 'stim_on', 'stimulus start', 'stim_start'}; | ||||
| soff_codings = {'stimulus offset','stimulus off', 'stim off', 'stim_off', 'stim end', 'stim_end'}; | ||||
| bon_codings    = {'baseline on', 'baseline start', 'baseline onset', 'bl on', 'bl'}; | ||||
| boff_codings    = {'baseline off', 'baseline end', 'baseline offset', 'bl off'}; | ||||
| all_codings = [ton_codings toff_codings son_codings soff_codings bon_codings boff_codings]; | ||||
|  | ||||
| for o = 1:length(objs) | ||||
|     obj = objs(o); | ||||
|     if ismember(event, ton_codings) | ||||
|         time = obj.trial_start; | ||||
|     end | ||||
|      | ||||
|     if ismember(event, toff_codings) | ||||
|         time = obj.trial_end; | ||||
|     end | ||||
|      | ||||
|     if ismember(event, son_codings) | ||||
|         time = obj.stimulus_onset; | ||||
|     end | ||||
|      | ||||
|     if ismember(event, soff_codings) | ||||
|         time = obj.stimulus_offset; | ||||
|     end | ||||
|      | ||||
|     if ismember(event, bon_codings) | ||||
|         time = obj.baseline_onset; | ||||
|     end | ||||
|      | ||||
|     if ismember(event, boff_codings) | ||||
|         time = obj.baseline_offset; | ||||
|     end | ||||
|      | ||||
|     if ~ismember(event, all_codings) | ||||
|         %% search for a label with a matching name (labels carry .label and .time fields, cf. removeDuplicateLabels.m) | ||||
|         ind = find(strcmp({obj.labels.label}, event)); | ||||
|         time = obj.labels(ind(1)).time; | ||||
|     end | ||||
|      | ||||
|     pre_ind = find(obj.time <= time); | ||||
|     if isempty(pre_ind) | ||||
|         event_index = NaN; | ||||
|     else | ||||
|         event_index = pre_ind(end); | ||||
|     end | ||||
|      | ||||
|     index(o) = event_index; | ||||
| end | ||||
							
								
								
									
45  PDToolkit/@PDTrial/getTrialDataShiftedForMarker.m  Normal file
							| @@ -0,0 +1,45 @@ | ||||
| function[time trace] = getTrialDataShiftedForMarker(trial, type, baseline_corrected, marker) | ||||
| %% get signal pivoted based on marker | ||||
|  | ||||
| time  = []; | ||||
| trace = []; | ||||
|  | ||||
| if baseline_corrected | ||||
|     signals = trial.data.baseline_corrected; | ||||
|     fields = fieldnames(signals); | ||||
| else | ||||
|     signals = trial.data.uncorrected; | ||||
|     fields = fieldnames(signals); | ||||
| end | ||||
|  | ||||
| if ismember(type, fields)     | ||||
|     trace = signals.(type);     | ||||
|     time  = trial.time; | ||||
| else     | ||||
|     error('Could not find signal type in trial data'); | ||||
| end | ||||
|  | ||||
| %% TODO add:    markers | ||||
| ton_codings  = {'trial onset','trial on', 'start trial', 'trial start', 'trial_start'}; | ||||
| toff_codings = {'trial offset','trial off', 'end trial', 'trial end', 'trial_end'}; | ||||
|  | ||||
| son_codings  = {'stimulus onset','stimulus on', 'soa', 'stim on', 'stim_on'}; | ||||
| soff_codings = {'stimulus offset','stimulus off', 'stim off', 'stim_off'}; | ||||
| bon_codings    = {'baseline on', 'baseline start', 'baseline onset', 'bl on', 'bl'}; | ||||
| boff_codings    = {'baseline off', 'baseline end', 'baseline offset', 'bl off'}; | ||||
|  | ||||
| possible_markers = {'trial_start','trial_end',... | ||||
|                     'baseline_onset','baseline_offset',... | ||||
|                     'stimulus_onset', 'stimulus_offset'}; | ||||
|  | ||||
| if ismember(marker, possible_markers)     | ||||
|     marker_timestamp = trial.(marker); | ||||
| else | ||||
|     error('Marker type is not defined for trial'); | ||||
| end | ||||
|  | ||||
| time = time - trial.(marker); | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
							
								
								
									
25816  PDToolkit/@PDTrial/information.txt  Normal file
(File diff suppressed because it is too large)
							
							
								
								
									
565  PDToolkit/@PDTrial/inpaint_nans.m  Normal file
							| @@ -0,0 +1,565 @@ | ||||
| function B=inpaint_nans(obj, A,method) | ||||
| % INPAINT_NANS: in-paints over nans in an array | ||||
| % usage: B=obj.inpaint_nans(A)          % default method | ||||
| % usage: B=obj.inpaint_nans(A,method)   % specify method used | ||||
| % | ||||
| % Solves approximation to one of several pdes to | ||||
| % interpolate and extrapolate holes in an array | ||||
| % | ||||
| % arguments (input): | ||||
| %   A - nxm array with some NaNs to be filled in | ||||
| % | ||||
| %   method - (OPTIONAL) scalar numeric flag - specifies | ||||
| %       which approach (or physical metaphor to use | ||||
| %       for the interpolation.) All methods are capable | ||||
| %       of extrapolation, some are better than others. | ||||
| %       There are also speed differences, as well as | ||||
| %       accuracy differences for smooth surfaces. | ||||
| % | ||||
| %       methods {0,1,2} use a simple plate metaphor. | ||||
| %       method  3 uses a better plate equation, | ||||
| %                 but may be much slower and uses | ||||
| %                 more memory. | ||||
| %       method  4 uses a spring metaphor. | ||||
| %       method  5 is an 8 neighbor average, with no | ||||
| %                 rationale behind it compared to the | ||||
| %                 other methods. I do not recommend | ||||
| %                 its use. | ||||
| % | ||||
| %       method == 0 --> (DEFAULT) see method 1, but | ||||
| %         this method does not build as large of a | ||||
| %         linear system in the case of only a few | ||||
| %         NaNs in a large array. | ||||
| %         Extrapolation behavior is linear. | ||||
| %          | ||||
| %       method == 1 --> simple approach, applies del^2 | ||||
| %         over the entire array, then drops those parts | ||||
| %         of the array which do not have any contact with | ||||
| %         NaNs. Uses a least squares approach, but it | ||||
| %         does not modify known values. | ||||
| %         In the case of small arrays, this method is | ||||
| %         quite fast as it does very little extra work. | ||||
| %         Extrapolation behavior is linear. | ||||
| %          | ||||
| %       method == 2 --> uses del^2, but solving a direct | ||||
| %         linear system of equations for nan elements. | ||||
| %         This method will be the fastest possible for | ||||
| %         large systems since it uses the sparsest | ||||
| %         possible system of equations. Not a least | ||||
| %         squares approach, so it may be least robust | ||||
| %         to noise on the boundaries of any holes. | ||||
| %         This method will also be least able to | ||||
| %         interpolate accurately for smooth surfaces. | ||||
| %         Extrapolation behavior is linear. | ||||
| % | ||||
| %         Note: method 2 has problems in 1-d, so this | ||||
| %         method is disabled for vector inputs. | ||||
| %          | ||||
| %       method == 3 --+ See method 0, but uses del^4 for | ||||
| %         the interpolating operator. This may result | ||||
| %         in more accurate interpolations, at some cost | ||||
| %         in speed. | ||||
| %          | ||||
| %       method == 4 --+ Uses a spring metaphor. Assumes | ||||
| %         springs (with a nominal length of zero) | ||||
| %         connect each node with every neighbor | ||||
| %         (horizontally, vertically and diagonally) | ||||
| %         Since each node tries to be like its neighbors, | ||||
| %         extrapolation is as a constant function where | ||||
| %         this is consistent with the neighboring nodes. | ||||
| % | ||||
| %       method == 5 --+ See method 2, but use an average | ||||
| %         of the 8 nearest neighbors to any element. | ||||
| %         This method is NOT recommended for use. | ||||
| % | ||||
| % | ||||
| % arguments (output): | ||||
| %   B - nxm array with NaNs replaced | ||||
| % | ||||
| % | ||||
| % Example: | ||||
| %  [x,y] = meshgrid(0:.01:1); | ||||
| %  z0 = exp(x+y); | ||||
| %  znan = z0; | ||||
| %  znan(20:50,40:70) = NaN; | ||||
| %  znan(30:90,5:10) = NaN; | ||||
| %  znan(70:75,40:90) = NaN; | ||||
| % | ||||
| %  z = inpaint_nans(znan); | ||||
| % | ||||
| % | ||||
| % See also: griddata, interp1 | ||||
| % | ||||
| % Author: John D'Errico | ||||
| % e-mail address: woodchips@rochester.rr.com | ||||
| % Release: 2 | ||||
| % Release date: 4/15/06 | ||||
|  | ||||
|  | ||||
| % I always need to know which elements are NaN, | ||||
| % and what size the array is for any method | ||||
| [n,m]=size(A); | ||||
| A=A(:); | ||||
| nm=n*m; | ||||
| k=isnan(A(:)); | ||||
|  | ||||
| % list the nodes which are known, and which will | ||||
| % be interpolated | ||||
| nan_list=find(k); | ||||
| known_list=find(~k); | ||||
|  | ||||
| % how many nans overall | ||||
| nan_count=length(nan_list); | ||||
|  | ||||
| % convert NaN indices to (r,c) form | ||||
| % nan_list==find(k) are the unrolled (linear) indices | ||||
| % (row,column) form | ||||
| [nr,nc]=ind2sub([n,m],nan_list); | ||||
|  | ||||
| % both forms of index in one array: | ||||
| % column 1 == unrolled index | ||||
| % column 2 == row index | ||||
| % column 3 == column index | ||||
| nan_list=[nan_list,nr,nc]; | ||||
|  | ||||
| % supply default method | ||||
| if (nargin<3) || isempty(method) | ||||
|   method = 0; | ||||
| elseif ~ismember(method,0:5) | ||||
|   error 'If supplied, method must be one of: {0,1,2,3,4,5}.' | ||||
| end | ||||
|  | ||||
| % for different methods | ||||
| switch method | ||||
|  case 0 | ||||
|   % The same as method == 1, except only work on those | ||||
|   % elements which are NaN, or at least touch a NaN. | ||||
|    | ||||
|   % is it 1-d or 2-d? | ||||
|   if (m == 1) || (n == 1) | ||||
|     % really a 1-d case | ||||
|     work_list = nan_list(:,1); | ||||
|     work_list = unique([work_list;work_list - 1;work_list + 1]); | ||||
|     work_list(work_list <= 1) = []; | ||||
|     work_list(work_list >= nm) = []; | ||||
|     nw = numel(work_list); | ||||
|      | ||||
|     u = (1:nw)'; | ||||
|     fda = sparse(repmat(u,1,3),bsxfun(@plus,work_list,-1:1), ... | ||||
|       repmat([1 -2 1],nw,1),nw,nm); | ||||
|   else | ||||
|     % a 2-d case | ||||
|      | ||||
|     % horizontal and vertical neighbors only | ||||
|     talks_to = [-1 0;0 -1;1 0;0 1]; | ||||
|     neighbors_list=identify_neighbors(n,m,nan_list,talks_to); | ||||
|      | ||||
|     % list of all nodes we have identified | ||||
|     all_list=[nan_list;neighbors_list]; | ||||
|      | ||||
|     % generate sparse array with second partials on row | ||||
|     % variable for each element in either list, but only | ||||
|     % for those nodes which have a row index > 1 or < n | ||||
|     L = find((all_list(:,2) > 1) & (all_list(:,2) < n)); | ||||
|     nl=length(L); | ||||
|     if nl>0 | ||||
|       fda=sparse(repmat(all_list(L,1),1,3), ... | ||||
|         repmat(all_list(L,1),1,3)+repmat([-1 0 1],nl,1), ... | ||||
|         repmat([1 -2 1],nl,1),nm,nm); | ||||
|     else | ||||
|       fda=spalloc(n*m,n*m,size(all_list,1)*5); | ||||
|     end | ||||
|      | ||||
|     % 2nd partials on column index | ||||
|     L = find((all_list(:,3) > 1) & (all_list(:,3) < m)); | ||||
|     nl=length(L); | ||||
|     if nl>0 | ||||
|       fda=fda+sparse(repmat(all_list(L,1),1,3), ... | ||||
|         repmat(all_list(L,1),1,3)+repmat([-n 0 n],nl,1), ... | ||||
|         repmat([1 -2 1],nl,1),nm,nm); | ||||
|     end | ||||
|   end | ||||
|    | ||||
|   % eliminate knowns | ||||
|   rhs=-fda(:,known_list)*A(known_list); | ||||
|   k=find(any(fda(:,nan_list(:,1)),2)); | ||||
|    | ||||
|   % and solve... | ||||
|   B=A; | ||||
|   B(nan_list(:,1))=fda(k,nan_list(:,1))\rhs(k); | ||||
|    | ||||
|  case 1 | ||||
|   % least squares approach with del^2. Build system | ||||
|   % for every array element as an unknown, and then | ||||
|   % eliminate those which are knowns. | ||||
|  | ||||
|   % Build sparse matrix approximating del^2 for | ||||
|   % every element in A. | ||||
|    | ||||
|   % is it 1-d or 2-d? | ||||
|   if (m == 1) || (n == 1) | ||||
|     % a 1-d case | ||||
|     u = (1:(nm-2))'; | ||||
|     fda = sparse(repmat(u,1,3),bsxfun(@plus,u,0:2), ... | ||||
|       repmat([1 -2 1],nm-2,1),nm-2,nm); | ||||
|   else | ||||
|     % a 2-d case | ||||
|      | ||||
|     % Compute finite difference for second partials | ||||
|     % on row variable first | ||||
|     [i,j]=ndgrid(2:(n-1),1:m); | ||||
|     ind=i(:)+(j(:)-1)*n; | ||||
|     np=(n-2)*m; | ||||
|     fda=sparse(repmat(ind,1,3),[ind-1,ind,ind+1], ... | ||||
|       repmat([1 -2 1],np,1),n*m,n*m); | ||||
|      | ||||
|     % now second partials on column variable | ||||
|     [i,j]=ndgrid(1:n,2:(m-1)); | ||||
|     ind=i(:)+(j(:)-1)*n; | ||||
|     np=n*(m-2); | ||||
|     fda=fda+sparse(repmat(ind,1,3),[ind-n,ind,ind+n], ... | ||||
|       repmat([1 -2 1],np,1),nm,nm); | ||||
|   end | ||||
|    | ||||
|   % eliminate knowns | ||||
|   rhs=-fda(:,known_list)*A(known_list); | ||||
|   k=find(any(fda(:,nan_list),2)); | ||||
|    | ||||
|   % and solve... | ||||
|   B=A; | ||||
|   B(nan_list(:,1))=fda(k,nan_list(:,1))\rhs(k); | ||||
|    | ||||
|  case 2 | ||||
|   % Direct solve for del^2 BVP across holes | ||||
|  | ||||
|   % generate sparse array with second partials on row | ||||
|   % variable for each nan element, only for those nodes | ||||
|   % which have a row index > 1 or < n | ||||
|    | ||||
|   % is it 1-d or 2-d? | ||||
|   if (m == 1) || (n == 1) | ||||
|     % really just a 1-d case | ||||
|     error('Method 2 has problems for vector input. Please use another method.') | ||||
|      | ||||
|   else | ||||
|     % a 2-d case | ||||
|     L = find((nan_list(:,2) > 1) & (nan_list(:,2) < n)); | ||||
|     nl=length(L); | ||||
|     if nl>0 | ||||
|       fda=sparse(repmat(nan_list(L,1),1,3), ... | ||||
|         repmat(nan_list(L,1),1,3)+repmat([-1 0 1],nl,1), ... | ||||
|         repmat([1 -2 1],nl,1),n*m,n*m); | ||||
|     else | ||||
|       fda=spalloc(n*m,n*m,size(nan_list,1)*5); | ||||
|     end | ||||
|      | ||||
|     % 2nd partials on column index | ||||
|     L = find((nan_list(:,3) > 1) & (nan_list(:,3) < m)); | ||||
|     nl=length(L); | ||||
|     if nl>0 | ||||
|       fda=fda+sparse(repmat(nan_list(L,1),1,3), ... | ||||
|         repmat(nan_list(L,1),1,3)+repmat([-n 0 n],nl,1), ... | ||||
|         repmat([1 -2 1],nl,1),n*m,n*m); | ||||
|     end | ||||
|      | ||||
|     % fix boundary conditions at extreme corners | ||||
|     % of the array in case there were nans there | ||||
|     if ismember(1,nan_list(:,1)) | ||||
|       fda(1,[1 2 n+1])=[-2 1 1]; | ||||
|     end | ||||
|     if ismember(n,nan_list(:,1)) | ||||
|       fda(n,[n, n-1,n+n])=[-2 1 1]; | ||||
|     end | ||||
|     if ismember(nm-n+1,nan_list(:,1)) | ||||
|       fda(nm-n+1,[nm-n+1,nm-n+2,nm-n])=[-2 1 1]; | ||||
|     end | ||||
|     if ismember(nm,nan_list(:,1)) | ||||
|       fda(nm,[nm,nm-1,nm-n])=[-2 1 1]; | ||||
|     end | ||||
|      | ||||
|     % eliminate knowns | ||||
|     rhs=-fda(:,known_list)*A(known_list); | ||||
|      | ||||
|     % and solve... | ||||
|     B=A; | ||||
|     k=nan_list(:,1); | ||||
|     B(k)=fda(k,k)\rhs(k); | ||||
|      | ||||
|   end | ||||
|    | ||||
|  case 3 | ||||
|   % The same as method == 0, except uses del^4 as the | ||||
|   % interpolating operator. | ||||
|    | ||||
|   % del^4 template of neighbors | ||||
|   talks_to = [-2 0;-1 -1;-1 0;-1 1;0 -2;0 -1; ... | ||||
|       0 1;0 2;1 -1;1 0;1 1;2 0]; | ||||
|   neighbors_list=identify_neighbors(n,m,nan_list,talks_to); | ||||
|    | ||||
|   % list of all nodes we have identified | ||||
|   all_list=[nan_list;neighbors_list]; | ||||
|    | ||||
|   % generate sparse array with del^4, but only | ||||
|   % for those nodes which have a row & column index | ||||
|   % >= 3 or <= n-2 | ||||
|   L = find( (all_list(:,2) >= 3) & ... | ||||
|             (all_list(:,2) <= (n-2)) & ... | ||||
|             (all_list(:,3) >= 3) & ... | ||||
|             (all_list(:,3) <= (m-2))); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     % do the entire template at once | ||||
|     fda=sparse(repmat(all_list(L,1),1,13), ... | ||||
|         repmat(all_list(L,1),1,13) + ... | ||||
|         repmat([-2*n,-n-1,-n,-n+1,-2,-1,0,1,2,n-1,n,n+1,2*n],nl,1), ... | ||||
|         repmat([1 2 -8 2 1 -8 20 -8 1 2 -8 2 1],nl,1),nm,nm); | ||||
|   else | ||||
|     fda=spalloc(n*m,n*m,size(all_list,1)*5); | ||||
|   end | ||||
|    | ||||
|   % on the boundaries, reduce the order around the edges | ||||
|   L = find((((all_list(:,2) == 2) | ... | ||||
|              (all_list(:,2) == (n-1))) & ... | ||||
|             (all_list(:,3) >= 2) & ... | ||||
|             (all_list(:,3) <= (m-1))) | ... | ||||
|            (((all_list(:,3) == 2) | ... | ||||
|              (all_list(:,3) == (m-1))) & ... | ||||
|             (all_list(:,2) >= 2) & ... | ||||
|             (all_list(:,2) <= (n-1)))); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(all_list(L,1),1,5), ... | ||||
|       repmat(all_list(L,1),1,5) + ... | ||||
|         repmat([-n,-1,0,+1,n],nl,1), ... | ||||
|       repmat([1 1 -4 1 1],nl,1),nm,nm); | ||||
|   end | ||||
|    | ||||
|   L = find( ((all_list(:,2) == 1) | ... | ||||
|              (all_list(:,2) == n)) & ... | ||||
|             (all_list(:,3) >= 2) & ... | ||||
|             (all_list(:,3) <= (m-1))); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(all_list(L,1),1,3), ... | ||||
|       repmat(all_list(L,1),1,3) + ... | ||||
|         repmat([-n,0,n],nl,1), ... | ||||
|       repmat([1 -2 1],nl,1),nm,nm); | ||||
|   end | ||||
|    | ||||
|   L = find( ((all_list(:,3) == 1) | ... | ||||
|              (all_list(:,3) == m)) & ... | ||||
|             (all_list(:,2) >= 2) & ... | ||||
|             (all_list(:,2) <= (n-1))); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(all_list(L,1),1,3), ... | ||||
|       repmat(all_list(L,1),1,3) + ... | ||||
|         repmat([-1,0,1],nl,1), ... | ||||
|       repmat([1 -2 1],nl,1),nm,nm); | ||||
|   end | ||||
|    | ||||
|   % eliminate knowns | ||||
|   rhs=-fda(:,known_list)*A(known_list); | ||||
|   k=find(any(fda(:,nan_list(:,1)),2)); | ||||
|    | ||||
|   % and solve... | ||||
|   B=A; | ||||
|   B(nan_list(:,1))=fda(k,nan_list(:,1))\rhs(k); | ||||
|    | ||||
|  case 4 | ||||
|   % Spring analogy | ||||
|   % interpolating operator. | ||||
|    | ||||
|   % list of all springs between a node and a horizontal | ||||
|   % or vertical neighbor | ||||
|   hv_list=[-1 -1 0;1 1 0;-n 0 -1;n 0 1]; | ||||
|   hv_springs=[]; | ||||
|   for i=1:4 | ||||
|     hvs=nan_list+repmat(hv_list(i,:),nan_count,1); | ||||
|     k=(hvs(:,2)>=1) & (hvs(:,2)<=n) & (hvs(:,3)>=1) & (hvs(:,3)<=m); | ||||
|     hv_springs=[hv_springs;[nan_list(k,1),hvs(k,1)]]; | ||||
|   end | ||||
|  | ||||
|   % delete replicate springs | ||||
|   hv_springs=unique(sort(hv_springs,2),'rows'); | ||||
|    | ||||
|   % build sparse matrix of connections, springs | ||||
|   % connecting diagonal neighbors are weaker than | ||||
|   % the horizontal and vertical springs | ||||
|   nhv=size(hv_springs,1); | ||||
|   springs=sparse(repmat((1:nhv)',1,2),hv_springs, ... | ||||
|      repmat([1 -1],nhv,1),nhv,nm); | ||||
|    | ||||
|   % eliminate knowns | ||||
|   rhs=-springs(:,known_list)*A(known_list); | ||||
|    | ||||
|   % and solve... | ||||
|   B=A; | ||||
|   B(nan_list(:,1))=springs(:,nan_list(:,1))\rhs; | ||||
|    | ||||
|  case 5 | ||||
|   % Average of 8 nearest neighbors | ||||
|    | ||||
|   % generate sparse array to average 8 nearest neighbors | ||||
|   % for each nan element, be careful around edges | ||||
|   fda=spalloc(n*m,n*m,size(nan_list,1)*9); | ||||
|    | ||||
|   % -1,-1 | ||||
|   L = find((nan_list(:,2) > 1) & (nan_list(:,3) > 1));  | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([-n-1, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|    | ||||
|   % 0,-1 | ||||
|   L = find(nan_list(:,3) > 1); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([-n, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|  | ||||
|   % +1,-1 | ||||
|   L = find((nan_list(:,2) < n) & (nan_list(:,3) > 1)); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([-n+1, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|  | ||||
|   % -1,0 | ||||
|   L = find(nan_list(:,2) > 1); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([-1, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|  | ||||
|   % +1,0 | ||||
|   L = find(nan_list(:,2) < n); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([1, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|  | ||||
|   % -1,+1 | ||||
|   L = find((nan_list(:,2) > 1) & (nan_list(:,3) < m));  | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([n-1, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|    | ||||
|   % 0,+1 | ||||
|   L = find(nan_list(:,3) < m); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([n, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|  | ||||
|   % +1,+1 | ||||
|   L = find((nan_list(:,2) < n) & (nan_list(:,3) < m)); | ||||
|   nl=length(L); | ||||
|   if nl>0 | ||||
|     fda=fda+sparse(repmat(nan_list(L,1),1,2), ... | ||||
|       repmat(nan_list(L,1),1,2)+repmat([n+1, 0],nl,1), ... | ||||
|       repmat([1 -1],nl,1),n*m,n*m); | ||||
|   end | ||||
|    | ||||
|   % eliminate knowns | ||||
|   rhs=-fda(:,known_list)*A(known_list); | ||||
|    | ||||
|   % and solve... | ||||
|   B=A; | ||||
|   k=nan_list(:,1); | ||||
|   B(k)=fda(k,k)\rhs(k); | ||||
|    | ||||
| end | ||||
|  | ||||
| % all done, make sure that B is the same shape as | ||||
| % A was when we came in. | ||||
| B=reshape(B,n,m); | ||||
|  | ||||
|  | ||||
| % ==================================================== | ||||
| %      end of main function | ||||
| % ==================================================== | ||||
| % ==================================================== | ||||
| %      begin subfunctions | ||||
| % ==================================================== | ||||
| function neighbors_list=identify_neighbors(n,m,nan_list,talks_to) | ||||
| % identify_neighbors: identifies all the neighbors of | ||||
| %   those nodes in nan_list, not including the nans | ||||
| %   themselves | ||||
| % | ||||
| % arguments (input): | ||||
| %  n,m - scalar - [n,m]=size(A), where A is the | ||||
| %      array to be interpolated | ||||
| %  nan_list - array - list of every nan element in A | ||||
| %      nan_list(i,1) == linear index of i'th nan element | ||||
| %      nan_list(i,2) == row index of i'th nan element | ||||
| %      nan_list(i,3) == column index of i'th nan element | ||||
| %  talks_to - px2 array - defines which nodes communicate | ||||
| %      with each other, i.e., which nodes are neighbors. | ||||
| % | ||||
| %      talks_to(i,1) - defines the offset in the row | ||||
| %                      dimension of a neighbor | ||||
| %      talks_to(i,2) - defines the offset in the column | ||||
| %                      dimension of a neighbor | ||||
| %       | ||||
| %      For example, talks_to = [-1 0;0 -1;1 0;0 1] | ||||
| %      means that each node talks only to its immediate | ||||
| %      neighbors horizontally and vertically. | ||||
| %  | ||||
| % arguments(output): | ||||
| %  neighbors_list - array - list of all neighbors of | ||||
| %      all the nodes in nan_list | ||||
|  | ||||
| if ~isempty(nan_list) | ||||
|   % use the definition of a neighbor in talks_to | ||||
|   nan_count=size(nan_list,1); | ||||
|   talk_count=size(talks_to,1); | ||||
|    | ||||
|   nn=zeros(nan_count*talk_count,2); | ||||
|   j=[1,nan_count]; | ||||
|   for i=1:talk_count | ||||
|     nn(j(1):j(2),:)=nan_list(:,2:3) + ... | ||||
|         repmat(talks_to(i,:),nan_count,1); | ||||
|     j=j+nan_count; | ||||
|   end | ||||
|    | ||||
|   % drop those nodes which fall outside the bounds of the | ||||
|   % original array | ||||
|   L = (nn(:,1)<1)|(nn(:,1)>n)|(nn(:,2)<1)|(nn(:,2)>m);  | ||||
|   nn(L,:)=[]; | ||||
|    | ||||
|   % form the same format 3 column array as nan_list | ||||
|   neighbors_list=[sub2ind([n,m],nn(:,1),nn(:,2)),nn]; | ||||
|    | ||||
|   % delete replicates in the neighbors list | ||||
|   neighbors_list=unique(neighbors_list,'rows'); | ||||
|    | ||||
|   % and delete those which are also in the list of NaNs. | ||||
|   neighbors_list=setdiff(neighbors_list,nan_list,'rows'); | ||||
|    | ||||
| else | ||||
|   neighbors_list=[]; | ||||
| end | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
							
								
								
									
18  PDToolkit/@PDTrial/logtransform.m  Normal file
							| @@ -0,0 +1,18 @@ | ||||
| function[obj] = logtransform(obj) | ||||
| %% perform logtransformation of all data | ||||
|  | ||||
| % signals = fieldnames(obj.data.uncorrected); | ||||
|  | ||||
| % for i = 1:length(signals) | ||||
|  | ||||
| eyes = fieldnames(obj.data); | ||||
| for e = 1:length(eyes) | ||||
|     eye = eyes{e}; | ||||
|      | ||||
|     signal = obj.data.(eye).uncorrected.filtered; | ||||
|     logtransformed_signal = log(signal); | ||||
|    | ||||
|     obj.data.(eye).uncorrected.logtransformed = logtransformed_signal; | ||||
| end | ||||
|  | ||||
|  | ||||
							
								
								
									
206  PDToolkit/@PDTrial/plot.m  Normal file
							| @@ -0,0 +1,206 @@ | ||||
| function[obj]= plot(varargin) | ||||
| cla; | ||||
| hold on; | ||||
|  | ||||
| obj = varargin{1}; | ||||
| settings = struct('BaselineCorrection', 0,... | ||||
|                   'FilterSize', 100, ... | ||||
|                   'BlinkExtension', [10 10],... | ||||
|                   'MaximumBlinkSize', 150,... | ||||
|                   'QualityThreshold',50); | ||||
|  | ||||
| if nargin > 1 | ||||
|     settings = varargin{2}; | ||||
| end | ||||
|  | ||||
| %% get plotting options | ||||
| options = get(gcf, 'UserData'); | ||||
|      | ||||
| if ~isfield(options,'raw') | ||||
|     options.raw = 0; | ||||
|     options.logtransformed = 1; | ||||
|     options.interpolated = 1; | ||||
|     options.filtered = 1; | ||||
|     options.blinks = 1; | ||||
|     options.markers = 1; | ||||
|     options.labels   = 1; | ||||
|     options.baseline = 0; | ||||
|     options.baseline_corrected = 0; | ||||
|     options.deconvolution = 0; | ||||
|      | ||||
|     options.drawing = options; | ||||
|     options.drawing.raw = {'-', [1 0 0], 3}; | ||||
|     options.drawing.logtransformed = {'-', [0 0 0.5], 3}; | ||||
|     options.drawing.interpolated= {'-', [0.5 0 0], 3}; | ||||
|     options.drawing.filtered= {'-', [0 0.5 0], 3}; | ||||
|     options.drawing.markers= {'-.', [0.5 0 0], 3}; | ||||
|     options.drawing.baseline= {'--', [0 0 1], 3}; | ||||
|     options.drawing.baseline_corrected= {'--', [0 0 0], 3}; | ||||
|      | ||||
| end | ||||
|  | ||||
| if (options.baseline_corrected) | ||||
|     field = 'baseline_corrected'; | ||||
|     options.baseline = 0; | ||||
| else     | ||||
|    field = 'uncorrected'; | ||||
|              | ||||
| end | ||||
|  | ||||
|  | ||||
| miny = 1e6; maxy = 0; | ||||
|  | ||||
| signals = fieldnames(options); | ||||
| for i = 1:length(signals) | ||||
|     if (strcmp(signals{i}, 'drawing') || ... | ||||
|         strcmp(signals{i}, 'markers') || ... | ||||
|         strcmp(signals{i}, 'labels')  || ... | ||||
|         strcmp(signals{i}, 'blinks')  || ... | ||||
|         strcmp(signals{i}, 'deconvolution')  || ... | ||||
|         strcmp(signals{i}, 'baseline_corrected')) | ||||
|                 continue;    | ||||
|     end | ||||
|          | ||||
|     if getfield(options, signals{i}) %% plotting is enabled | ||||
|          | ||||
|         signal = getfield(getfield(obj.data, field), signals{i}); | ||||
|         y = [min(signal) max(signal) ]; | ||||
|         len = min(length(obj.time),  length(signal)); | ||||
|          | ||||
|         drawing = getfield(options.drawing, signals{i}); | ||||
|         ls = drawing{1}; color = drawing{2}; lw = drawing{3}; | ||||
|          | ||||
|         if (strcmp(signals{i}, 'filtered')) | ||||
|             signal(1:settings.FilterSize) = NaN; | ||||
|             signal(len - settings.FilterSize:len) = NaN; | ||||
|         end | ||||
|          | ||||
|         miny = min(min(signal),miny); | ||||
|         maxy = max(max(signal),maxy); | ||||
|  | ||||
|          | ||||
|         plot(obj.time(1:len), signal(1:len), 'LineWidth', lw, 'Color', color, 'LineStyle', ls); | ||||
|       | ||||
|     end | ||||
| end | ||||
|  | ||||
|  | ||||
|  | ||||
| if miny > maxy | ||||
|     tmp = maxy; | ||||
|     maxy = miny; | ||||
|     miny = tmp; | ||||
| end | ||||
|  | ||||
|  | ||||
| if (options.baseline) | ||||
|     len = length(obj.data.uncorrected.interpolated); | ||||
|     baseline_signal = repmat(obj.baseline, 1,len); | ||||
|     ls = options.drawing.baseline{1};  | ||||
|     color = options.drawing.baseline{2};  | ||||
|     lw = options.drawing.baseline{3}; | ||||
|      | ||||
|     len = min(length(obj.time),len); | ||||
|     plot(obj.time(1:len), baseline_signal, 'Color', color, 'LineWidth', lw, 'LineStyle', ls); | ||||
|      | ||||
|     %% real baseline signal plotted | ||||
|     baseline_ind = intersect(find(obj.time > obj.baseline_onset), ... | ||||
|                                 find(obj.time <= obj.baseline_offset)); | ||||
|     b_time  = obj.time(baseline_ind); | ||||
|     b_value = obj.data.uncorrected.interpolated(baseline_ind); | ||||
|      | ||||
|     plot(b_time, b_value, 'Color', color, 'LineWidth', lw, 'LineStyle', ls); | ||||
|      | ||||
| end | ||||
|  | ||||
| if options.blinks | ||||
|  | ||||
|     nans = find(isnan(obj.data.uncorrected.raw)); | ||||
|     dnans = diff(nans); | ||||
|  | ||||
|     i=1; | ||||
|      | ||||
|     if (~isempty(nans))     | ||||
|         x1=obj.time(nans(i)); | ||||
|     end | ||||
|     %end | ||||
|  | ||||
|     xlist=[]; | ||||
|     while i < length(nans) | ||||
|         if dnans(i) ~=1 | ||||
|             x2 = obj.time(nans(i)); | ||||
|             xlist = [xlist; x1 x2]; | ||||
|             x1 = obj.time(nans(i+1)); | ||||
|         end | ||||
|         i=i+1; | ||||
|     end | ||||
|   | ||||
|     for t = 1:size(xlist,1); | ||||
|       x = [xlist(t,1) xlist(t,1) xlist(t,2) xlist(t,2)]; | ||||
|       y = [miny maxy maxy miny]; | ||||
|       fa=fill(x,y,[1 0 0]); | ||||
|      set(fa, 'EdgeAlpha', .5, 'EdgeColor',[1 0 0]); | ||||
|      alpha(fa, 0.5); | ||||
|     end | ||||
|      | ||||
|     plot([obj.time(obj.settings.FilterSize) obj.time(obj.settings.FilterSize)], [miny maxy], 'r-'); | ||||
|     plot([obj.time(end-obj.settings.FilterSize) obj.time(end-obj.settings.FilterSize)], [miny maxy], 'r-'); | ||||
| end | ||||
|   | ||||
| if ~isempty(miny) | ||||
|      ylim([miny*.9 maxy*1.1]); %% fix y limits | ||||
| end | ||||
|  | ||||
| % %% add 50% grey squares on top of the filter startup effects | ||||
| % ends = get(gca,'XLim'); | ||||
| %  | ||||
| % box1 = [ends(1) obj.time(obj.settings.FilterSize) obj.time(obj.settings.FilterSize) ends(1)]; | ||||
| % box2 = [ends(2) obj.time(end-obj.settings.FilterSize) obj.time(end-obj.settings.FilterSize) ends(2)]; | ||||
| % boxy = [miny*.95 miny*.95 maxy*1.1 maxy*1.1]; | ||||
| % %h(1) = fill(box1, boxy,[.5 0 0]); | ||||
| % %h(2) = fill(box2, boxy,[.5 0 0]); | ||||
| % %alpha(h, .2); | ||||
| %  | ||||
| if options.markers | ||||
|     markers = {'trial_start', 'trial_end', 'baseline_onset', 'baseline_offset','stimulus_onset', 'stimulus_offset'};     | ||||
|     marker_colors = {[0 0 0], [0 0 0], [0 0 1], [0 0 1], [1 0.5 0], [1 0.5 0]}; | ||||
|      | ||||
|     for m = 1:length(markers) | ||||
|         if ~isempty(obj.(markers{m}))  | ||||
|              | ||||
|             plot([obj.(markers{m})(1) obj.(markers{m})(1)], [miny maxy], 'Color', marker_colors{m}, 'LineWidth', 2); | ||||
|         end | ||||
|     end | ||||
| end | ||||
|  | ||||
| if options.labels | ||||
|      | ||||
|     for l = 1:length(obj.labels)    | ||||
|         if ~(isfield(obj.labels(l), 'color') && length(obj.labels(l).color)==3) | ||||
|             obj.labels(l).color = [ .5 .5 .5]; | ||||
|         end | ||||
|         plot([obj.labels(l).time obj.labels(l).time], [miny maxy], 'Color', obj.labels(l).color, 'LineWidth', 2); | ||||
|          | ||||
|     end | ||||
| end | ||||
|  | ||||
|  | ||||
| if options.deconvolution | ||||
|  | ||||
|     if isfield(obj.deconvolution, 'params') | ||||
|         params   = obj.deconvolution.params; | ||||
|         modelfit = obj.prf_convolve(obj.stick_model(params(2:end)),params(1)); | ||||
|  | ||||
|         plot(obj.deconvolution.time,modelfit, 'k:'); | ||||
|      | ||||
|     end | ||||
| end | ||||
|  | ||||
|      | ||||
| %qt = text((obj.trial_end - (obj.trial_end - obj.trial_start)/4),... | ||||
| %           miny + ((maxy - miny) /5), sprintf('Quality: %2.2f%%', obj.quality)); | ||||
| qt = title(sprintf('Quality: %2.2f%%', obj.quality)); | ||||
| set(qt, 'FontName', 'Verdana'); | ||||
| set(qt, 'FontWeight', 'bold'); | ||||
| set(qt, 'FontSize', 10); | ||||
							
								
								
									
17  PDToolkit/@PDTrial/preprocess.m  Normal file
							| @@ -0,0 +1,17 @@ | ||||
| function[obj] = preprocess(obj, settings) | ||||
| %% Preprocessing one trial | ||||
|  | ||||
| %% Filter the blinks | ||||
| obj = obj.remove_blinks(settings); | ||||
|  | ||||
| % Smooth the data | ||||
| obj = obj.smooth(settings); | ||||
|  | ||||
| % Calculate logtransformed data | ||||
| obj = obj.logtransform; | ||||
|  | ||||
| % Correct for baseline | ||||
| obj = obj.correct_for_baseline(settings); | ||||
|  | ||||
| % Extract statistics | ||||
| obj = obj.calculate_statistics; | ||||
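|  | ||||
| % Example usage - a minimal sketch; the settings fields listed here are the | ||||
| % ones read by remove_blinks.m, smooth.m, getBaseline.m and plot.m: | ||||
| % | ||||
| %   settings = struct('BaselineCorrection', 2, 'FilterSize', 100, ... | ||||
| %                     'BlinkExtension', [10 10], 'MaximumBlinkSize', 150, ... | ||||
| %                     'QualityThreshold', 50); | ||||
| %   trial = trial.preprocess(settings); | ||||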
							
								
								
									
24  PDToolkit/@PDTrial/prf.m  Normal file
							| @@ -0,0 +1,24 @@ | ||||
| function[obj h] = prf(obj, t, varargin) | ||||
|  | ||||
| %%  Hoeks and Levelt, Behavior Research Methods 1993, vol 25(1) pp 16-26: | ||||
| %   | ||||
| %   Parameters of the Erlang Gamma are n, t_max | ||||
| %   # n+1 = number of layers | ||||
| %   # t_max = response maximum | ||||
| %   # f = scaling factor | ||||
| % | ||||
| %   Erlang Gamma = Gamma distribution with shape parameter (k) set to an integer | ||||
| %   For more information, see: https://en.wikipedia.org/wiki/Erlang_distribution | ||||
| % | ||||
| %   Hoeks and Levelt reported for n a range of 10.1 +/- 4.1,  | ||||
| %   NB: They question the use of 10.1 as a value for all subjects, | ||||
| %   but following deconvolution this is a minor issue. | ||||
| % | ||||
| %   t_max value reported in H&L : 930 ms with a standard deviation of 190 | ||||
| %   ms. | ||||
| % | ||||
|  | ||||
| % default parameter values (MATLAB does not allow defaults in the function signature) | ||||
| n = 10.1; t_max = 930; f = 1/(10^27); | ||||
| if nargin > 2, n     = varargin{1}; end | ||||
| if nargin > 3, t_max = varargin{2}; end | ||||
| if nargin > 4, f     = varargin{3}; end | ||||
|  | ||||
| h = f.*(t.^n) .* exp(-n .*t ./ t_max); | ||||
| h(1) = 0; % MATLAB indexing starts at 1 | ||||
|  | ||||
|  | ||||
							
								
								
									
13  PDToolkit/@PDTrial/prf_convolve.m  Normal file
							| @@ -0,0 +1,13 @@ | ||||
| function[model] = prf_convolve(obj, stick_model, slope) | ||||
|  | ||||
| start_trial= obj.deconvolution.time(1); | ||||
| time = obj.deconvolution.time - start_trial; | ||||
|  | ||||
| [obj prf]= obj.pupil_response_function(time); | ||||
|  | ||||
| model = conv(stick_model, prf, 'same')'; | ||||
|  | ||||
|  | ||||
| slope_model = 1:length(model); | ||||
| slope_model = slope_model .* slope; | ||||
| model = model + slope_model'; | ||||
							
								
								
									
20  PDToolkit/@PDTrial/pupil_response_function.m  Normal file
							| @@ -0,0 +1,20 @@ | ||||
| function[obj h ] = pupil_response_function(obj, t, varargin) | ||||
| %% Hoeks and Levelt Pupillary Response function | ||||
| % | ||||
| %   # n+1 = number of layers | ||||
| %   # t_max = response maximum | ||||
| %   # f = scaling factor | ||||
| if nargin == 2 | ||||
|     n=10.1; | ||||
|     t_max=930; | ||||
|     f=1/(10^27); | ||||
| else | ||||
|     n = varargin{1}; | ||||
|     t_max= varargin{2}; | ||||
|     f= varargin{3}; | ||||
| end | ||||
|  | ||||
| h = f .* (t.^n) .* exp(-n .*t ./ t_max); | ||||
| h(1) = 0; | ||||
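|  | ||||
| % Example - a minimal sketch: evaluate the response function over a 4-second | ||||
| % window sampled every 20 ms (time in ms, matching the default t_max of 930 ms), | ||||
| % for a PDTrial instance 'trial': | ||||
| %   t = 0:20:4000; | ||||
| %   [~, h] = trial.pupil_response_function(t); | ||||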
|  | ||||
|  | ||||
							
								
								
									
43  PDToolkit/@PDTrial/removeDuplicateLabels.m  Normal file
							| @@ -0,0 +1,43 @@ | ||||
| function[obj n] = removeDuplicateLabels(obj) | ||||
| %% Remove duplicate labels which may have been created in the trial object | ||||
|  | ||||
| if isempty(obj.labels) | ||||
|     return | ||||
| end | ||||
|  | ||||
| times = [obj.labels.time]; | ||||
| labels = {obj.labels.label}; | ||||
|  | ||||
| labels_to_prune = []; | ||||
| for l = 1:length(times) | ||||
|      | ||||
|     current_label_time = times(l); | ||||
|     current_label = labels{l}; | ||||
|     other_ind = l+1:length(times); % only compare with later labels so the first occurrence is kept | ||||
|      | ||||
|     %% match timings | ||||
|     identical_timings_ind = other_ind(times(other_ind)==current_label_time); | ||||
|      | ||||
|     if isempty(identical_timings_ind) | ||||
|         continue; | ||||
|     else | ||||
|          | ||||
|         for i = 1:length(identical_timings_ind)            | ||||
|             % match label | ||||
|             if (strcmp(labels{identical_timings_ind(i)}, current_label)) | ||||
|                 labels_to_prune = [labels_to_prune identical_timings_ind(i)]; | ||||
|             end | ||||
|         end | ||||
|     end     | ||||
| end | ||||
| n = length(labels_to_prune); | ||||
| label_struct = obj.labels; | ||||
| label_struct(labels_to_prune) = []; % remove duplicates | ||||
|  | ||||
| obj.labels = label_struct; % store pruned set of labels | ||||
|     | ||||
|          | ||||
|      | ||||
|     | ||||
|      | ||||
|      | ||||
							
								
								
									
72  PDToolkit/@PDTrial/remove_blinks.m  Normal file
							| @@ -0,0 +1,72 @@ | ||||
| function[obj] = remove_blinks(obj, settings) | ||||
|  | ||||
| eyes = fieldnames(obj.data); | ||||
| for e = 1:length(eyes) | ||||
|     eye = eyes{e}; | ||||
|     signal = obj.data.(eye).uncorrected.raw; | ||||
|  | ||||
|     %no data available for eye | ||||
|     if isempty(signal) | ||||
|         continue | ||||
|     end | ||||
|      | ||||
|     %% Kill the missing datapoints (assumption: the tracker codes missing samples as 0) | ||||
|     missing = find(signal == 0); | ||||
|     signal(missing) = NaN;     | ||||
|      | ||||
|     %% store original signal for reference | ||||
|     obj.data.(eye).uncorrected.raw = signal; | ||||
|     obj.blink_count.(eye) = 0; | ||||
|     obj.missing_data_count.(eye) = 0; | ||||
|     %% Kill the blinks (pupil dilation dip) | ||||
|     nans = find(isnan(signal)); | ||||
|  | ||||
|     %% Count the NaNs before extending them. | ||||
|     nans=find(isnan(signal)); | ||||
|     dnans = diff(nans); | ||||
|  | ||||
|     missing_data_list = find(dnans>1); | ||||
|     missing_data_count = 0; | ||||
|     if ~isempty(missing_data_list) | ||||
|         blink_list(:,1)   = nans(missing_data_list); | ||||
|         blink_list(:,2)   = dnans(missing_data_list); | ||||
|         blink_list(:,3)   = dnans(missing_data_list) < settings.MaximumBlinkSize; | ||||
|  | ||||
|         obj.blink_count.(eye) = length(find(blink_list(:,3))); | ||||
|         obj.missing_data_count.(eye) = length(missing_data_list); | ||||
|  | ||||
|         obj.blinks.(eye) = blink_list; | ||||
|     end | ||||
|  | ||||
|     %% Extend the NaNs | ||||
|     for n = 1:length(nans) | ||||
|         window = [nans(n)-settings.BlinkExtension(1):nans(n)+settings.BlinkExtension(2)]; | ||||
|         window(find(window<1)) = []; %% kill the negative indices for early blinks | ||||
|         signal(window) = NaN; | ||||
|     end | ||||
|  | ||||
|     if isempty(obj.blink_count.(eye)) | ||||
|         obj.blink_count.(eye) = 0; | ||||
|     end | ||||
|  | ||||
|     if isempty(obj.missing_data_count.(eye)) | ||||
|         obj.missing_data_count.(eye) = 0; | ||||
|     end | ||||
|      | ||||
|     %% determine quality; | ||||
|     obj.quality.(eye) =  100 - (length(find(isnan(signal))) / length(signal)*100); | ||||
|     obj.valid.(eye) = 1; | ||||
|    | ||||
|     if (obj.quality.(eye) < settings.QualityThreshold) | ||||
|         obj.display_and_log(sprintf('\t*) %2d blinks filtered (%2d missing data events); quality : %2d percent -> Trial excluded\n', obj.blink_count.(eye), obj.missing_data_count.(eye),round(obj.quality.(eye)))); | ||||
|         obj.valid.(eye) = 0; | ||||
|     else | ||||
|         obj.display_and_log(sprintf('\t*) %2d blinks filtered (%2d missing data events); quality : %2d percent', obj.blink_count.(eye), obj.missing_data_count.(eye),round(obj.quality.(eye))));     | ||||
|     end | ||||
|  | ||||
|     %% Interpolate NaNs | ||||
|     signal= obj.inpaint_nans(signal); | ||||
|  | ||||
|     obj.data.(eye).uncorrected.interpolated = signal; | ||||
|  | ||||
| end | ||||
							
								
								
									
3  PDToolkit/@PDTrial/setSettings.m  Normal file
							| @@ -0,0 +1,3 @@ | ||||
| function[obj] = setSettings(obj, settings) | ||||
|  | ||||
| obj.settings = settings; | ||||
							
								
								
									
11  PDToolkit/@PDTrial/smooth.m  Normal file
							| @@ -0,0 +1,11 @@ | ||||
| function[obj] = smooth(obj, settings) | ||||
|  | ||||
| eyes = fieldnames(obj.data); | ||||
| for e = 1:length(eyes) | ||||
|     eye = eyes{e}; | ||||
|      | ||||
|     if ~isempty(obj.data.(eye).uncorrected.interpolated)     | ||||
|         % moving-average filter with a window of settings.FilterSize samples | ||||
|         obj.data.(eye).uncorrected.filtered = conv(obj.data.(eye).uncorrected.interpolated, ones(1,settings.FilterSize), 'same') / settings.FilterSize; | ||||
|     end | ||||
| end | ||||
|  | ||||
							
								
								
									
25  PDToolkit/@PDTrial/stick_model.m  Normal file
							| @@ -0,0 +1,25 @@ | ||||
| function[model] = stick_model(obj, varargin) | ||||
|  | ||||
| if nargin == 1 | ||||
|    amplitudes = ones(1, length(obj.labels));     | ||||
| else | ||||
|     amplitudes = varargin{1}; | ||||
|      | ||||
| end | ||||
|  | ||||
| if (length(amplitudes) ~= length(obj.labels)) | ||||
|     error('Number of amplitudes must match the number of labels.'); | ||||
| end | ||||
|  | ||||
| start_trial= obj.time(1); | ||||
| time = obj.deconvolution.time - start_trial; | ||||
|  | ||||
| model = zeros(1, length(time)); | ||||
|  | ||||
| for l = 1:length(obj.labels) | ||||
|     onset = obj.labels(l).time - start_trial;            | ||||
|     post_onset_ind = find(time > onset);     | ||||
|     model(post_onset_ind(1)) = amplitudes(l);     | ||||
| end | ||||
|  | ||||
|  | ||||