LBP&Controls #1
@ -4,10 +4,10 @@ function [ResultStruct] = AutocorrStrides(data,FS, StrideTimeRange,ResultStruct
|
||||
% Stride time and regularity from auto correlation (according to Moe-Nilssen and Helbostad, Estimation of gait cycle characteristics by trunk accelerometry. J Biomech, 2004. 37: 121-6.)
|
||||
RangeStart = round(FS*StrideTimeRange(1));
|
||||
RangeEnd = round(FS*StrideTimeRange(2));
|
||||
[AutocorrResult,Lags]=xcov(data,RangeEnd,'unbiased');
|
||||
AutocorrSum = sum(AutocorrResult(:,[1 5 9]),2); % This sum is independent of sensor re-orientation, as long as axes are kept orthogonal
|
||||
AutocorrResult2= [AutocorrResult(:,[1 5 9]),AutocorrSum];
|
||||
IXRange = (numel(Lags)-(RangeEnd-RangeStart)):numel(Lags);
|
||||
[AutocorrResult,Lags]=xcov(data,RangeEnd,'unbiased'); % The unbiased alternative is produced by dividing the raw autocorrelation coefficient by the number of samples representing the overlapping part of the time series and the time-lagged replication:
|
||||
AutocorrSum = sum(AutocorrResult(:,[1 5 9]),2); % This sum is independent of sensor re-orientation, as long as axes are kept orthogonal. 1: VT-VT 5:ML-ML 9:AP-AP
|
||||
AutocorrResult2= [AutocorrResult(:,[1 5 9]),AutocorrSum]; % column 4 represents the sum of autocorr [1 ... 9]
|
||||
IXRange = (numel(Lags)-(RangeEnd-RangeStart)):numel(Lags); % Lags from -400 / 400, IXrange: 421 - 801
|
||||
% check that autocorrelations are positive for any direction,
|
||||
|
||||
AutocorrPlusTrans = AutocorrResult+AutocorrResult(:,[1 4 7 2 5 8 3 6 9]);
|
||||
@ -25,19 +25,13 @@ if isempty(IXRangeNew)
|
||||
|
||||
else
|
||||
StrideTimeSamples = Lags(IXRangeNew(AutocorrSum(IXRangeNew)==max(AutocorrSum(IXRangeNew))));
|
||||
StrideRegularity = AutocorrResult2(Lags==StrideTimeSamples,:)./AutocorrResult2(Lags==0,:); % Moe-Nilssen & Helbostad, 2004
|
||||
StrideRegularity = AutocorrResult2(Lags==StrideTimeSamples,:)./AutocorrResult2(Lags==0,:); % Moe-Nilssen & Helbostad, 2004: autocorrelation at the lag of the dominant (average) stride time, normalised by the zero-lag value
|
||||
RelativeStrideVariability = 1-StrideRegularity;
|
||||
StrideTimeSeconds = StrideTimeSamples/FS;
|
||||
|
||||
ResultStruct.StrideRegularity_V = StrideRegularity(1);
|
||||
ResultStruct.StrideRegularity_ML = StrideRegularity(2);
|
||||
ResultStruct.StrideRegularity_AP = StrideRegularity(3);
|
||||
ResultStruct.StrideRegularity_All = StrideRegularity(4);
|
||||
ResultStruct.RelativeStrideVariability_V = RelativeStrideVariability(1);
|
||||
ResultStruct.RelativeStrideVariability_ML = RelativeStrideVariability(2);
|
||||
ResultStruct.RelativeStrideVariability_AP = RelativeStrideVariability(3);
|
||||
ResultStruct.RelativeStrideVariability_All = RelativeStrideVariability(4);
|
||||
ResultStruct.StrideTimeSamples = StrideTimeSamples;
|
||||
ResultStruct.StrideTimeSeconds = StrideTimeSeconds;
|
||||
%ResultStruct.StrideTimeSeconds = StrideTimeSeconds;
|
||||
|
||||
end
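% A minimal usage sketch (assumed variable names): acc is an N x 3 trunk
% acceleration matrix ordered V/ML/AP and FS its sample frequency; the
% 0.2-4.0 s search range mirrors the value used elsewhere in this repository.
FS = 100;                           % assumed sample frequency (Hz)
StrideTimeRange = [0.2 4.0];        % stride-time search range (s)
Res = AutocorrStrides(acc, FS, StrideTimeRange, struct());
fprintf('Stride time %.2f s, stride regularity (V) %.2f\n', ...
    Res.StrideTimeSeconds, Res.StrideRegularity_V);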
|
@ -42,10 +42,10 @@ function [L_Estimate,ExtraArgsOut] = CalcMaxLyapWolfFixedEvolv(ThisTimeSeries,FS
|
||||
% 23 October 2012, use fixed evolve time instead of adaptable
|
||||
|
||||
if nargin > 2
|
||||
if isfield(ExtraArgsIn,'J')
|
||||
if isfield(ExtraArgsIn,'J') % typically false here, so J is estimated below from the mutual information
|
||||
J=ExtraArgsIn.J;
|
||||
end
|
||||
if isfield(ExtraArgsIn,'m')
|
||||
if isfield(ExtraArgsIn,'m') % typically true here; m is the supplied embedding dimension (7 in this repository)
|
||||
m=ExtraArgsIn.m;
|
||||
end
|
||||
end
|
||||
@ -75,9 +75,9 @@ if ~(nanstd(ThisTimeSeries) > 0)
|
||||
return;
|
||||
end
|
||||
|
||||
%% Determine J
|
||||
%% Determine J (Embedding Delay as the first minimum of the average mutual information : Fraser, A.M., Swinney, H.L., 1986. Independent coordinates for strange attractors from mutual information. Phys. Rev. A 33, 1134–1140.)
|
||||
if ~exist('J','var') || isempty(J)
|
||||
% Calculate mutual information and take first local minimum Tau as J
|
||||
% Calculate mutual information and take first local minimum Tau as J; To obtain coordinates for time delayed phase-space embedding that are as independent as possible
|
||||
bV = min(40,floor(sqrt(size(ThisTimeSeries,1))));
|
||||
tauVmax = 70;
|
||||
[mutMPro,cummutMPro,minmuttauVPro] = MutualInformationHisPro(ThisTimeSeries,(0:tauVmax),bV,1); % (xV,tauV,bV,flag)
|
||||
@ -113,6 +113,9 @@ end
|
||||
ExtraArgsOut.m=m;
|
||||
|
||||
%% Create state space based upon J and m
|
||||
% the procedure of computing the LYE involves first the calculation of a phase-space reconstruction for each acceleration time series,
|
||||
% by creating time-delayed copies of the time series: x(t), x(t + tau),
|
||||
% x(t + 2*tau), ..., x(t + (m-1)*tau)
|
||||
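% In a hedged reading of the code below: each row of StateSpace is a delay
% vector X_i = [x_i, x_(i+J), ..., x_(i+(m-1)J)] built per axis (the columns
% of the different axes concatenated), so N_ss = N - (m-1)*J rows remain for
% an N-sample series.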
N_ss = size(ThisTimeSeries,1)-(m-1)*J;
|
||||
StateSpace=nan(N_ss,m*size(ThisTimeSeries,2));
|
||||
for dim=1:size(ThisTimeSeries,2),
|
||||
@ -121,13 +124,13 @@ for dim=1:size(ThisTimeSeries,2),
|
||||
end
|
||||
end
|
||||
|
||||
%% Parameters for Lyapunov estimation
|
||||
CriticalLen=J*m;
|
||||
max_dist = sqrt(sum(std(StateSpace).^2))/10;
|
||||
max_dist_mult = 5;
|
||||
min_dist = max_dist/2;
|
||||
max_theta = 0.3;
|
||||
evolv = J;
|
||||
%% Parameters for Lyapunov estimation % SEE: Rispens, S. M., Pijnappels, M. A. G. M., Van Dieën, J. H., Van Schooten, K. S., Beek, P. J., & Daffertshofer, A. (2014). A benchmark test of accuracy and precision in estimating dynamical systems characteristics from a time series. Journal of biomechanics, 47(2), 470-475.
|
||||
CriticalLen=J*m; % After a fixed period (here the embedding delay) the nearby point is replaced.
|
||||
max_dist = sqrt(sum(std(StateSpace).^2))/10; % The maximum distance to which the divergence may grow before replacing the neighbour.
|
||||
max_dist_mult = 5; % Whenever points cannot be found, the distance limit is increased stepwise to maximally five times the original limit.
|
||||
min_dist = max_dist/2; % The new nearby point must not be closer than 1/20 of the signal's SD to avoid noise-related artifacts.
|
||||
max_theta = 0.3; % The new nearby point has to point in the same direction in state space as the old nearby point, as seen from the current reference (at least within 0.3 rad)
|
||||
evolv = J; % Embedding delay (time step after which the neighbour is replaced) (approximately 10 % of gait cycle?)
|
||||
|
||||
%% Calculate Lambda
|
||||
[L_Estimate]=div_wolf_fixed_evolv(StateSpace, FS, min_dist, max_dist, max_dist_mult, max_theta, CriticalLen, evolv);
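% Assuming div_wolf_fixed_evolv follows Wolf et al. (1985), the separation of
% each reference trajectory from its (repeatedly replaced) neighbour is tracked
% over the fixed evolution time, and the exponent is the time-normalised sum
%   L_Estimate ~ sum_k log( L'(t_k) / L(t_(k-1)) ) / (t_M - t_0),
% with units per second; the logarithm base (2 or e) depends on the
% implementation and is not asserted here.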
|
||||
|
BIN
ControlsData.mat
Normal file
Binary file not shown.
91
ControlsDetermineLocationTurnsFunc.m
Normal file
@ -0,0 +1,91 @@
|
||||
function [locsTurns,FilteredData,FootContacts] = ControlsDetermineLocationTurnsFunc(inputData,FS,ApplyRealignment,plotit,ResultStruct)
|
||||
% Description: Determine the location of turns, plot for visual inspection
|
||||
|
||||
% Input: Acc Data (not yet realigned)
|
||||
|
||||
%Realign sensor data to VT-ML-AP frame
|
||||
|
||||
if ApplyRealignment % apply realignment as described in Rispens S, Pijnappels M, van Schooten K, Beek PJ, Daffertshofer A, van Dieën JH (2014).
|
||||
data = inputData(:,[3,2,1]); % NOT THE SAME AS IN THE CLBP DATA; reorder data to 1 = V; 2 = ML; 3 = AP
|
||||
% Consistency of gait characteristics as determined from acceleration data collected at different trunk locations. Gait Posture 2014;40(1):187-92.
|
||||
[RealignedAcc, ~] = RealignSensorSignalHRAmp(data, FS);
|
||||
dataAcc = RealignedAcc;
|
||||
else
|
||||
data = inputData; % Realignment performed in previous stage
|
||||
dataAcc = inputData;
|
||||
end
|
||||
|
||||
% Filter data
|
||||
[B,A] = butter(2,20/(FS/2),'low'); % Filters data very strongly which is needed to determine turns correctly
|
||||
dataStepDetection = filtfilt(B,A,dataAcc);
|
||||
VTAcc = data(:,1);
|
||||
APAcc = data(:,3);
|
||||
|
||||
%% Determine Steps -
|
||||
% USE THE AP ACCELERATION: WIEBREN ZIJLSTRA: ASSESSMENT OF SPATIO-TEMPORAL PARAMETERS DURING UNCONSTRAINED WALKING (2004)
|
||||
|
||||
% In order to run the step detection script we first need to run an autocorrelation function;
|
||||
StrideTimeRange = [0.2 4.0]; % Range to search for stride time (seconds)
|
||||
|
||||
[ResultStruct] = AutocorrStrides(dataStepDetection,FS, StrideTimeRange,ResultStruct); % first guess of stridetimesamples
|
||||
|
||||
% StrideTimeSamples is needed as an input for the stepcountFunc;
|
||||
StrideTimeSamples = ResultStruct.StrideTimeSamples;
|
||||
|
||||
[PksAndLocsCorrected] = StepcountFunc(data,StrideTimeSamples,FS);
|
||||
FootContacts = PksAndLocsCorrected; % (1:2:end,2); previous version
|
||||
numStepsOption2_filt = numel(FootContacts); % counts number of steps;
|
||||
|
||||
%% Determine Turns
|
||||
% To convert the XYZ acceleration vectors at each point in time into scalar values,
|
||||
% calculate the magnitude of each vector. This way, you can detect large changes in overall acceleration,
|
||||
% such as steps taken while walking, regardless of device orientation.
|
||||
magfilt = sqrt(sum((dataStepDetection(:,1).^2) + (dataStepDetection(:,2).^2) + (dataStepDetection(:,3).^2), 2));
|
||||
magNoGfilt = magfilt - mean(magfilt);
|
||||
minPeakHeight2 = 1*std(magNoGfilt); % peak-height threshold of 1 x SD of the demeaned magnitude (tuned by visual inspection; an earlier version used 2 x SD)
|
||||
[pks, locs] = findpeaks(magNoGfilt, 'MINPEAKHEIGHT', minPeakHeight2,'MINPEAKDISTANCE',0.3*FS); % for step detection
|
||||
numStepsOption2_filt = numel(pks); % counts number of locs for turn detection
|
||||
|
||||
diffLocs = diff(locs); % number of samples between successive detected peaks
|
||||
avg_diffLocs = mean(diffLocs); % average distance between steps
|
||||
std_diffLocs = std(diffLocs); % standard deviation of distance between steps
|
||||
|
||||
figure;
|
||||
findpeaks(diffLocs, 'MINPEAKHEIGHT', avg_diffLocs, 'MINPEAKDISTANCE',0.2*FS); % these values have been chosen based on visual inspection of the signal
|
||||
line([1 length(diffLocs)],[avg_diffLocs avg_diffLocs])
|
||||
[pks_diffLocs, locs_diffLocs] = findpeaks(diffLocs, 'MINPEAKHEIGHT', avg_diffLocs,'MINPEAKDISTANCE',0.2*FS); % values were initially 5
|
||||
locsTurns = [locs(locs_diffLocs), locs(locs_diffLocs+1)];
|
||||
|
||||
%magNoGfilt_copy = magNoGfilt;
|
||||
VTAcc_copy = data(:,1);
|
||||
APAcc_copy = data(:,3);
|
||||
for k = 1: size(locsTurns,1);
|
||||
VTAcc_copy(locsTurns(k,1):locsTurns(k,2)) = NaN;
|
||||
APAcc_copy(locsTurns(k,1):locsTurns(k,2)) = NaN;
|
||||
end
|
||||
|
||||
% Visualising signal;
|
||||
if plotit
|
||||
figure()
|
||||
subplot(2,1,1)
|
||||
hold on;
|
||||
plot(VTAcc,'b')
|
||||
plot(VTAcc_copy, 'r');
|
||||
title('Inside Straight: Filtered data VT with turns highlighted in blue')
|
||||
line([6000,12000,18000,24000,30000,36000;6000,12000,18000,24000,30000,36000],[5,5,5,5,5,5;10,10,10,10,10,10],'LineWidth',2,'Linestyle','--','color','k')
|
||||
hold on;
|
||||
subplot(2,1,2)
|
||||
plot(APAcc,'g')
|
||||
plot(APAcc_copy, 'r');
|
||||
title('Inside Straight: Filtered data AP with turns highlighted in blue')
|
||||
line([6000,12000,18000,24000,30000,36000;6000,12000,18000,24000,30000,36000],[0,0,0,0,0,0;10,10,10,10,10,10],'LineWidth',2,'Linestyle','--','color','k')
|
||||
hold off;
|
||||
end
|
||||
|
||||
% Check whether the number of turns x 20 m makes sense given the total
|
||||
% distance measured by the researcher.
|
||||
disp(['Number of turns detected = ' num2str(size(locsTurns,1))])
|
||||
%disp(['Total distance measured by researcher was = ' num2str(Distance)])
|
||||
|
||||
FilteredData = dataAcc;
|
||||
end
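% A hedged usage sketch (assumed inputs): rawAcc is an N x 3 matrix of raw
% phone accelerations sampled at 100 Hz that still needs reordering and
% realignment, so ApplyRealignment is true and plotting is enabled.
FS = 100;
[locsTurns, FilteredData, FootContacts] = ...
    ControlsDetermineLocationTurnsFunc(rawAcc, FS, true, true, struct());
disp(size(locsTurns,1))   % number of detected turns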
|
89
DetermineLocationTurnsFunc.m
Normal file
@ -0,0 +1,89 @@
|
||||
function [locsTurns,FilteredData,FootContacts] = DetermineLocationTurnsFunc(inputData,FS,ApplyRealignment,plotit,Distance,ResultStruct)
|
||||
% Description: Determine the location of turns, plot for visual inspection
|
||||
|
||||
% Input: Acc Data (in previous stage realigned?)
|
||||
% Realigned sensor data to VT-ML-AP frame
|
||||
|
||||
if ApplyRealignment % apply realignment as described in Rispens S, Pijnappels M, van Schooten K, Beek PJ, Daffertshofer A, van Dieën JH (2014).
|
||||
data = inputData(:,[3,2,4]); % reorder data to 1 = V; 2 = ML; 3 = AP
|
||||
% Consistency of gait characteristics as determined from acceleration data collected at different trunk locations. Gait Posture 2014;40(1):187-92.
|
||||
[RealignedAcc, ~] = RealignSensorSignalHRAmp(data, FS);
|
||||
dataAcc = RealignedAcc;
|
||||
else % realignment/detrend performed in earlier stage!
|
||||
data = inputData;
|
||||
dataAcc = inputData;
|
||||
end
|
||||
% Filter data
|
||||
[B,A] = butter(2,20/(FS/2),'low'); % Filters data very strongly which is needed to determine turns correctly
|
||||
dataStepDetection = filtfilt(B,A,dataAcc);
|
||||
VTAcc = data(:,1);
|
||||
APAcc = data(:,3);
|
||||
|
||||
%% Determine Steps -
|
||||
% USE THE AP ACCELERATION: WIEBREN ZIJLSTRA: ASSESSMENT OF SPATIO-TEMPORAL PARAMETERS DURING UNCONSTRAINED WALKING (2004)
|
||||
|
||||
% In order to run the step detection script we first need to run an autocorrelation function;
|
||||
StrideTimeRange = [0.2 4.0]; % Range to search for stride time (seconds)
|
||||
|
||||
[ResultStruct] = AutocorrStrides(dataStepDetection,FS, StrideTimeRange,ResultStruct); % first guess of stridetimesamples
|
||||
|
||||
% StrideTimeSamples is needed as an input for the stepcountFunc;
|
||||
StrideTimeSamples = ResultStruct.StrideTimeSamples;
|
||||
|
||||
[PksAndLocsCorrected] = StepcountFunc(data,StrideTimeSamples,FS);
|
||||
FootContacts = PksAndLocsCorrected; % previous version LD: (1:2:end,2);
|
||||
numStepsOption2_filt = numel(FootContacts); % counts number of steps;
|
||||
|
||||
%% Determine Turns
|
||||
% To convert the XYZ acceleration vectors at each point in time into scalar values,
|
||||
% calculate the magnitude of each vector. This way, you can detect large changes in overall acceleration,
|
||||
% such as steps taken while walking, regardless of device orientation.
|
||||
magfilt = sqrt(sum((dataStepDetection(:,1).^2) + (dataStepDetection(:,2).^2) + (dataStepDetection(:,3).^2), 2));
|
||||
magNoGfilt = magfilt - mean(magfilt);
|
||||
minPeakHeight2 = 1*std(magNoGfilt); % used to be 2 % based on visual inspection, parameter tuning was performed on *X*standard deviation from MInPeak (used to be 1 SD)
|
||||
[pks, locs] = findpeaks(magNoGfilt, 'MINPEAKHEIGHT', minPeakHeight2,'MINPEAKDISTANCE',0.4*FS); % for step detection
|
||||
numStepsOption2_filt = numel(pks); % counts number of locs for turn detection
|
||||
|
||||
diffLocs = diff(locs); % calculates difference in location magnitude differences
|
||||
avg_diffLocs = mean(diffLocs); % average distance between steps
|
||||
std_diffLocs = std(diffLocs); % standard deviation of distance between steps
|
||||
|
||||
figure;
|
||||
findpeaks(diffLocs, 'MINPEAKHEIGHT', avg_diffLocs, 'MINPEAKDISTANCE',0.2*FS); % these values have been chosen based on visual inspection of the signal
|
||||
line([1 length(diffLocs)],[avg_diffLocs avg_diffLocs])
|
||||
[pks_diffLocs, locs_diffLocs] = findpeaks(diffLocs, 'MINPEAKHEIGHT', avg_diffLocs,'MINPEAKDISTANCE',0.2*FS); % values were initially 5
|
||||
locsTurns = [locs(locs_diffLocs), locs(locs_diffLocs+1)];
|
||||
|
||||
%magNoGfilt_copy = magNoGfilt;
|
||||
VTAcc_copy = data(:,1);
|
||||
APAcc_copy = data(:,3);
|
||||
for k = 1: size(locsTurns,1);
|
||||
VTAcc_copy(locsTurns(k,1):locsTurns(k,2)) = NaN;
|
||||
APAcc_copy(locsTurns(k,1):locsTurns(k,2)) = NaN;
|
||||
end
|
||||
|
||||
% Visualising signal;
|
||||
if plotit
|
||||
figure()
|
||||
subplot(2,1,1)
|
||||
hold on;
|
||||
plot(VTAcc,'b')
|
||||
plot(VTAcc_copy, 'r');
|
||||
title('Inside Straight: Filtered data VT with turns highlighted in blue')
|
||||
line([6000,12000,18000,24000,30000,36000;6000,12000,18000,24000,30000,36000],[-7,-7,-7,-7,-7,-7;7,7,7,7,7,7],'LineWidth',2,'Linestyle','--','color','k')
|
||||
hold on;
|
||||
subplot(2,1,2)
|
||||
plot(APAcc,'g')
|
||||
plot(APAcc_copy, 'r');
|
||||
title('Inside Straight: Filtered data AP with turns highlighted in blue')
|
||||
line([6000,12000,18000,24000,30000,36000;6000,12000,18000,24000,30000,36000],[-7,-7,-7,-7,-7,-7;7,7,7,7,7,7],'LineWidth',2,'Linestyle','--','color','k')
|
||||
hold off;
|
||||
end
|
||||
|
||||
% Check if number of turns * 20 m are making sense based on total
|
||||
% distance measured by researcher.
|
||||
disp(['Number of turns detected = ' num2str(size(locsTurns,1))])
|
||||
disp(['Total distance measured by researcher was = ' num2str(Distance)])
|
||||
|
||||
FilteredData = dataAcc;
|
||||
end
|
BIN
FINAL_MAP_CLBP_Controls_23052021.xls
Normal file
Binary file not shown.
BIN
FINAL_NPRSBaseline_Minute_23052021.xls
Normal file
Binary file not shown.
BIN
FINAL_NPRS_GaitCharacteristics_23052021.xls
Normal file
Binary file not shown.
126
FalseNearestNeighborsSR.m
Normal file
@ -0,0 +1,126 @@
|
||||
function fnnM = FalseNearestNeighborsSR(xV,tauV,mV,escape,theiler)
|
||||
% fnnM = FalseNearestNeighborsSR(xV,tauV,mV,escape,theiler)
|
||||
% FALSENEARESTNEIGHBORS computes the percentage of false nearest neighbors
|
||||
% for a range of delays in 'tauV' and embedding dimensions in 'mV'.
|
||||
% INPUT
|
||||
% xV : Vector of the scalar time series
|
||||
% tauV : A vector of the delay times.
|
||||
% mV : A vector of the embedding dimension.
|
||||
% escape : A factor of escaping from the neighborhood. Default=10.
|
||||
% theiler : the Theiler window to exclude time correlated points in the
|
||||
% search for neighboring points. Default=0.
|
||||
% OUTPUT:
|
||||
% fnnM : A matrix of size 'ntau' x 'nm', where 'ntau' is the number of
|
||||
% given delays and 'nm' is the number of given embedding
|
||||
% dimensions, containing the percentage of false nearest
|
||||
% neighbors.
|
||||
%========================================================================
|
||||
% <FalseNearestNeighbors.m>, v 1.0 2010/02/11 22:09:14 Kugiumtzis & Tsimpiris
|
||||
% This is part of the MATS-Toolkit http://eeganalysis.web.auth.gr/
|
||||
|
||||
%========================================================================
|
||||
% Copyright (C) 2010 by Dimitris Kugiumtzis and Alkiviadis Tsimpiris
|
||||
% <dkugiu@gen.auth.gr>
|
||||
|
||||
%========================================================================
|
||||
% Version: 1.0
|
||||
|
||||
% LICENSE:
|
||||
% This program is free software; you can redistribute it and/or modify
|
||||
% it under the terms of the GNU General Public License as published by
|
||||
% the Free Software Foundation; either version 3 of the License, or
|
||||
% any later version.
|
||||
%
|
||||
% This program is distributed in the hope that it will be useful,
|
||||
% but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
% GNU General Public License for more details.
|
||||
%
|
||||
% You should have received a copy of the GNU General Public License
|
||||
% along with this program. If not, see http://www.gnu.org/licenses/>.
|
||||
|
||||
%=========================================================================
|
||||
% Reference : D. Kugiumtzis and A. Tsimpiris, "Measures of Analysis of Time Series (MATS):
|
||||
% A Matlab Toolkit for Computation of Multiple Measures on Time Series Data Bases",
|
||||
% Journal of Statistical Software, in press, 2010
|
||||
|
||||
% Link : http://eeganalysis.web.auth.gr/
|
||||
%=========================================================================
|
||||
% Updates:
|
||||
% Sietse Rispens, January 2012: use a different kdtree algorithm,
|
||||
% searching k nearest neighbours, to improve performance
|
||||
%
|
||||
%=========================================================================
|
||||
fthres = 0.1; % A factor of the data SD to be used as the maximum radius
|
||||
% for searching for valid nearest neighbor.
|
||||
propthres = fthres; % Limit for the proportion of valid points, i.e. points
|
||||
% for which a nearest neighbor was found. If the proportion
|
||||
% of valid points is below this limit, do not compute
|
||||
% FNN.
|
||||
|
||||
n = length(xV);
|
||||
if isempty(escape), escape=10; end
|
||||
if isempty(theiler), theiler=0; end
|
||||
% Rescale to [0,1] and add infinitesimal noise to have distinct samples
|
||||
xmin = min(xV);
|
||||
xmax = max(xV);
|
||||
xV = (xV - xmin) / (xmax-xmin);
|
||||
xV = AddNoise(xV,10^(-10));
|
||||
ntau = length(tauV);
|
||||
nm = length(mV);
|
||||
fnnM = NaN*ones(ntau,nm);
|
||||
for itau = 1:ntau
|
||||
tau = tauV(itau);
|
||||
for im=1:nm
|
||||
m = mV(im);
|
||||
nvec = n-m*tau; % to be able to add the component x(nvec+tau) for m+1
|
||||
if nvec-theiler < 2
|
||||
break;
|
||||
end
|
||||
xM = NaN*ones(nvec,m);
|
||||
for i=1:m
|
||||
xM(:,m-i+1) = xV(1+(i-1)*tau:nvec+(i-1)*tau);
|
||||
end
|
||||
% k-d-tree data structure of the training set for the given m
|
||||
TreeRoot=kdtree_build(xM);
|
||||
% For each target point, find the nearest neighbor, and check whether
|
||||
% the distance increase over the escape distance by adding the next
|
||||
% component for m+1.
|
||||
idxV = NaN*ones(nvec,1);
|
||||
distV = NaN*ones(nvec,1);
|
||||
k0 = 2; % The initial number of nearest neighbors to look for
|
||||
kmax = min(2*theiler + 2,nvec); % The maximum number of nearest neighbors to look for
|
||||
for i=1:nvec
|
||||
tarV = xM(i,:);
|
||||
iV = [];
|
||||
k=k0;
|
||||
kmaxreached = 0;
|
||||
while isempty(iV) && ~kmaxreached
|
||||
[neiindV, neidisV] = kdtree_k_nearest_neighbors(TreeRoot,tarV,k);
|
||||
% [neiM,neidisV,neiindV]=kdrangequery(TreeRoot,tarV,rthres*sqrt(m));
|
||||
[oneidisV,oneiindV]=sort(neidisV);
|
||||
neiindV = neiindV(oneiindV);
|
||||
neidisV = neidisV(oneiindV);
|
||||
iV = find(abs(neiindV(1)-neiindV(2:end))>theiler);
|
||||
if k >= kmax
|
||||
kmaxreached = 1;
|
||||
elseif isempty(iV)
|
||||
k = min(kmax,k*2);
|
||||
end
|
||||
end
|
||||
if ~isempty(iV)
|
||||
idxV(i) = neiindV(iV(1)+1);
|
||||
distV(i) = neidisV(iV(1)+1);
|
||||
end
|
||||
end % for i
|
||||
iV = find(~isnan(idxV));
|
||||
nproper = length(iV);
|
||||
% Compute fnn only if there is a sufficient number of target points
|
||||
% having nearest neighbor (in R^m) within the threshold distance
|
||||
if nproper>propthres*nvec
|
||||
nnfactorV = 1+(xV(iV+m*tau)-xV(idxV(iV)+m*tau)).^2./distV(iV).^2;
|
||||
fnnM(itau,im) = length(find(nnfactorV > escape^2))/nproper;
|
||||
end
|
||||
kdtree_delete(TreeRoot); % Free the pointer to k-d-tree
|
||||
end
|
||||
end
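% In a hedged reading of the loop above, a neighbour j of target point i is
% counted as false when appending the (m+1)-th delay coordinate inflates their
% squared distance by more than escape^2:
%   ( d_ij(m)^2 + (x(i+m*tau) - x(j+m*tau))^2 ) / d_ij(m)^2 > escape^2,
% analogous to the false-nearest-neighbour test of Kennel et al. (1992).
% Hypothetical usage, scanning embedding dimensions 1-10 at a single delay:
xV = cumsum(randn(2000,1));                    % example series (illustration only)
fnn = FalseNearestNeighborsSR(xV, 10, 1:10, 10, 0);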
|
@ -2,7 +2,7 @@ function [ResultStruct] = GaitOutcomesTrunkAccFuncIH(inputData,FS,LegLength,Wind
|
||||
|
||||
% DESCRIPTON: Trunk analysis of Iphone data without the need for step detection
|
||||
% CL Nov 2019
|
||||
% Adapted IH feb-april 2020
|
||||
% Adapted LD feb 2021 (IH feb 2020)
|
||||
|
||||
% Columns of the smartphone data:
|
||||
% 1st column is time data;
|
||||
@ -47,7 +47,6 @@ N_Harm = 12; % Number of harmonics used for harmonic ratio
|
||||
LowFrequentPowerThresholds = ...
|
||||
[0.7 1.4]; % Threshold frequencies for estimation of low-frequent power percentages
|
||||
Lyap_m = 7; % Embedding dimension (used in Lyapunov estimations)
|
||||
Lyap_FitWinLen = round(60/100*FS); % Fitting window length (used in Lyapunov estimations Rosenstein's method)
|
||||
Sen_m = 5; % Dimension, the length of the subseries to be matched (used in sample entropy estimation)
|
||||
Sen_r = 0.3; % Tolerance, the maximum distance between two samples to qualify as match, relative to std of DataIn (used in sample entropy estimation)
|
||||
NStartEnd = [100];
|
||||
@ -59,21 +58,25 @@ ResultStruct = struct();
|
||||
% Apply Realignment & Filter data
|
||||
|
||||
if ApplyRealignment % apply realignment as described in Rispens S, Pijnappels M, van Schooten K, Beek PJ, Daffertshofer A, van Dieën JH (2014).
|
||||
data = inputData(:, [3,2,4]); % reorder data to 1 = V; 2= ML, 3 = AP%
|
||||
data = inputData; % data are already reordered to 1 = V; 2 = ML; 3 = AP
|
||||
% Consistency of gait characteristics as determined from acceleration data collected at different trunk locations. Gait Posture 2014;40(1):187-92.
|
||||
[RealignedAcc, ~] = RealignSensorSignalHRAmp(data, FS);
|
||||
dataAcc = RealignedAcc;
|
||||
[B,A] = butter(2,20/(FS/2),'low');
|
||||
dataAcc_filt = filtfilt(B,A,dataAcc);
|
||||
else % we assume that the data are already reordered to 1 = V; 2 = ML; 3 = AP in an earlier stage
|
||||
[B,A] = butter(2,20/(FS/2),'low');
|
||||
else % for the CONTROLS we assume the data are already reordered to 1 = V; 2 = ML; 3 = AP
|
||||
%data = inputData(:,[3,2,1]);
|
||||
%[RealignedAcc, ~] = RealignSensorSignalHRAmp(data, FS); might not be necessary
|
||||
%dataAcc = RealignedAcc;
|
||||
data = inputData;
|
||||
dataAcc = inputData;
|
||||
dataAcc_filt = filtfilt(B,A,dataAcc);
|
||||
[B,A] = butter(2,20/(FS/2),'low');
|
||||
dataAcc_filt = filtfilt(B,A,inputData);
|
||||
end
|
||||
|
||||
|
||||
%% Step dectection
|
||||
% Determines the number of steps in the signal so that the first 30 and last 30 steps in the signal can be removed
|
||||
%% Step detection
|
||||
% Determines the number of steps in the signal so that the first and last step in the signal can be removed
|
||||
|
||||
if ApplyRemoveSteps
|
||||
|
||||
@ -84,15 +87,15 @@ if ApplyRemoveSteps
|
||||
StrideTimeSamples = ResultStruct.StrideTimeSamples;
|
||||
|
||||
% Calculate the number of steps;
|
||||
[PksAndLocsCorrected] = StepcountFunc(dataAcc_filt,StrideTimeSamples,FS);
|
||||
[PksAndLocsCorrected] = StepcountFunc(data,StrideTimeSamples,FS);
|
||||
% This function selects steps based on negative and positive values.
|
||||
% However to determine the steps correctly we only need one of these;
|
||||
LocsSteps = PksAndLocsCorrected(1:2:end,2);
|
||||
LocsStepsLD = PksAndLocsCorrected;
|
||||
|
||||
%% Cut data & remove currents results
|
||||
% Remove 20 steps in the beginning and end of data
|
||||
dataAccCut = dataAcc(LocsSteps(31):LocsSteps(end-30),:);
|
||||
dataAccCut_filt = dataAcc_filt(LocsSteps(31):LocsSteps(end-30),:);
|
||||
% Remove 1 step in the beginning and end of data
|
||||
dataAccCut = dataAcc(LocsStepsLD(1):LocsStepsLD(end-1),:);
|
||||
dataAccCut_filt = dataAcc_filt(LocsStepsLD(1):LocsStepsLD(end-1),:);
|
||||
|
||||
% Clear currently saved results from Autocorrelation Analysis
|
||||
|
||||
@ -100,6 +103,10 @@ if ApplyRemoveSteps
|
||||
clear PksAndLocsCorrected;
|
||||
clear LocsSteps;
|
||||
|
||||
% Change window length necessary if ApplyRemoveSteps? (16-2-2013 LD)
|
||||
|
||||
WindowLen = 10*FS;
|
||||
|
||||
else;
|
||||
dataAccCut = dataAcc;
|
||||
dataAccCut_filt = dataAcc_filt;
|
||||
@ -108,49 +115,10 @@ end
|
||||
%% Calculate stride parameters
|
||||
ResultStruct = struct; % create empty struct
|
||||
|
||||
% Run function AutoCorrStrides, Outcomeparameters: StrideRegularity,RelativeStrideVariability,StrideTimeSamples,StrideTime
|
||||
% Run function AutoCorrStrides, Outcomeparameters: StrideRegularity AP/VT ,StrideTimeSamples,StrideTime
|
||||
[ResultStruct] = AutocorrStrides(dataAccCut_filt,FS, StrideTimeRange,ResultStruct);
|
||||
StrideTimeSamples = ResultStruct.StrideTimeSamples; % needed as input for other functions
|
||||
|
||||
% Calculate Step symmetry --> method 1
|
||||
ij = 1;
|
||||
dirSymm = [1,3]; % Gait Symmetry is only informative in AP/V direction: See Tura A, Raggi M, Rocchi L, Cutti AG, Chiari L: Gait symmetry and regularity in transfemoral amputees assessed by trunk accelerations. J Neuroeng Rehabil 2010, 7:4.
|
||||
|
||||
for jk=1:length(dirSymm)
|
||||
[C, lags] = AutocorrRegSymmSteps(dataAccCut_filt(:,dirSymm(jk)));
|
||||
[Ad,p] = findpeaks(C,'MinPeakProminence',0.2, 'MinPeakHeight', 0.2);
|
||||
|
||||
if size(Ad,1) > 1
|
||||
Ad1 = Ad(1);
|
||||
Ad2 = Ad(2);
|
||||
GaitSymm(:,ij) = abs((Ad1-Ad2)/mean([Ad1+Ad2]))*100;
|
||||
else
|
||||
GaitSymm(:,ij) = NaN;
|
||||
end
|
||||
ij = ij +1;
|
||||
end
|
||||
% Save outcome in struct;
|
||||
ResultStruct.GaitSymm_V = GaitSymm(1);
|
||||
ResultStruct.GaitSymm_AP = GaitSymm(2);
|
||||
|
||||
% Calculate Step symmetry --> method 2
|
||||
[PksAndLocsCorrected] = StepcountFunc(dataAccCut_filt,StrideTimeSamples,FS);
|
||||
LocsSteps = PksAndLocsCorrected(2:2:end,2);
|
||||
|
||||
if rem(size(LocsSteps,1),2) == 0; % if the number of steps is even
|
||||
LocsSteps2 = LocsSteps(1:2:end);
|
||||
else
|
||||
LocsSteps2 = LocsSteps(3:2:end);
|
||||
end
|
||||
|
||||
LocsSteps1 = LocsSteps(2:2:end);
|
||||
DiffLocs2 = diff(LocsSteps2);
|
||||
DiffLocs1 = diff(LocsSteps1);
|
||||
StepTime2 = DiffLocs2(1:end-1)/FS; % leave last one out because it is higher
|
||||
StepTime1 = DiffLocs1(1:end-1)/FS;
|
||||
SI = abs((2*(StepTime2-StepTime1))./(StepTime2+StepTime1))*100;
|
||||
ResultStruct.GaitSymmIndex = nanmean(SI);
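% Worked example of the symmetry index above: with alternating step times of
% 0.52 s and 0.48 s, SI = |2*(0.52-0.48)| / (0.52+0.48) * 100 = 8 %, i.e. an
% 8 % relative difference between consecutive (left/right) step durations.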
|
||||
|
||||
%% Calculate spatiotemporal stride parameters
|
||||
|
||||
% Measures from height variation by double integration of VT accelerations and high-pass filtering
|
||||
@ -158,53 +126,45 @@ ResultStruct.GaitSymmIndex = nanmean(SI);
|
||||
|
||||
%% Measures derived from spectral analysis
|
||||
|
||||
AccVectorLen = sqrt(sum(dataAccCut_filt(:,1:3).^2,2));
|
||||
AccVectorLen = sqrt(sum(dataAccCut_filt(:,1:3).^2,2)); % WindowLen -> 10*Fs OR on InputSignal
|
||||
[ResultStruct] = SpectralAnalysisGaitfunc(dataAccCut_filt,WindowLen,FS,N_Harm,LowFrequentPowerThresholds,AccVectorLen,ResultStruct);
|
||||
|
||||
|
||||
%% Calculation non-linear parameters;
|
||||
|
||||
% cut into windows of size WindowLen
|
||||
N_Windows = floor(size(dataAccCut,1)/WindowLen);
|
||||
N_Windows = floor(size(dataAccCut,1)/WindowLen); % Not sure if WindowLen should be different?
|
||||
N_SkipBegin = ceil((size(dataAccCut,1)-N_Windows*WindowLen)/2);
|
||||
LyapunovWolf = nan(N_Windows,3);
|
||||
LyapunovRosen = nan(N_Windows,3);
|
||||
SE= nan(N_Windows,3);
|
||||
|
||||
for WinNr = 1:N_Windows;
|
||||
AccWin = dataAccCut(N_SkipBegin+(WinNr-1)*WindowLen+(1:WindowLen),:);
|
||||
for j=1:3
|
||||
[LyapunovWolf(WinNr,j),~] = CalcMaxLyapWolfFixedEvolv(AccWin(:,j),FS,struct('m',Lyap_m));
|
||||
[LyapunovRosen(WinNr,j),outpo] = CalcMaxLyapConvGait(AccWin(:,j),FS,struct('m',Lyap_m,'FitWinLen',Lyap_FitWinLen));
|
||||
[SE(WinNr,j)] = funcSampleEntropy(AccWin(:,j), Sen_m, Sen_r);
|
||||
% no correction for FS; SE does increase with higher FS but effect is considered negligible as range is small (98-104HZ). Might consider updating r to account for larger ranges.
|
||||
end
|
||||
end
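% Following Richman & Moorman (2000), funcSampleEntropy is assumed to return
% SampEn(m, r) = -ln(A/B), where B counts template matches of length Sen_m and
% A matches of length Sen_m+1, with tolerance r = Sen_r x the SD of the window;
% lower values indicate a more regular, more predictable signal.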
|
||||
|
||||
LyapunovWolf = nanmean(LyapunovWolf,1);
|
||||
LyapunovRosen = nanmean(LyapunovRosen,1);
|
||||
SampleEntropy = nanmean(SE,1);
|
||||
|
||||
ResultStruct.LyapunovWolf_V = LyapunovWolf(1);
|
||||
ResultStruct.LyapunovWolf_ML = LyapunovWolf(2);
|
||||
ResultStruct.LyapunovWolf_AP = LyapunovWolf(3);
|
||||
ResultStruct.LyapunovRosen_V = LyapunovRosen(1);
|
||||
ResultStruct.LyapunovRosen_ML = LyapunovRosen(2);
|
||||
ResultStruct.LyapunovRosen_AP = LyapunovRosen(3);
|
||||
ResultStruct.SampleEntropy_V = SampleEntropy(1);
|
||||
ResultStruct.SampleEntropy_ML = SampleEntropy(2);
|
||||
ResultStruct.SampleEntropy_AP = SampleEntropy(3);
|
||||
|
||||
if isfield(ResultStruct,'StrideFrequency')
|
||||
LyapunovPerStrideWolf = LyapunovWolf/ResultStruct.StrideFrequency;
|
||||
LyapunovPerStrideRosen = LyapunovRosen/ResultStruct.StrideFrequency;
|
||||
end
|
||||
%% Calculate RMS in each direction: added february 2021 by LD, CONSTRUCT: 'Pace'
|
||||
% Sekine, M., Tamura, T., Yoshida, M., Suda, Y., Kimura, Y., Miyoshi, H., ... & Fujimoto, T. (2013). A gait abnormality measure based on root mean square of trunk acceleration. Journal of neuroengineering and rehabilitation, 10(1), 1-7.
|
||||
|
||||
ResultStruct.LyapunovPerStrideWolf_V = LyapunovPerStrideWolf(1);
|
||||
ResultStruct.LyapunovPerStrideWolf_ML = LyapunovPerStrideWolf(2);
|
||||
ResultStruct.LyapunovPerStrideWolf_AP = LyapunovPerStrideWolf(3);
|
||||
ResultStruct.LyapunovPerStrideRosen_V = LyapunovPerStrideRosen(1);
|
||||
ResultStruct.LyapunovPerStrideRosen_ML = LyapunovPerStrideRosen(2);
|
||||
ResultStruct.LyapunovPerStrideRosen_AP = LyapunovPerStrideRosen(3);
|
||||
Data_Centered = normalize(dataAcc_filt,'center','mean'); % The RMS coincides with the Sd since the Acc signals are transformed to give a mean equal to zero
|
||||
RMS = rms(Data_Centered);
|
||||
|
||||
ResultStruct.RMS_V = RMS(1);
|
||||
ResultStruct.RMS_ML = RMS(2);
|
||||
ResultStruct.RMS_AP = RMS(3);
|
||||
|
||||
end
|
210
GaitOutcomesTrunkAccFuncLD.m
Normal file
@ -0,0 +1,210 @@
|
||||
function [ResultStruct] = GaitOutcomesTrunkAccFuncLD(inputData,FS,LegLength,WindowLen,ApplyRealignment,ApplyRemoveSteps)
|
||||
|
||||
% Description: Trunk analysis of Iphone data without the need for step detection
|
||||
% CL Nov 2019
|
||||
% Adapted IH feb-april 2020
|
||||
|
||||
% Columns of the smartphone data:
|
||||
% 1st column is time data;
|
||||
% 2nd column is X, medio-lateral: + left, - right
|
||||
% 3rd column is Y, vertical: + downwards, - upwards
|
||||
% 4th column is Z, anterior- posterior : + forwards, - backwards
|
||||
|
||||
%% Input Trunk accelerations during locomotion in VT, ML, AP direction
|
||||
% InputData: Acceleration signal with time and accelerations in VT,ML and
|
||||
% AP direction.
|
||||
% FS: sample frequency of the Accdata
|
||||
% LegLength: length of the leg of the participant in m;
|
||||
|
||||
|
||||
%% Output
|
||||
% ResultStruct: structure containing all outcome measures calculated
|
||||
% Spectral parameters, spatiotemporal gait parameters, non-linear
|
||||
% parameters
|
||||
% fields and subfields: include the multiple measurements of a subject
|
||||
|
||||
%% Literature
|
||||
% Richman & Moorman, 2000; [ sample entropy]
|
||||
% Bisi & Stagni Gait & Posture 2016, 47 (6) 37-42
|
||||
% Kavanagh et al., Eur J Appl Physiol 2005 94: 468-475; Human Movement Science 24 (2005) 574-587 [synchrony]
|
||||
% Moe-Nilssen, J Biomech 2004 37, 121-126 [autocorrelation step regularity and symmetry]
|
||||
% Kobsar et al. Gait & Posture 2014 39, 553-557 [synchrony]
|
||||
% Rispens et al., Gait & Posture 2014, 40, 187-192 [realignment of axes]
|
||||
% Zijlstra & Hof, Gait & Posture 2003 18(2), 1-10 [spatiotemporal gait variables]
|
||||
% Lamoth et al, 2002 [index of harmonicity]
|
||||
% Costa et al., Physica A 330 (2003) 53-60 [multiscale entropy]
|
||||
% Cignetti F, Decker LM, Stergiou N. Ann Biomed Eng. 2012
|
||||
% May;40(5):1122-30. doi: 10.1007/s10439-011-0474-3. Epub 2011 Nov 25. [
|
||||
% Wolf vs. Rosenstein Lyapunov]
|
||||
|
||||
|
||||
%% Settings
|
||||
Gr = 9.81; % Gravity acceleration, multiplication factor for accelerations
|
||||
StrideFreqEstimate = 1.00; % Used to set search for stride frequency from 0.5*StrideFreqEstimate until 2*StrideFreqEstimate
|
||||
StrideTimeRange = [0.2 4.0]; % Range to search for stride time (seconds)
|
||||
IgnoreMinMaxStrides = 0.10; % Number or percentage of highest&lowest values ignored for improved variability estimation
|
||||
N_Harm = 12; % Number of harmonics used for harmonic ratio, index of harmonicity and phase fluctuation
|
||||
LowFrequentPowerThresholds = ...
|
||||
[0.7 1.4]; % Threshold frequencies for estimation of low-frequent power percentages
|
||||
Lyap_m = 7; % Embedding dimension (used in Lyapunov estimations)
|
||||
Lyap_FitWinLen = round(60/100*FS); % Fitting window length (used in Lyapunov estimations Rosenstein's method)
|
||||
Sen_m = 5; % Dimension, the length of the subseries to be matched (used in sample entropy estimation)
|
||||
Sen_r = 0.3; % Tolerance, the maximum distance between two samples to qualify as match, relative to std of DataIn (used in sample entropy estimation)
|
||||
NStartEnd = [100];
|
||||
M = 5; % maximum template length
|
||||
ResultStruct = struct();
|
||||
|
||||
%% Filter and Realign Accdata
|
||||
|
||||
% Apply Realignment & Filter data
|
||||
|
||||
if ApplyRealignment % apply realignment as described in Rispens S, Pijnappels M, van Schooten K, Beek PJ, Daffertshofer A, van Dieën JH (2014).
|
||||
data = inputData(:, [3,2,4]); % reorder data to 1 = V; 2= ML, 3 = AP%
|
||||
% Consistency of gait characteristics as determined from acceleration data collected at different trunk locations. Gait Posture 2014;40(1):187-92.
|
||||
[RealignedAcc, ~] = RealignSensorSignalHRAmp(data, FS);
|
||||
dataAcc = RealignedAcc;
|
||||
[B,A] = butter(2,20/(FS/2),'low');
|
||||
dataAcc_filt = filtfilt(B,A,dataAcc);
|
||||
else % we assume that the data are already reordered to 1 = V; 2 = ML; 3 = AP in an earlier stage
|
||||
[B,A] = butter(2,20/(FS/2),'low');
|
||||
dataAcc = inputData;
|
||||
dataAcc_filt = filtfilt(B,A,dataAcc);
|
||||
end
|
||||
|
||||
|
||||
%% Step detection
|
||||
% Determines the number of steps in the signal so that the first and last step in the signal can be removed
|
||||
|
||||
if ApplyRemoveSteps
|
||||
|
||||
% In order to run the step detection script we first need to run an autocorrelation function;
|
||||
[ResultStruct] = AutocorrStrides(dataAcc_filt,FS, StrideTimeRange,ResultStruct);
|
||||
|
||||
% StrideTimeSamples is needed as an input for the stepcountFunc;
|
||||
StrideTimeSamples = ResultStruct.StrideTimeSamples;
|
||||
|
||||
% Calculate the number of steps;
|
||||
[PksAndLocsCorrected] = StepcountFunc(dataAcc_filt,StrideTimeSamples,FS);
|
||||
% This function selects steps based on negative and positive values.
|
||||
% However to determine the steps correctly we only need one of these;
|
||||
LocsSteps = PksAndLocsCorrected(1:2:end,2);
|
||||
|
||||
%% Cut data & remove currents results
|
||||
% Remove 1 step in the beginning and end of data (comment Bert Otten)
|
||||
dataAccCut = dataAcc(LocsSteps(1):LocsSteps(end-1),:);
|
||||
dataAccCut_filt = dataAcc_filt(LocsSteps(1):LocsSteps(end-1),:);
|
||||
|
||||
% Clear currently saved results from Autocorrelation Analysis
|
||||
|
||||
clear ResultStruct;
|
||||
clear PksAndLocsCorrected;
|
||||
clear LocsSteps;
|
||||
|
||||
else;
|
||||
dataAccCut = dataAcc;
|
||||
dataAccCut_filt = dataAcc_filt;
|
||||
end
|
||||
|
||||
%% Calculate stride parameters
|
||||
ResultStruct = struct; % create empty struct
|
||||
|
||||
% Run function AutoCorrStrides, Outcomeparameters: StrideRegularity,RelativeStrideVariability,StrideTimeSamples,StrideTime
|
||||
[ResultStruct] = AutocorrStrides(dataAccCut_filt,FS, StrideTimeRange,ResultStruct);
|
||||
StrideTimeSamples = ResultStruct.StrideTimeSamples; % needed as input for other functions
|
||||
|
||||
% Calculate Step symmetry --> method 1
|
||||
ij = 1;
|
||||
dirSymm = [1,3]; % Gait Symmetry is only informative in AP/V direction: See Tura A, Raggi M, Rocchi L, Cutti AG, Chiari L: Gait symmetry and regularity in transfemoral amputees assessed by trunk accelerations. J Neuroeng Rehabil 2010, 7:4.
|
||||
|
||||
for jk=1:length(dirSymm)
|
||||
[C, lags] = AutocorrRegSymmSteps(dataAccCut_filt(:,dirSymm(jk)));
|
||||
[Ad,p] = findpeaks(C,'MinPeakProminence',0.2, 'MinPeakHeight', 0.2);
|
||||
|
||||
if size(Ad,1) > 1
|
||||
Ad1 = Ad(1);
|
||||
Ad2 = Ad(2);
|
||||
GaitSymm(:,ij) = abs((Ad1-Ad2)/mean([Ad1+Ad2]))*100;
|
||||
else
|
||||
GaitSymm(:,ij) = NaN;
|
||||
end
|
||||
ij = ij +1;
|
||||
end
|
||||
% Save outcome in struct;
|
||||
ResultStruct.GaitSymm_V = GaitSymm(1);
|
||||
ResultStruct.GaitSymm_AP = GaitSymm(2);
|
||||
|
||||
% Calculate Step symmetry --> method 2
|
||||
[PksAndLocsCorrected] = StepcountFunc(dataAccCut_filt,StrideTimeSamples,FS);
|
||||
LocsSteps = PksAndLocsCorrected(2:2:end,2);
|
||||
|
||||
if rem(size(LocsSteps,1),2) == 0; % if the number of steps is even
|
||||
LocsSteps2 = LocsSteps(1:2:end);
|
||||
else
|
||||
LocsSteps2 = LocsSteps(3:2:end);
|
||||
end
|
||||
|
||||
LocsSteps1 = LocsSteps(2:2:end);
|
||||
DiffLocs2 = diff(LocsSteps2);
|
||||
DiffLocs1 = diff(LocsSteps1);
|
||||
StepTime2 = DiffLocs2(1:end-1)/FS; % leave last one out because it is higher
|
||||
StepTime1 = DiffLocs1(1:end-1)/FS;
|
||||
SI = abs((2*(StepTime2-StepTime1))./(StepTime2+StepTime1))*100;
|
||||
ResultStruct.GaitSymmIndex = nanmean(SI);
|
||||
|
||||
%% Calculate spatiotemporal stride parameters
|
||||
|
||||
% Measures from height variation by double integration of VT accelerations and high-pass filtering
|
||||
[ResultStruct] = SpatioTemporalGaitParameters(dataAccCut_filt,StrideTimeSamples,ApplyRealignment,LegLength,FS,IgnoreMinMaxStrides,ResultStruct);
|
||||
|
||||
%% Measures derived from spectral analysis
|
||||
|
||||
AccVectorLen = sqrt(sum(dataAccCut_filt(:,1:3).^2,2));
|
||||
[ResultStruct] = SpectralAnalysisGaitfunc(dataAccCut_filt,WindowLen,FS,N_Harm,LowFrequentPowerThresholds,AccVectorLen,ResultStruct);
|
||||
|
||||
|
||||
%% Calculation non-linear parameters;
|
||||
|
||||
% cut into windows of size WindowLen
|
||||
N_Windows = floor(size(dataAccCut,1)/WindowLen);
|
||||
N_SkipBegin = ceil((size(dataAccCut,1)-N_Windows*WindowLen)/2);
|
||||
LyapunovWolf = nan(N_Windows,3);
|
||||
LyapunovRosen = nan(N_Windows,3);
|
||||
SE= nan(N_Windows,3);
|
||||
|
||||
for WinNr = 1:N_Windows;
|
||||
AccWin = dataAccCut(N_SkipBegin+(WinNr-1)*WindowLen+(1:WindowLen),:);
|
||||
for j=1:3
|
||||
[LyapunovWolf(WinNr,j),~] = CalcMaxLyapWolfFixedEvolv(AccWin(:,j),FS,struct('m',Lyap_m));
|
||||
[LyapunovRosen(WinNr,j),outpo] = CalcMaxLyapConvGait(AccWin(:,j),FS,struct('m',Lyap_m,'FitWinLen',Lyap_FitWinLen));
|
||||
[SE(WinNr,j)] = funcSampleEntropy(AccWin(:,j), Sen_m, Sen_r);
|
||||
% no correction for FS; SE does increase with higher FS but effect is considered negligible as range is small (98-104HZ). Might consider updating r to account for larger ranges.
|
||||
end
|
||||
end
|
||||
|
||||
LyapunovWolf = nanmean(LyapunovWolf,1);
|
||||
LyapunovRosen = nanmean(LyapunovRosen,1);
|
||||
SampleEntropy = nanmean(SE,1);
|
||||
|
||||
ResultStruct.LyapunovWolf_V = LyapunovWolf(1);
|
||||
ResultStruct.LyapunovWolf_ML = LyapunovWolf(2);
|
||||
ResultStruct.LyapunovWolf_AP = LyapunovWolf(3);
|
||||
ResultStruct.LyapunovRosen_V = LyapunovRosen(1);
|
||||
ResultStruct.LyapunovRosen_ML = LyapunovRosen(2);
|
||||
ResultStruct.LyapunovRosen_AP = LyapunovRosen(3);
|
||||
ResultStruct.SampleEntropy_V = SampleEntropy(1);
|
||||
ResultStruct.SampleEntropy_ML = SampleEntropy(2);
|
||||
ResultStruct.SampleEntropy_AP = SampleEntropy(3);
|
||||
|
||||
if isfield(ResultStruct,'StrideFrequency')
|
||||
LyapunovPerStrideWolf = LyapunovWolf/ResultStruct.StrideFrequency;
|
||||
LyapunovPerStrideRosen = LyapunovRosen/ResultStruct.StrideFrequency;
|
||||
end
|
||||
|
||||
ResultStruct.LyapunovPerStrideWolf_V = LyapunovPerStrideWolf(1);
|
||||
ResultStruct.LyapunovPerStrideWolf_ML = LyapunovPerStrideWolf(2);
|
||||
ResultStruct.LyapunovPerStrideWolf_AP = LyapunovPerStrideWolf(3);
|
||||
ResultStruct.LyapunovPerStrideRosen_V = LyapunovPerStrideRosen(1);
|
||||
ResultStruct.LyapunovPerStrideRosen_ML = LyapunovPerStrideRosen(2);
|
||||
ResultStruct.LyapunovPerStrideRosen_AP = LyapunovPerStrideRosen(3);
|
||||
|
||||
end
|
BIN
GaitVarOutcomesCombined_230221.mat
Normal file
Binary file not shown.
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load Diff
@ -1,103 +0,0 @@
|
||||
%% Gait Variability Analysis CLBP
|
||||
|
||||
% Gait Variability Analysis
|
||||
% Script created for MAP 2020-2021
|
||||
% adapted from Claudine Lamoth and Iris Hagoort
|
||||
% version1 October 2020
|
||||
|
||||
% Input: needs mat file which contains all raw accelerometer data
|
||||
% Input: needs excel file containing the participant information including
|
||||
% leg length.
|
||||
%% Clear and close;
|
||||
|
||||
clear;
|
||||
close all;
|
||||
%% Load data;
|
||||
% Select 1 trial.
|
||||
% For loop to import all data will be used at a later stage
|
||||
|
||||
[FNaam,FilePad] = uigetfile('*.xls','Load phyphox data...');
|
||||
filename =[FilePad FNaam];
|
||||
PhyphoxData = xlsread(filename)
|
||||
|
||||
%load('Phyphoxdata.mat'); % loads accelerometer data, is stored in struct with name AccData
|
||||
%load('ExcelInfo.mat');
|
||||
%Participants = fields(AccData);
|
||||
%% Settings;
|
||||
%adapted from GaitOutcomesTrunkAccFuncIH
|
||||
|
||||
LegLength = 98; % LegLength info not available!
|
||||
FS = 100; % Sample frequency
|
||||
|
||||
Gr = 9.81; % Gravity acceleration, multiplication factor for accelerations
|
||||
StrideFreqEstimate = 1.00; % Used to set search for stride frequency from 0.5*StrideFreqEstimate until 2*StrideFreqEstimate
|
||||
StrideTimeRange = [0.2 4.0]; % Range to search for stride time (seconds)
|
||||
IgnoreMinMaxStrides = 0.10; % Number or percentage of highest&lowest values ignored for improved variability estimation
|
||||
N_Harm = 12; % Number of harmonics used for harmonic ratio, index of harmonicity and phase fluctuation
|
||||
LowFrequentPowerThresholds = ...
|
||||
[0.7 1.4]; % Threshold frequencies for estimation of low-frequent power percentages
|
||||
Lyap_m = 7; % Embedding dimension (used in Lyapunov estimations)
|
||||
Lyap_FitWinLen = round(60/100*FS); % Fitting window length (used in Lyapunov estimations Rosenstein's method)
|
||||
Sen_m = 5; % Dimension, the length of the subseries to be matched (used in sample entropy estimation)
|
||||
Sen_r = 0.3; % Tolerance, the maximum distance between two samples to qualify as match, relative to std of DataIn (used in sample entropy estimation)
|
||||
NStartEnd = [100];
|
||||
M = 5; % Maximum template length
|
||||
ResultStruct = struct(); % Empty struct
|
||||
|
||||
|
||||
inputData = (PhyphoxData(:,[1 2 3 4])); % matrix with accelerometer data
|
||||
ApplyRealignment = true;
|
||||
ApplyRemoveSteps = true;
|
||||
WindowLen = FS*10;
|
||||
|
||||
|
||||
%% Filter and Realign Accdata
|
||||
% dataAcc depends on ApplyRealignment = true/false
|
||||
% dataAcc_filt (low pass Butterworth Filter + zerophase filtering
|
||||
|
||||
[dataAcc, dataAcc_filt] = FilterandRealignFunc(inputData,FS,ApplyRealignment);
|
||||
|
||||
%% Step dectection
|
||||
% Determines the number of steps in the signal so that the first 30 and last 30 steps in the signal can be removed
|
||||
% StrideTimeSamples is needed for calculation stride parameters!
|
||||
|
||||
[dataAccCut,dataAccCut_filt,StrideTimeSamples] = StepDetectionFunc(FS,ApplyRemoveSteps,dataAcc,dataAcc_filt,StrideTimeRange);
|
||||
|
||||
%% Calculate Stride Parameters
|
||||
% Outcomeparameters: StrideRegularity,RelativeStrideVariability,StrideTimeSamples,StrideTime
|
||||
|
||||
[ResultStruct] = CalculateStrideParametersFunc(dataAccCut_filt,FS,ApplyRemoveSteps,dataAcc,dataAcc_filt,StrideTimeRange);
|
||||
|
||||
%% Calculate spatiotemporal stride parameters
|
||||
% Measures from height variation by double integration of VT accelerations and high-pass filtering
|
||||
% StepLengthMean; Distance; WalkingSpeedMean; StrideTimeVariability; StrideSpeedVariability;
|
||||
% StrideLengthVariability; StrideTimeVariabilityOmitOutlier; StrideSpeedVariabilityOmitOutlier; StrideLengthVariabilityOmitOutlier;
|
||||
|
||||
[ResultStruct] = SpatioTemporalGaitParameters(dataAccCut_filt,StrideTimeSamples,ApplyRealignment,LegLength,FS,IgnoreMinMaxStrides,ResultStruct);
|
||||
|
||||
%% Measures derived from spectral analysis
|
||||
% IndexHarmonicity_V/ML/AP/ALL ; HarmonicRatio_V/ML/AP ; HarmonicRatioP_V/ML/AP ; FrequencyVariability_V/ML/AP; Stride Frequency
|
||||
|
||||
AccVectorLen = sqrt(sum(dataAccCut_filt(:,1:3).^2,2));
|
||||
[ResultStruct] = SpectralAnalysisGaitfunc(dataAccCut_filt,WindowLen,FS,N_Harm,LowFrequentPowerThresholds,AccVectorLen,ResultStruct);
|
||||
|
||||
%% calculate non-linear parameters
|
||||
% Outcomeparameters: Sample Entropy, Lyapunov exponents
|
||||
|
||||
[ResultStruct] = CalculateNonLinearParametersFunc(ResultStruct,dataAccCut,WindowLen,FS,Lyap_m,Lyap_FitWinLen,Sen_m,Sen_r);
|
||||
|
||||
% Save struct as .mat file
|
||||
% save('GaitVarOutcomesParticipantX.mat', 'OutcomesAcc');
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
%% AggregateFunction (seperate analysis per minute);
|
||||
% see AggregateEpisodeValues.m
|
||||
%
|
||||
%
|
||||
|
||||
|
@ -1,6 +1,9 @@
|
||||
|
||||
function [ResultStruct] = HarmonicityFrequency(dataAccCut_filt,P,F, StrideFrequency,dF,LowFrequentPowerThresholds,N_Harm,FS,AccVectorLen,ResultStruct)
|
||||
|
||||
% LD 15-04-2021: Resolve the problem of identifying the fundamental frequency in the power
|
||||
% spectral density (stride vs. step frequency).
|
||||
|
||||
% Add sum of power spectra (as a rotation-invariant spectrum)
|
||||
P = [P,sum(P,2)];
|
||||
PS = sqrt(P);
|
||||
@ -8,6 +11,7 @@ PS = sqrt(P);
|
||||
% Calculate the measures for the power per separate dimension
|
||||
for i=1:size(P,2);
|
||||
% Relative cumulative power and frequencies that correspond to these cumulative powers
|
||||
|
||||
PCumRel = cumsum(P(:,i))/sum(P(:,i));
|
||||
PSCumRel = cumsum(PS(:,i))/sum(PS(:,i));
|
||||
FCumRel = F+0.5*dF;
|
||||
@ -21,12 +25,12 @@ for i=1:size(P,2);
|
||||
PHarm = zeros(N_Harm,1);
|
||||
PSHarm = zeros(N_Harm,1);
|
||||
for Harm = 1:N_Harm,
|
||||
FHarmRange = (Harm+[-0.1 0.1])*StrideFrequency;
|
||||
FHarmRange = (Harm+[-0.1 0.1])*(StrideFrequency); % In view of possible drift, which could lead to missing or widened peaks, the power of each harmonic is integrated over a band of +/- 0.1 x StrideFrequency around the harmonic frequency.
|
||||
PHarm(Harm) = diff(interp1(FCumRel,PCumRel,FHarmRange));
|
||||
PSHarm(Harm) = diff(interp1(FCumRel,PSCumRel,FHarmRange));
|
||||
end
|
||||
|
||||
% Derive index of harmonicity
|
||||
% Derive index of harmonicity, which is the spectral power of the fundamental harmonic divided by the summed power of the first six harmonics
|
||||
if i == 2 % for ML we expect odd instead of even harmonics
|
||||
IndexHarmonicity(i) = PHarm(1)/sum(PHarm(1:2:12));
|
||||
elseif i == 4
|
||||
@ -55,7 +59,8 @@ for i=1:size(P,2);
|
||||
FrequencyVariability(i) = nansum(StrideFreqFluctuation./(1:N_Harm)'.*PHarm)/nansum(PHarm);
|
||||
|
||||
if i<4,
|
||||
% Derive harmonic ratio (two variants)
|
||||
% Derive harmonic ratio (two variants) --> Ratio of the even and
|
||||
% odd harmonics!
|
||||
if i == 2 % for ML we expect odd instead of even harmonics
|
||||
HarmonicRatio(i) = sum(PSHarm(1:2:end-1))/sum(PSHarm(2:2:end)); % relative to summed 3d spectrum
|
||||
HarmonicRatioP(i) = sum(PHarm(1:2:end-1))/sum(PHarm(2:2:end)); % relative to own spectrum
|
||||
@ -70,14 +75,6 @@ end
|
||||
ResultStruct.IndexHarmonicity_V = IndexHarmonicity(1); % higher = smoother, more regular pattern
|
||||
ResultStruct.IndexHarmonicity_ML = IndexHarmonicity(2); % higher = smoother, more regular pattern
|
||||
ResultStruct.IndexHarmonicity_AP = IndexHarmonicity(3); % higher = smoother, more regular pattern
|
||||
ResultStruct.IndexHarmonicity_All = IndexHarmonicity(4);
|
||||
ResultStruct.HarmonicRatio_V = HarmonicRatio(1);
|
||||
ResultStruct.HarmonicRatio_ML = HarmonicRatio(2);
|
||||
ResultStruct.HarmonicRatio_AP = HarmonicRatio(3);
|
||||
ResultStruct.HarmonicRatioP_V = HarmonicRatioP(1);
|
||||
ResultStruct.HarmonicRatioP_ML = HarmonicRatioP(2);
|
||||
ResultStruct.HarmonicRatioP_AP = HarmonicRatioP(3);
|
||||
ResultStruct.FrequencyVariability_V = FrequencyVariability(1);
|
||||
ResultStruct.FrequencyVariability_ML = FrequencyVariability(2);
|
||||
ResultStruct.FrequencyVariability_AP = FrequencyVariability(3);
|
||||
ResultStruct.StrideFrequency = StrideFrequency;
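% Hedged summary of the measures stored above, as computed in this loop:
%   HarmonicRatio (V, AP) = sum(P at even harmonics) / sum(P at odd harmonics)
%   HarmonicRatio (ML)    = sum(P at odd harmonics)  / sum(P at even harmonics)
%   IndexHarmonicity      = P at the fundamental / sum(P of the first six
%                           (odd-numbered, for ML) harmonics)
% where P at harmonic k is the spectral power integrated over a band of
% +/- 0.1 x StrideFrequency around k times the stride frequency.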
|
55
Hratio.m
Normal file
@ -0,0 +1,55 @@
|
||||
function [Hratio,wb]=Hratio(x,fmax,numfreq)
|
||||
% function [Hratio,wb]=Hratio(x,fmax,numfreq);
|
||||
%
|
||||
% INPUT, signal, fmax is max frequency that can be considered as main
|
||||
% frequency = around 1 for stride and around 2 for steps.
|
||||
% numfreq = number of superharmonics in analysis
|
||||
% OUTPUT: harmonic ratio (Hratio) and main frequency (wb)
|
||||
% PLOT: with no output a plot is generated
|
||||
% CALLS: WindMainFreq.m
|
||||
% CL 2016
|
||||
|
||||
|
||||
if nargin<3 || isempty(numfreq), numfreq = 10; end
|
||||
|
||||
fs = 100;% sample frequency
|
||||
le=length(x);
|
||||
temp=[];
|
||||
n=10*length(x);
|
||||
ws=fix(length(x)/2);
|
||||
no=fix(0.99*ws);
|
||||
|
||||
[Px,fx]=spectrum2(x,n,no,boxcar(le),100,'mean');
|
||||
Px = Px/sum(Px); % All power spectral densities were normalized by dividing the power by the sum of the total power spectrum, which equals the variance.
|
||||
|
||||
% select frequency
|
||||
wb = WindMainFreq(x,100,fmax) ; % mainfrequency frequency
|
||||
wbr= wb + 0.055; % % basis mainfrequency + tail
|
||||
wbl = wb - 0.055;
|
||||
|
||||
wind=find(fx >=wbl & fx<= wbr); % indices of the fundamental-frequency band
|
||||
|
||||
PI_0=sum(Px(wind));
|
||||
|
||||
temp=PI_0;
|
||||
for k=1:numfreq;
|
||||
w=wb*(k+1);
|
||||
wr=w+0.055;
|
||||
wl=w-0.055;
|
||||
wInd=find(fx >=wl & fx<=wr);
|
||||
if nargout ==0; % diagnostic plot when no output argument is requested
|
||||
plot(fx,Px,'r'); hold on;
|
||||
set(gca,'XLim',[0 6],'FontSize',8)
|
||||
xlabel('Frequency (Hz)'); ylabel('Power');
|
||||
plot(wb,max(Px),'r.','MarkerSize', 10),
|
||||
plot(wbr,0,'c.'); plot(wbl,0,'c.');
|
||||
plot(w,0,'r.','MarkerSize', 10)
|
||||
plot(wl,0,'c.'); plot(wr,0,'c.');
|
||||
end
|
||||
|
||||
PI=sum(Px(wInd));
|
||||
temp=[temp; PI];
|
||||
|
||||
end
|
||||
|
||||
Hratio = sum(temp(1:2:end-1))/sum(temp(2:2:end));
|
54
IndexOfHarmonicity.m
Normal file
@ -0,0 +1,54 @@
|
||||
function [HIndex,wb]=IndexOfHarmonicity(x,fmax,numfreq)
|
||||
% function [HIndex]=IndexOfHarmonicity(x,numfreq);
|
||||
%
|
||||
% INPUT, signal, fmax is max frequency that can be considered as main
|
||||
% frequency = around 1 for stride and around 2 for steps.
|
||||
% numfreq = number of superharmonics in analysis
|
||||
% OUTPUT: Harmonicity Index (HIndex) and main frequency (wb)
|
||||
% PLOT: with no output a plot is generated
|
||||
% CALLS: WindMainFreq.m
|
||||
% CL 2016
|
||||
|
||||
|
||||
if nargin<3 || isempty(numfreq), numfreq = 10; end
|
||||
|
||||
fs = 100;% sample frequency
|
||||
le=length(x);
|
||||
temp=[];
|
||||
n=10*length(x);
|
||||
ws=fix(length(x)/2);
|
||||
no=fix(0.99*ws);
|
||||
|
||||
[Px,fx]=spectrum2(x,n,no,boxcar(le),100,'mean');
|
||||
Px = Px/sum(Px); % All power spectral densities were normalized by dividing the power by the sum of the total power spectrum, which equals the variance.
|
||||
|
||||
% select frequency
|
||||
wb = WindMainFreq(x,100,fmax) ; % mainfrequency frequency
|
||||
wbr= wb + 0.055; % % basis mainfrequency + tail
|
||||
wbl = wb - 0.055;
|
||||
|
||||
wind=find(fx >=wbl & fx<= wbr); % indices of the fundamental-frequency band
|
||||
|
||||
PI_0=sum(Px(wind));
|
||||
|
||||
temp=PI_0;
|
||||
for k=1:numfreq;
|
||||
w=wb*(k+1);
|
||||
wr=w+0.055;
|
||||
wl=w-0.055;
|
||||
wInd=find(fx >=wl & fx<=wr);
|
||||
if nargout ==0; % diagnostic plot when no output argument is requested
|
||||
plot(fx,Px,'r'); hold on;
|
||||
set(gca,'XLim',[0 6],'FontSize',8)
|
||||
xlabel('Frequency (Hz)'); ylabel('Power');
|
||||
plot(wb,max(Px),'r.','MarkerSize', 10),
|
||||
plot(wbr,0,'c.'); plot(wbl,0,'c.');
|
||||
plot(w,0,'r.','MarkerSize', 10)
|
||||
plot(wl,0,'c.'); plot(wr,0,'c.');
|
||||
end
|
||||
|
||||
PI=sum(Px(wInd));
|
||||
temp=[temp; PI];
|
||||
|
||||
end
|
||||
HIndex=PI_0/sum(temp);
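% A hedged usage sketch (assumed inputs): x is a detrended vertical trunk
% acceleration sampled at 100 Hz; fmax ~ 1.2 Hz bounds the expected stride
% frequency and 10 superharmonics are used, as in the default above.
[HI, f0] = IndexOfHarmonicity(x, 1.2, 10);
fprintf('Index of harmonicity %.2f at main frequency %.2f Hz\n', HI, f0);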
|
BIN
Main_GaitVariabilityAnalysis_LD.mlx
Normal file
Binary file not shown.
@ -22,7 +22,7 @@ function [mutM,cummutM,minmuttauV] = MutualInformationHisPro(xV,tauV,bV,flag)
|
||||
% delays.
|
||||
% - cummutM : the vector of the cumulative mutual information values for
|
||||
% the given delays
|
||||
% - minmuttauV : the time of the first minimum of the mutual information.
|
||||
% - : the time of the first minimum of the mutual information.
|
||||
%========================================================================
|
||||
% <MutualInformationHisPro.m>, v 1.0 2010/02/11 22:09:14 Kugiumtzis & Tsimpiris
|
||||
% This is part of the MATS-Toolkit http://eeganalysis.web.auth.gr/
|
||||
|
BIN
Phyphoxdata.mat
Binary file not shown.
@ -1,17 +1,23 @@
|
||||
function [ResultStruct] = SpatioTemporalGaitParameters(dataAccCut_filt,StrideTimeSamples,ApplyRealignment,LegLength,FS,IgnoreMinMaxStrides,ResultStruct);
|
||||
%% Method Zijlstra & Hof
|
||||
% Mean step length and mean walking speed are estimated using the upward
|
||||
% and downward movements of the trunk. Assuming a compass gait type, CoM
|
||||
% movements in the sagittal plane follow a circular trajectory during each
|
||||
% single support phase. In this inverted pendulum model, changes in height
|
||||
% of CoM depend on step length. Thus, when changes in height are known,
|
||||
% step length can be predicted from geometrical characteristics as in
|
||||
|
||||
Cutoff = 0.1;
|
||||
MinDist = floor(0.7*0.5*StrideTimeSamples); % Use StrideTimeSamples estimated above
|
||||
DatalFilt = dataAccCut_filt;
|
||||
|
||||
DatalFilt = dataAccCut_filt;
|
||||
% From acceleration to velocity
|
||||
Vel = cumsum(detrend(DatalFilt,'constant'))/FS;
|
||||
[B,A] = butter(2,Cutoff/(FS/2),'high');
|
||||
[B,A] = butter(2,Cutoff/(FS/2),'high'); % To avoid integration drift, position data were high-pass filtered
|
||||
Pos = cumsum(filtfilt(B,A,Vel))/FS;
|
||||
PosFilt = filtfilt(B,A,Pos);
|
||||
PosFiltVT = PosFilt(:,1);
|
||||
PosFiltVT = PosFilt(:,1); % Changes in vertical position were calculated by a double integration of Y.
|
||||
|
||||
% Find minima and maxima in vertical position
|
||||
|
||||
% if ~ApplyRealignment % Signals were not realigned, so it has to be done here
|
||||
% MeanAcc = mean(AccLoco);
|
||||
@ -28,7 +34,8 @@ if isempty(PosPks) && isempty(NegPks)
else
    PksAndLocs = sortrows([PosPks,PosLocs,ones(size(PosPks)) ; NegPks,NegLocs,-ones(size(NegPks))], 2);
end
% Correct events for two consecutive maxima or two consecutive minima
%% Correct events for two consecutive maxima or two consecutive minima

Events = PksAndLocs(:,2);
NewEvents = PksAndLocs(:,2);
Signs = PksAndLocs(:,3);
@ -77,9 +84,9 @@ DH(DH>MaxDH) = MaxDH;
% (Use delta h and delta t to calculate walking speed: use formula from
% Z&H, but divide by 2 (skip factor 2) since we get the difference twice
% each step, and multiply by 1.25 which is the factor suggested by Z&H)
HalfStepLen = 1.25*sqrt(2*LegLength*DH-DH.^2);
HalfStepLen = 1.25*sqrt(2*LegLength*DH-DH.^2); % Formula is correct!
Distance = sum(HalfStepLen);
WalkingSpeedMean = Distance/(sum(DT)/FS);
WalkingSpeedMean = Distance/(sum(DT)/FS); % Gait Speed (m/s): average walking speed
% Estimate variabilities between strides
StrideLengths = HalfStepLen(1:end-3) + HalfStepLen(2:end-2) + HalfStepLen(3:end-1) + HalfStepLen(4:end);
StrideTimes = PksAndLocsCorrected(5:end,2)-PksAndLocsCorrected(1:end-4,2);
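% Illustrative numeric check (assumed values, not from the original file):
% the inverted-pendulum relation gives step length = 2*sqrt(2*l*h - h^2);
% with leg length l = 0.9 m and vertical CoM excursion h = 0.03 m this is
% 2*sqrt(2*0.9*0.03 - 0.03^2) ~ 0.46 m per step. The code above omits the
% factor 2 (hence HalfStepLen, since the height difference is obtained twice
% per step) and multiplies by the empirical factor 1.25 suggested by Z&H.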
@ -91,28 +98,25 @@ for i=1:4,
    WSS(i) = std(StrideSpeeds(i:4:end));
end

StepLengthMean=mean(StrideLengths);
StepLengthMean=mean(StrideLengths)/2; % 9-4-2021 LD, previous version with dividing by 2, THEN THIS SHOULD BE STRIDELENGTHMEAN!!
StrideLengthMean = mean(StrideLengths);
StrideTimeMean = mean(StrideTimes)/FS; % Samples to seconds
StrideSpeedMean = mean(StrideSpeeds);

StrideTimeVariability = min(STS);
StrideSpeedVariability = min(WSS);
StrideLengthVariability = std(StrideLengths);
% Estimate Stride time variability and stride speed variability by removing highest and lowest part
if ~isinteger(IgnoreMinMaxStrides)
    IgnoreMinMaxStrides = ceil(IgnoreMinMaxStrides*size(StrideTimes,1));
end
StrideTimesSorted = sort(StrideTimes);
StrideTimeVariabilityOmitOutlier = std(StrideTimesSorted(1+IgnoreMinMaxStrides:end-IgnoreMinMaxStrides));
StrideSpeedSorted = sort(StrideSpeeds);
StrideSpeedVariabilityOmitOutlier = std(StrideSpeedSorted(1+IgnoreMinMaxStrides:end-IgnoreMinMaxStrides));
StrideLengthsSorted = sort(StrideLengths);
StrideLengthVariabilityOmitOutlier = std(StrideLengthsSorted(1+IgnoreMinMaxStrides:end-IgnoreMinMaxStrides));

ResultStruct.StepLengthMean = StepLengthMean;
ResultStruct.Distance = Distance;
% LD 16-03-2021 - Calculate CoV as ''coefficient of relative variability''
CoVStrideTime = (nanstd(StrideTimes)/nanmean(StrideTimes))*100;
CoVStrideSpeed = (nanstd(StrideSpeeds)/nanmean(StrideSpeeds))*100;
CoVStrideLength = (nanstd(StrideLengths)/nanmean(StrideLengths))*100;

% ResultStruct
ResultStruct.WalkingSpeedMean = WalkingSpeedMean;
ResultStruct.StrideTimeVariability = StrideTimeVariability;
ResultStruct.StrideSpeedVariability = StrideSpeedVariability;
ResultStruct.StrideLengthVariability = StrideLengthVariability;
ResultStruct.StrideTimeVariabilityOmitOutlier = StrideTimeVariabilityOmitOutlier;
ResultStruct.StrideSpeedVariabilityOmitOutlier = StrideSpeedVariabilityOmitOutlier;
ResultStruct.StrideLengthVariabilityOmitOutlier = StrideLengthVariabilityOmitOutlier;

ResultStruct.StrideTimeMean = StrideTimeMean;
ResultStruct.CoVStrideTime = CoVStrideTime;

ResultStruct.StrideLengthMean = StrideLengthMean;
ResultStruct.CoVStrideLength = CoVStrideLength;
@ -2,14 +2,18 @@ function [ResultStruct] = SpectralAnalysisGaitfunc(dataAccCut_filt,WindowLen,FS,

P=zeros(0,size(dataAccCut_filt,2));

for i=1:size(dataAccCut_filt,2)
for i=1:size(dataAccCut_filt,2) % pwelch = Welch’s power spectral density estimate
    [P1,~] = pwelch(dataAccCut_filt(:,i),hamming(WindowLen),[],WindowLen,FS);
    [P2,F] = pwelch(dataAccCut_filt(end:-1:1,i),hamming(WindowLen),[],WindowLen,FS);
    [P2,F] = pwelch(dataAccCut_filt(end:-1:1,i),hamming(WindowLen),[],WindowLen,FS); % PSD of the time-reversed data
    P(1:numel(P1),i) = (P1+P2)/2;
end
dF = F(2)-F(1);
dF = F(2)-F(1); % frequency resolution

% Calculate stride frequency and peak widths
[StrideFrequency, ~, PeakWidth, MeanNormalizedPeakWidth] = StrideFrequencyRispen(P,F);
[ResultStruct] = HarmonicityFrequency(dataAccCut_filt, P,F, StrideFrequency,dF,LowFrequentPowerThresholds,N_Harm,FS,AccVectorLen,ResultStruct);

ResultStruct.IH_VT = IndexOfHarmonicity(dataAccCut_filt(:,1),(5*StrideFrequency),10); % expect fmax at step frequency (2* stridefrequency), what happens if you increase fmax (~ PSD)
ResultStruct.IH_ML = IndexOfHarmonicity(dataAccCut_filt(:,2),(5*StrideFrequency),10); % expect fmax at stride frequency
ResultStruct.IH_AP = IndexOfHarmonicity(dataAccCut_filt(:,3),(5*StrideFrequency),10); % expect fmax at step frequency (2* stridefrequency)
end
122
StepcountFunc.m
@ -1,65 +1,83 @@
|
||||
function [PksAndLocsCorrected] = StepcountFunc(dataAcc_filt,StrideTimeSamples,FS);

% Step count function extracted from another function:
function [PksAndLocsCorrected] = StepcountFunc(dataAcc,StrideTimeSamples,FS);
|
||||
|
||||
Cutoff = 0.1;
|
||||
MinDist = floor(0.7*0.5*StrideTimeSamples); % Use StrideTimeSamples estimated above
|
||||
DataLFilt = dataAcc_filt;
|
||||
|
||||
% From acceleration to
|
||||
Vel = cumsum(detrend(DataLFilt,'constant'))/FS;
|
||||
[B,A] = butter(2,Cutoff/(FS/2),'high');
|
||||
Pos = cumsum(filtfilt(B,A,Vel))/FS;
|
||||
PosFilt = filtfilt(B,A,Pos);
|
||||
PosFiltVT = PosFilt(:,1);
|
||||
%% USE THE AP ACCELERATION: WIEBREN ZIJLSTRA: ASSESSMENT OF SPATIO-TEMPORAL PARAMETERS DURING UNCONSTRAINED WALKING (2004)
|
||||
|
||||
% Find minima and maxima in vertical position
|
||||
[PosPks,PosLocs] = findpeaks(PosFiltVT(:,1),'minpeakdistance',MinDist);
|
||||
[NegPks,NegLocs] = findpeaks(-PosFiltVT(:,1),'minpeakdistance',MinDist);
|
||||
[B,A] = butter(4,5/(FS/2),'low'); % According to article Lamoth: Multiple gait parameters derived from iPod accelerometry predict age-related gait changes
|
||||
dataAcc = filtfilt(B,A,dataAcc);
|
||||
APAcceleration = dataAcc(:,3);
|
||||
|
||||
% Find minima and maxima in AP acceleration signal
|
||||
[PosPks,PosLocs] = findpeaks(APAcceleration(:,1),'minpeakdistance',MinDist);
|
||||
[NegPks,NegLocs] = findpeaks(-APAcceleration(:,1),'minpeakdistance',MinDist);
|
||||
NegPks = -NegPks;
|
||||
if isempty(PosPks) && isempty(NegPks)
|
||||
PksAndLocs = zeros(0,3);
|
||||
else
|
||||
PksAndLocs = sortrows([PosPks,PosLocs,ones(size(PosPks)) ; NegPks,NegLocs,-ones(size(NegPks))], 2);
|
||||
end
|
||||
% Correct events for two consecutive maxima or two consecutive minima
|
||||
Events = PksAndLocs(:,2);
|
||||
NewEvents = PksAndLocs(:,2);
|
||||
Signs = PksAndLocs(:,3);
|
||||
FalseEventsIX = find(diff(Signs)==0);
|
||||
PksAndLocsToAdd = zeros(0,3);
|
||||
PksAndLocsToAddNr = 0;
|
||||
for i=1:numel(FalseEventsIX),
|
||||
FIX = FalseEventsIX(i);
|
||||
if FIX <= 2
|
||||
% remove the event
|
||||
NewEvents(FIX) = nan;
|
||||
elseif FIX >= numel(Events)-2
|
||||
% remove the next event
|
||||
NewEvents(FIX+1) = nan;
|
||||
else
|
||||
StrideTimesWhenAdding = [Events(FIX+1)-Events(FIX-2),Events(FIX+3)-Events(FIX)];
|
||||
StrideTimesWhenRemoving = Events(FIX+3)-Events(FIX-2);
|
||||
if max(abs(StrideTimesWhenAdding-StrideTimeSamples)) < abs(StrideTimesWhenRemoving-StrideTimeSamples)
|
||||
% add an event
|
||||
[M,IX] = min(Signs(FIX)*PosFiltVT((Events(FIX)+1):(Events(FIX+1)-1)));
|
||||
PksAndLocsToAddNr = PksAndLocsToAddNr+1;
|
||||
PksAndLocsToAdd(PksAndLocsToAddNr,:) = [M,Events(FIX)+IX,-Signs(FIX)];
|
||||
else
|
||||
% remove an event
|
||||
if FIX >= 5 && FIX <= numel(Events)-5
|
||||
ExpectedEvent = (Events(FIX-4)+Events(FIX+5))/2;
|
||||
else
|
||||
ExpectedEvent = (Events(FIX-2)+Events(FIX+3))/2;
|
||||
end
|
||||
if abs(Events(FIX)-ExpectedEvent) > abs(Events(FIX+1)-ExpectedEvent)
|
||||
NewEvents(FIX) = nan;
|
||||
else
|
||||
NewEvents(FIX+1) = nan;
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
PksAndLocsCorrected = sortrows([PksAndLocs(~isnan(NewEvents),:);PksAndLocsToAdd],2);
|
||||
|
||||
PksAndLocsCorrected = PosLocs; % NB: overrides the corrected events above and returns only the positive-peak locations
|
||||
|
||||
end
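% Usage sketch (illustrative addition, not part of the original file): as
% returned above, PksAndLocsCorrected holds the locations of the positive
% peaks in the low-pass filtered AP acceleration, so a rough step count is
% simply the number of detected peaks (variable names are assumptions):
%   PksAndLocs = StepcountFunc(dataAcc, StrideTimeSamples, FS);
%   nSteps     = numel(PksAndLocs);   % one event per detected step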
|
||||
% DataLFilt = dataAcc_filt;
|
||||
|
||||
|
||||
% % From acceleration to
|
||||
% Vel = cumsum(detrend(DataLFilt,'constant'))/FS;
|
||||
% [B,A] = butter(2,Cutoff/(FS/2),'high');
|
||||
% Pos = cumsum(filtfilt(B,A,Vel))/FS;
|
||||
% PosFilt = filtfilt(B,A,Pos);
|
||||
% PosFiltVT = PosFilt(:,1);
|
||||
|
||||
% % Find minima and maxima in vertical position
|
||||
% [PosPks,PosLocs] = findpeaks(PosFiltVT(:,1),'minpeakdistance',MinDist);
|
||||
% [NegPks,NegLocs] = findpeaks(-PosFiltVT(:,1),'minpeakdistance',MinDist);
|
||||
% NegPks = -NegPks;
|
||||
% if isempty(PosPks) && isempty(NegPks)
|
||||
% PksAndLocs = zeros(0,3);
|
||||
% else
|
||||
% PksAndLocs = sortrows([PosPks,PosLocs,ones(size(PosPks)) ; NegPks,NegLocs,-ones(size(NegPks))], 2);
|
||||
% end
|
||||
|
||||
% % Correct events for two consecutive maxima or two consecutive minima
|
||||
% Events = PksAndLocs(:,2);
|
||||
% NewEvents = PksAndLocs(:,2);
|
||||
% Signs = PksAndLocs(:,3);
|
||||
% FalseEventsIX = find(diff(Signs)==0);
|
||||
% PksAndLocsToAdd = zeros(0,3);
|
||||
% PksAndLocsToAddNr = 0;
|
||||
% for i=1:numel(FalseEventsIX),
|
||||
% FIX = FalseEventsIX(i);
|
||||
% if FIX <= 2
|
||||
% % remove the event
|
||||
% NewEvents(FIX) = nan;
|
||||
% elseif FIX >= numel(Events)-2
|
||||
% % remove the next event
|
||||
% NewEvents(FIX+1) = nan;
|
||||
% else
|
||||
% StrideTimesWhenAdding = [Events(FIX+1)-Events(FIX-2),Events(FIX+3)-Events(FIX)];
|
||||
% StrideTimesWhenRemoving = Events(FIX+3)-Events(FIX-2);
|
||||
% if max(abs(StrideTimesWhenAdding-StrideTimeSamples)) < abs(StrideTimesWhenRemoving-StrideTimeSamples)
|
||||
% % add an event
|
||||
% [M,IX] = min(Signs(FIX)*APAcceleration((Events(FIX)+1):(Events(FIX+1)-1))); % APAcceleration used to be PosFiltVT
|
||||
% PksAndLocsToAddNr = PksAndLocsToAddNr+1;
|
||||
% PksAndLocsToAdd(PksAndLocsToAddNr,:) = [M,Events(FIX)+IX,-Signs(FIX)];
|
||||
% else
|
||||
% % remove an event
|
||||
% if FIX >= 5 && FIX <= numel(Events)-5
|
||||
% ExpectedEvent = (Events(FIX-4)+Events(FIX+5))/2;
|
||||
% else
|
||||
% ExpectedEvent = (Events(FIX-2)+Events(FIX+3))/2;
|
||||
% end
|
||||
% if abs(Events(FIX)-ExpectedEvent) > abs(Events(FIX+1)-ExpectedEvent)
|
||||
% NewEvents(FIX) = nan;
|
||||
% else
|
||||
% NewEvents(FIX+1) = nan;
|
||||
% end
|
||||
% end
|
||||
% end
|
||||
% end
|
||||
% PksAndLocsCorrected = sortrows([PksAndLocs(~isnan(NewEvents),:);PksAndLocsToAdd],2);
|
||||
|
@ -5,7 +5,8 @@ function [StrideFrequency, QualityInd, PeakWidth, MeanNormalizedPeakWidth] = Str
% pwelch spectral densities
%
% Input:
% AccXYZ: a three-dimensional time series with trunk accelerations
% AccXYZ: a three-dimensional time series with trunk accelerations (is
% for error)
% FS: the sample frequency of the time series
% StrideFreqGuess: a first guess of the stride frequency
%
@ -35,6 +36,15 @@ function [StrideFrequency, QualityInd, PeakWidth, MeanNormalizedPeakWidth] = Str

%% History
% February 2013, version 1.1, adapted from StrideFrequencyFrom3dAcc
%% Additional information
% Stride frequency was estimated from the median of the modal
% frequencies (half the modal frequency for VT and AP) for all three
% directions. Whenever the median of the three frequencies fell
% outside the range 0.6–1.2 Hz, it was replaced by one of the other
% two frequencies, if available, within this range. Finally, if all modal
% frequencies were within 10% of an integer multiple of the resulting
% frequency, it was replaced by the mean of the three associated base
% frequencies.

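% Minimal sketch of the selection rule described above (illustrative only;
% ModalF is assumed to hold the VT/ML/AP modal frequencies already divided
% by their harmonic number, i.e. halved for VT and AP):
%   SF = median(ModalF);
%   if SF < 0.6 || SF > 1.2
%       InRange = ModalF(ModalF >= 0.6 & ModalF <= 1.2);
%       if ~isempty(InRange), SF = InRange(1); end
%   end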
%% Check input
if size(AccXYZ,2) ~= 3
@ -44,7 +54,7 @@ elseif size(AccXYZ,1) < 10*F
end


%% Get PSD
%% Get PSD (Power Spectral Density)
if numel(F) == 1, % Calculate the PSD from time series AccXYZ, F is the sample frequency
    AccFilt = detrend(AccXYZ,'constant'); % Detrend data to get rid of DC component in most of the specific windows
    LenPSD = 10*F;
@ -57,13 +67,13 @@ elseif numel(F)==size(AccXYZ,1), % F are the frequencies of the power spectrum A
    Fwf = F;
    Pwf = AccXYZ;
end
Pwf(:,4) = sum(Pwf,2);
Pwf(:,4) = sum(Pwf,2); % if input = F


%% Estimate stride frequency
% set parameters
HarmNr = [2 1 2];
CommonRange = [0.6 1.2];
HarmNr = [2 1 2]; % AP / V --> Biphasic
CommonRange = [0.6 1.2]; % Whenever the median of the three frequencies fell outside the range 0.6–1.2 Hz, it was replaced by one of the other two frequencies,
% Get modal frequencies and the 'mean freq. of the peak'
for i=1:4,
    MF1I = find([zeros(5,1);Pwf(6:end,i)]==max([zeros(5,1);Pwf(6:end,i)]),1);
26
WindMainFreq.m
Normal file
@ -0,0 +1,26 @@
function Omega=WindMainFreq(x,fs,fmax);
% function Omega=WindMainFreq(x,fs,fmax);
% INPUT: x = signal; fs = sample frequency; fmax = max frequency for the
% main frequency
% OUTPUT: main frequency Omega
% CALLS: spectrum2.m
% PLOT: with no output arguments a plot is generated

if nargin<3 || isempty(fmax), fmax = 4; end
n=10*length(x);
ws=fix(length(x)/2);
no=fix(0.99*ws);

[p,f]=spectrum2(x,n,no,hamming(ws),fs,'mean');
p=p/sum(p);
aa=find(f>0.5 & f<fmax);
[a,b]=max(p(aa));
Omega=f(b+aa(1)-1);
if nargout ==0;
    ii=find(f(:,1)<=fmax);
    f=f(1:length(ii),:);
    p=p(1:length(ii),:);
    plot(f,p,'b');hold on;
    plot(Omega,max(p(aa)),'m.','MarkerSize', 15); %check w0
end
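% Example (illustrative addition, variable name assumed):
%   Omega = WindMainFreq(accVT, 100, 2);  % dominant frequency below 2 Hz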
|
||||
|
68
cross_sampen.m
Normal file
@ -0,0 +1,68 @@
|
||||
function [e,A,B]=cross_sampen(x,y,M,r,sflag);
%function [e,A,B]=cross_sampen(x,y,M,r);
%
% The Cross-sample Entropy (Cross-SampEn) quantifies the degree of synchronization between AP and ML, AP and V, and ML and V accelerations.
% Cross-SampEn is the negative natural logarithm of the conditional probability that epochs of length m that match point-wise
% in the two related signals also match for m+1 points, within a tolerance of r (in the present study m = 5 and r = 0.3).
%Input
%
%x,y    input data
%M      maximum template length
%r      matching tolerance
%sflag  flag to standardize signals (default yes/sflag=1)
%
%Output
%
%e      sample entropy estimates for m=0,1,...,M-1
%A      number of matches for m=1,...,M
%B      number of matches for m=0,...,M-1 excluding last point

if ~exist('sflag')|isempty(sflag),sflag=1;end
|
||||
y=y(:);
|
||||
x=x(:);
|
||||
ny=length(y);
|
||||
nx=length(x);
|
||||
N=nx;
|
||||
if sflag>0
|
||||
y=y-mean(y);
|
||||
sy=sqrt(mean(y.^2));
|
||||
y=y/sy;
|
||||
x=x-mean(x);
|
||||
sx=sqrt(mean(x.^2));
|
||||
x=x/sx;
|
||||
end
|
||||
|
||||
lastrun=zeros(nx,1);
|
||||
run=zeros(nx,1);
|
||||
A=zeros(M,1);
|
||||
B=zeros(M,1);
|
||||
p=zeros(M,1);
|
||||
e=zeros(M,1);
|
||||
|
||||
|
||||
for i=1:ny
|
||||
for j=1:nx
|
||||
if abs(x(j)-y(i))<r
|
||||
run(j)=lastrun(j)+1;
|
||||
M1=min(M,run(j));
|
||||
for m=1:M1
|
||||
A(m)=A(m)+1;
|
||||
if (i<ny)&(j<nx)
|
||||
B(m)=B(m)+1;
|
||||
end
|
||||
end
|
||||
else
|
||||
run(j)=0;
|
||||
end
|
||||
end
|
||||
for j=1:nx
|
||||
lastrun(j)=run(j);
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
N=ny*nx;
|
||||
B=[N;B(1:(M-1))];
|
||||
p=A./B;
|
||||
e=-log(p);
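% Usage sketch (illustrative addition, not part of the original file), with
% the parameter values mentioned in the help above (m up to 5, r = 0.3,
% standardized signals); variable names are assumptions:
%   [e,A,B] = cross_sampen(accAP, accML, 5, 0.3, 1);
%   % e(k) is the estimate for template length m = k-1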
|
BIN
delta_NRPS_Gaitoutcomes.xlsx
Normal file
Binary file not shown.
File diff suppressed because one or more lines are too long
@ -49,7 +49,7 @@ function [SE] = funcSampleEntropy(DataIn, m, r)
|
||||
if size(DataIn,1) ~= 1 && size(DataIn,2) ~= 1
|
||||
error('DataIn must be a vector');
|
||||
end
|
||||
DataIn = DataIn(:)/std(DataIn(:));
|
||||
DataIn = DataIn(:)/std(DataIn(:)); % The data is first normalized to unit variance, rendering the outcome scale-independent.
|
||||
N = size(DataIn,1);
|
||||
if N-m <= 0
|
||||
error('m must be smaller than the length of the time series DataIn');
|
||||
@ -58,10 +58,18 @@ end
|
||||
%% Create the vectors Xm to be compared
|
||||
Xm = zeros(N-m,m);
|
||||
for i = 1:m,
|
||||
Xm(:,i) = DataIn(i:end-1-m+i,1);
|
||||
Xm(:,i) = DataIn(i:end-1-m+i,1); % (1) length = DataIn - m, so in this case [X-5,m] double
|
||||
end
|
||||
|
||||
%% Count the numbers of matches for Xm and Xmplusone
|
||||
% 1) Divide up your data into vectors of length m
|
||||
% 2) Count your like matches (Vectors are considered a match if all numbers in the vector fall within +- r (0.3)
|
||||
% 3) Calculate your conditional probability
|
||||
% 4) Divide up your data into vectors of length m+1
|
||||
% 5) Count your like matches
|
||||
% 6) Calculate your conditional probability
|
||||
% 7) Entropy is a ratio of conditional probabilities
|
||||
|
||||
CountXm = 0;
|
||||
CountXmplusone = 0;
|
||||
XmDist = nan(size(Xm));
|
||||
@ -69,11 +77,11 @@ for i = 1:N-m,
|
||||
for j=1:m,
|
||||
XmDist(:,j)=abs(Xm(:,j)-Xm(i,j));
|
||||
end
|
||||
IdXmi = find(max(XmDist,[],2)<=r);
|
||||
CountXm = CountXm + length(IdXmi) - 1;
|
||||
CountXmplusone = CountXmplusone + sum(abs(DataIn(IdXmi+m)-DataIn(i+m))<=r) - 1;
|
||||
IdXmi = find(max(XmDist,[],2)<=r); % (2)
|
||||
CountXm = CountXm + length(IdXmi) - 1; % (3)
|
||||
CountXmplusone = CountXmplusone + sum(abs(DataIn(IdXmi+m)-DataIn(i+m))<=r) - 1; % (6)
|
||||
end
|
||||
|
||||
%% Return sample entropy
|
||||
SE = -log(CountXmplusone/CountXm);
|
||||
SE = -log(CountXmplusone/CountXm); % (7) A strictly periodic time series is completely predictable and will have a SEn of zero. SEn is defined as the negative natural logarithm of an estimate of the conditional probability that epochs of length m (m=5) that match point-wise within a tolerance r also match for m+1 points.
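% Usage sketch (illustrative addition, not part of the original file):
%   SE = funcSampleEntropy(accVT, 5, 0.3);   % m = 5, r = 0.3 as described above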
|
||||
|
||||
|
104
matlab.sty
@ -1,104 +0,0 @@
|
||||
\NeedsTeXFormat{LaTeX2e}
|
||||
\ProvidesPackage{matlab}
|
||||
|
||||
\RequirePackage{verbatim}
|
||||
\RequirePackage{fancyvrb}
|
||||
\RequirePackage{alltt}
|
||||
\RequirePackage{upquote}
|
||||
\RequirePackage[framemethod=tikz]{mdframed}
|
||||
\RequirePackage{hyperref}
|
||||
\RequirePackage{color}
|
||||
|
||||
|
||||
\newcommand{\maxwidth}[1]{\ifdim\linewidth>#1 #1\else\linewidth\fi}
|
||||
\newcommand{\mlcell}[1]{{\color{output}\verbatim@font#1}}
|
||||
|
||||
\definecolor{output}{gray}{0.4}
|
||||
|
||||
% Unicode character conversions
|
||||
\DeclareUnicodeCharacter{B0}{\ensuremath{^\circ}}
|
||||
\DeclareUnicodeCharacter{21B5}{\ensuremath{\hookleftarrow}}
|
||||
|
||||
% Paragraph indentation
|
||||
\setlength{\parindent}{0pt}
|
||||
|
||||
% Hyperlink style
|
||||
\hypersetup{
|
||||
colorlinks=true,
|
||||
linkcolor=blue,
|
||||
urlcolor=blue
|
||||
}
|
||||
|
||||
|
||||
% environment styles for MATLAB code and output
|
||||
\mdfdefinestyle{matlabcode}{%
|
||||
outerlinewidth=.5pt,
|
||||
linecolor=gray!20!white,
|
||||
roundcorner=2pt,
|
||||
innertopmargin=.5\baselineskip,
|
||||
innerbottommargin=.5\baselineskip,
|
||||
innerleftmargin=1em,
|
||||
backgroundcolor=gray!10!white
|
||||
}
|
||||
|
||||
\newenvironment{matlabcode}{\verbatim}{\endverbatim}
|
||||
\surroundwithmdframed[style=matlabcode]{matlabcode}
|
||||
|
||||
\newenvironment{matlaboutput}{%
|
||||
\Verbatim[xleftmargin=1.25em, formatcom=\color{output}]%
|
||||
}{\endVerbatim}
|
||||
|
||||
\newenvironment{matlabsymbolicoutput}{%
|
||||
\list{}{\leftmargin=1.25em\relax}%
|
||||
\item\relax%
|
||||
\color{output}\verbatim@font%
|
||||
}{\endlist}
|
||||
|
||||
\newenvironment{matlabtableoutput}[1]{%
|
||||
{\color{output}%
|
||||
\hspace*{1.25em}#1}%
|
||||
}{}
|
||||
|
||||
|
||||
% Table of Contents style
|
||||
\newcounter{multititle}
|
||||
\newcommand{\matlabmultipletitles}{\setcounter{multititle}{1}}
|
||||
|
||||
\newcounter{hastoc}
|
||||
\newcommand{\matlabhastoc}{\setcounter{hastoc}{1}}
|
||||
|
||||
\newcommand{\matlabtitle}[1]{
|
||||
\ifnum\value{multititle}>0
|
||||
\ifnum\value{hastoc}>0
|
||||
\addcontentsline{toc}{section}{#1}
|
||||
\fi
|
||||
\fi
|
||||
\section*{#1}
|
||||
}
|
||||
|
||||
\newcommand{\matlabheading}[1]{
|
||||
\ifnum\value{hastoc}>0
|
||||
\addcontentsline{toc}{subsection}{#1}
|
||||
\fi
|
||||
\subsection*{#1}
|
||||
}
|
||||
|
||||
\newcommand{\matlabheadingtwo}[1]{
|
||||
\ifnum\value{hastoc}>0
|
||||
\addcontentsline{toc}{subsubsection}{#1}
|
||||
\fi
|
||||
\subsubsection*{#1}
|
||||
}
|
||||
|
||||
\newcommand{\matlabheadingthree}[1]{
|
||||
\ifnum\value{hastoc}>0
|
||||
\addcontentsline{toc}{paragraph}{#1}
|
||||
\fi
|
||||
\paragraph*{#1}
|
||||
}
|
||||
|
||||
\newcommand{\matlabtableofcontents}[1]{
|
||||
\renewcommand{\contentsname}{#1}
|
||||
\tableofcontents
|
||||
}
|
||||
|
28
msentropy.m
Normal file
@ -0,0 +1,28 @@
|
||||
% Function for calculating multiscale entropy
% input: signal
% m: match point(s)
% r: matching tolerance
% factor: number of scale factors
% sampenc is available at http://people.ece.cornell.edu/land/PROJECTS/Complexity/sampenc.m
%
% Multi-scale sample entropy (Mscale-En) is an indicator of gait predictability. Multi-scale entropy takes the
% complexity of a system into account by calculating the
% predictability of a signal over time scales with increasing
% length. A ‘coarse-graining’ process is applied to the acceleration signals; non-overlapping windows of data
% points with an increasing length τ are constructed, with
% τ representing the time scale, with a tolerance of r (in the
% present study τ = 7 and r = 0.2). A completely predictable
% signal will adopt a Mscale-En value of 0 [29].

function e = msentropy(input,m,r,factor)

y=input;
y=y-mean(y);
y=y/std(y);

for i=1:factor
    s=coarsegraining(y,i); % this function is not yet included in the repository
    sampe=sampenc(s,m+1,r);
    e(i)=sampe(m+1);
end
e=e';
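% coarsegraining.m is referenced above but not yet part of the repository.
% A minimal sketch of the standard coarse-graining step is given below as a
% local function (an assumption, not the authors' implementation):
% non-overlapping windows of length tau are averaged into one sample each.
function s = coarsegraining(y,tau)
    n = floor(length(y)/tau);                 % number of complete windows
    s = mean(reshape(y(1:n*tau),tau,n),1)';   % average each window of length tau
end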
@ -0,0 +1,2 @@
<?xml version='1.0' encoding='UTF-8'?>
<Info />
|
427
spectrum2.m
Normal file
@ -0,0 +1,427 @@
|
||||
function [Spec,f,SpecConf] = spectrum2(varargin)
|
||||
%SPECTRUM Power spectrum estimate of one or two data sequences.
|
||||
% P=SPECTRUM2(X,NFFT,NOVERLAP,WIND) estimates the Power Spectral Density of
|
||||
% signal vector(s) X using Welch's averaged periodogram method. X must be a
|
||||
% set of column vectors. They are divided into overlapping sections, each of
|
||||
% which is detrended and windowed by the WINDOW parameter, then zero padded
|
||||
% to length NFFT. The magnitude squared of the length NFFT DFTs of the sec-
|
||||
% tions are averaged to form Pxx. P is a two times SIZE(X,2) column matrix
|
||||
% P = [Pxx Pxxc]; the second half of columns Pxxc are the 95% confidence
|
||||
% intervals. The number of rows of P is NFFT/2+1 for NFFT even, (NFFT+1)/2
|
||||
% for NFFT odd, or NFFT if the signal X is complex. If you specify a scalar
|
||||
% for WINDOW, a Hanning window of that length is used.
|
||||
%
|
||||
% [P,F] = SPECTRUM2(X,NFFT,NOVERLAP,WINDOW,Fs) given a sampling frequency
|
||||
% Fs returns a vector of frequencies the same length as Pxx at which the
|
||||
% PSD is estimated. PLOT(F,P(:,1:end/2)) plots the power spectrum estimate
|
||||
% versus true frequency.
|
||||
%
|
||||
% [P, F] = SPECTRUM2(X,NFFT,NOVERLAP,WINDOW,Fs,Pr) where Pr is a scalar
|
||||
% between 0 and 1, overrides the default 95% confidence interval and
|
||||
% returns the Pr*100% confidence interval for Pxx instead.
|
||||
%
|
||||
% SPECTRUM2(X) with no output arguments plots the PSD in the current
|
||||
% figure window, with confidence intervals.
|
||||
%
|
||||
% The default values for the parameters are NFFT = 256 (or SIZE(X,1),
|
||||
% whichever is smaller), NOVERLAP = 0, WINDOW = HANNING(NFFT), Fs = 2,
|
||||
% and Pr = .95. You can obtain a default parameter by leaving it out
|
||||
% or inserting an empty matrix [], e.g. SPECTRUM(X,[],128).
|
||||
%
|
||||
% P = SPECTRUM2(X,Y) performs spectral analysis of the two times SIZE(X,2)
|
||||
% sequences X and Y using the Welch method. SPECTRUM returns the 8 times
|
||||
% SIZE(X,2)column array
|
||||
% P = [Pxx Pyy Pxy Txy Cxy Pxxc Pyyc Pxyc]
|
||||
% where
|
||||
% Pxx = X-vector power spectral density
|
||||
% Pyy = Y-vector power spectral density
|
||||
% Pxy = Cross spectral density
|
||||
% Txy = Complex transfer function from X to Y = Pxy./Pxx
|
||||
% Cxy = Coherence function between X and Y = (abs(Pxy).^2)./(Pxx.*Pyy)
|
||||
% Pxxc,Pyyc,Pxyc = Confidence range.
|
||||
% All input and output options are otherwise exactly the same as for the
|
||||
% single input case.
|
||||
%
|
||||
% SPECTRUM2(X,Y) with no output arguments will plot Pxx, Pyy, abs(Txy),
|
||||
% angle(Txy) and Cxy in sequence, pausing between plots.
|
||||
%
|
||||
% SPECTRUM2(X,...,DFLAG), where DFLAG can be 'linear', 'mean' or 'none',
|
||||
% specifies a detrending mode for the prewindowed sections of X (and Y).
|
||||
% DFLAG can take the place of any parameter in the parameter list
|
||||
% (besides X) as long as it is last, e.g. SPECTRUM(X,'none');
|
||||
%
|
||||
% See also SPECTRUM, PSD, CSD, TFE, COHERE, SPECGRAM, SPECPLOT, DETREND,
|
||||
% PMTM, PMUSIC.
|
||||
% ETFE, SPA, and ARX in the Identification Toolbox.
|
||||
|
||||
% The units on the power spectra Pxx and Pyy are such that, using
|
||||
% Parseval's theorem:
|
||||
%
|
||||
% SUM(Pxx)/LENGTH(Pxx) = SUM(X.^2)/size(x,1) = COV(X)
|
||||
%
|
||||
% The RMS value of the signal is the square root of this.
|
||||
% If the input signal is in Volts as a function of time, then
|
||||
% the units on Pxx are Volts^2*seconds = Volt^2/Hz.
|
||||
%
|
||||
% Here are the covariance, RMS, and spectral amplitude values of
|
||||
% some common functions:
|
||||
%   Function             Cov=SUM(Pxx)/LENGTH(Pxx)   RMS          Pxx
%   a*sin(w*t)           a^2/2                      a/sqrt(2)    a^2*LENGTH(Pxx)/4
%   Normal:  a*randn(t)  a^2                        a            a^2
%   Uniform: a*rand(t)   a^2/12                     a/sqrt(12)   a^2/12
|
||||
%
|
||||
% For example, a pure sine wave with amplitude A has an RMS value
|
||||
% of A/sqrt(2), so A = SQRT(2*SUM(Pxx)/LENGTH(Pxx)).
|
||||
%
|
||||
% See Page 556, A.V. Oppenheim and R.W. Schafer, Digital Signal
|
||||
% Processing, Prentice-Hall, 1975.
|
||||
|
||||
error(nargchk(1,8,nargin))
|
||||
[msg,x,y,nfft,noverlap,window,Fs,p,dflag]=specchk2(varargin);
|
||||
error(msg)
|
||||
|
||||
|
||||
if isempty(p),
|
||||
p = .95; % default confidence interval even if not asked for
|
||||
end
|
||||
|
||||
n = size(x,1); % Number of data points
|
||||
ns = size(x,2); % Number of signals
|
||||
nwind = length(window);
|
||||
if n < nwind % zero-pad x (and y) if length less than the window length
|
||||
x(nwind,:)=0; n=nwind;
|
||||
if ~isempty(y), y(nwind)=0; end
|
||||
end
|
||||
|
||||
k = fix((n-noverlap)/(nwind-noverlap)); % Number of windows
|
||||
index = 1:nwind;
|
||||
KMU = k*norm(window)^2; % Normalizing scale factor ==> asymptotically unbiased
|
||||
% KMU = k*sum(window)^2;% alt. Nrmlzng scale factor ==> peaks are about right
|
||||
|
||||
if (isempty(y)) % Single sequence case.
|
||||
Pxx = zeros(nfft,ns); Pxx2 = zeros(nfft,ns);
|
||||
for i=1:k
|
||||
for l=1:ns
|
||||
if strcmp(dflag,'linear')
|
||||
xw = window.*detrend(x(index,l));
|
||||
elseif strcmp(dflag,'none')
|
||||
xw = window.*(x(index,l));
|
||||
else
|
||||
xw = window.*detrend(x(index,l),0);
|
||||
end
|
||||
Xx = abs(fft(xw,nfft)).^2;
|
||||
Pxx(:,l) = Pxx(:,l) + Xx;
|
||||
Pxx2(:,l) = Pxx2(:,l) + abs(Xx).^2;
|
||||
end
|
||||
index = index + (nwind - noverlap);
|
||||
end
|
||||
% Select first half
|
||||
if ~any(any(imag(x)~=0)), % if x and y are not complex
|
||||
if rem(nfft,2), % nfft odd
|
||||
select = [1:(nfft+1)/2];
|
||||
else
|
||||
select = [1:nfft/2+1]; % include DC AND Nyquist
|
||||
end
|
||||
else
|
||||
select = 1:nfft;
|
||||
end
|
||||
Pxx = Pxx(select,:);
|
||||
Pxx2 = Pxx2(select,:);
|
||||
cPxx = zeros(size(Pxx));
|
||||
if k > 1
|
||||
c = (k.*Pxx2-abs(Pxx).^2)./(k-1);
|
||||
c = max(c,zeros(size(Pxx)));
|
||||
cPxx = sqrt(c);
|
||||
end
|
||||
ff = sqrt(2)*erfinv(p); % Equal-tails.
|
||||
Pxxc = ff.*cPxx/KMU;
|
||||
P = Pxx/KMU;
|
||||
Pc = Pxxc;
|
||||
else
|
||||
Pxx = zeros(nfft,ns); % Dual sequence case.
|
||||
Pxy = Pxx; Pxx2 = Pxx; Pxy2 = Pxx;
|
||||
Pyy = zeros(nfft,1); Pyy2 = Pyy;
|
||||
for i=1:k
|
||||
if strcmp(dflag,'linear')
|
||||
yw = window.*detrend(y(index));
|
||||
elseif strcmp(dflag,'none')
|
||||
yw = window.*(y(index));
|
||||
else
|
||||
yw = window.*detrend(y(index),0);
|
||||
end
|
||||
Yy = fft(yw,nfft);
|
||||
Yy2 = abs(Yy).^2;
|
||||
Pyy = Pyy + Yy2;
|
||||
Pyy2 = Pyy2 + abs(Yy2).^2;
|
||||
for l=1:ns
|
||||
if strcmp(dflag,'linear')
|
||||
xw = window.*detrend(x(index,l));
|
||||
elseif strcmp(dflag,'none')
|
||||
xw = window.*(x(index,l));
|
||||
else
|
||||
xw = window.*detrend(x(index,l),0);
|
||||
end
|
||||
Xx = fft(xw,nfft);
|
||||
Xx2 = abs(Xx).^2;
|
||||
Pxx(:,l) = Pxx(:,l) + Xx2;
|
||||
Pxx2(:,l) = Pxx2(:,l) + abs(Xx2).^2;
|
||||
Xy = Yy .* conj(Xx);
|
||||
Pxy(:,l) = Pxy(:,l) + Xy;
|
||||
Pxy2(:,l) = Pxy2(:,l) + Xy .* conj(Xy);
|
||||
end
|
||||
index = index + (nwind - noverlap);
|
||||
end
|
||||
% Select first half
|
||||
if ~any(any(imag([x y])~=0)), % if x and y are not complex
|
||||
if rem(nfft,2), % nfft odd
|
||||
select = [1:(nfft+1)/2];
|
||||
else
|
||||
select = [1:nfft/2+1]; % include DC AND Nyquist
|
||||
end
|
||||
else
|
||||
select = 1:nfft;
|
||||
end
|
||||
Pxx = Pxx(select,:);
|
||||
Pxy = Pxy(select,:);
|
||||
Pxx2 = Pxx2(select,:);
|
||||
Pxy2 = Pxy2(select,:);
|
||||
Pyy = Pyy(select);
|
||||
Pyy2 = Pyy2(select);
|
||||
cPxx = zeros(size(Pxx));
|
||||
cPyy = zeros(size(Pyy));
|
||||
cPxy = cPxx;
|
||||
if k > 1
|
||||
c = max((k.*Pxx2-abs(Pxx).^2)./(k-1),zeros(size(Pxx)));
|
||||
cPxx = sqrt(c);
|
||||
c = max((k.*Pyy2-abs(Pyy).^2)./(k-1),zeros(size(Pyy)));
|
||||
cPyy = sqrt(c);
|
||||
c = max((k.*Pxy2-abs(Pxy).^2)./(k-1),zeros(size(Pxx)));
|
||||
cPxy = sqrt(c);
|
||||
end
|
||||
Txy = Pxy./Pxx;
|
||||
Cxy = (abs(Pxy).^2)./(Pxx.*repmat(Pyy,1,size(Pxx,2)));
|
||||
ff = sqrt(2)*erfinv(p); % Equal-tails.
|
||||
Pxx = Pxx/KMU;
|
||||
Pyy = Pyy/KMU;
|
||||
Pxy = Pxy/KMU;
|
||||
Pxxc = ff.*cPxx/KMU;
|
||||
Pxyc = ff.*cPxy/KMU;
|
||||
Pyyc = ff.*cPyy/KMU;
|
||||
P = [Pxx Pyy Pxy Txy Cxy];
|
||||
Pc = [Pxxc Pyyc Pxyc];
|
||||
end
|
||||
freq_vector = (select - 1)'*Fs/nfft;
|
||||
|
||||
if nargout == 0, % do plots
|
||||
newplot;
|
||||
if Fs==2, xl='Frequency'; else, xl = 'f [Hz]'; end
|
||||
nplot=1+(1-isempty(y))*4;
|
||||
|
||||
subplot(nplot,1,1);
|
||||
c = [max(Pxx-Pxxc,0) Pxx+Pxxc];
|
||||
c = c.*(c>0);
|
||||
|
||||
h=semilogy(freq_vector,Pxx,...
|
||||
freq_vector,c(:,1:size(c,2)/2),'--',...
|
||||
freq_vector,c(:,size(c,2)/2+1:end),'--');
|
||||
title('\bf X Power Spectral Density')
|
||||
ylabel('P_x')
|
||||
xlabel(xl)
|
||||
if length(h)>3
|
||||
s={};
|
||||
for k=1:length(h)/3
|
||||
c=get(h(k),'Color');
|
||||
set(h(k+length(h)/3),'Color',c);
|
||||
set(h(k+2*length(h)/3),'Color',c);
|
||||
s{k}=['x_' num2str(k)];
|
||||
end
|
||||
legend(h(1:length(h)/3),s);
|
||||
end
|
||||
if (isempty(y)), % single sequence case
|
||||
return
|
||||
end
|
||||
|
||||
subplot(nplot,1,2);
|
||||
c = [max(Pyy-Pyyc,0) Pyy+Pyyc];
|
||||
c = c.*(c>0);
|
||||
h=semilogy(freq_vector,Pyy,...
|
||||
freq_vector,c(:,1),'--',...
|
||||
freq_vector,c(:,2),'--');
|
||||
if size(Pxx,2)>1
|
||||
for k=1:length(h)/3
|
||||
c=get(h(k),'Color');
|
||||
set(h(k+length(h)/3),'Color',c);
|
||||
set(h(k+2*length(h)/3),'Color',c);
|
||||
end
|
||||
end
|
||||
|
||||
title('\bf Y Power Spectral Density')
|
||||
ylabel('P_y')
|
||||
xlabel(xl)
|
||||
|
||||
subplot(nplot,1,3);
|
||||
semilogy(freq_vector,abs(Txy));
|
||||
title('\bf Transfer function magnitude')
|
||||
ylabel('T_{xy}^{(m)}')
|
||||
xlabel(xl)
|
||||
|
||||
subplot(nplot,1,4);
|
||||
plot(freq_vector,(angle(Txy))), ...
|
||||
title('\bf Transfer function phase')
|
||||
ylabel('T_{xy}^{(p)}')
|
||||
xlabel(xl)
|
||||
|
||||
subplot(nplot,1,5);
|
||||
plot(freq_vector,Cxy);
|
||||
title('\bf Coherence')
|
||||
ylabel('C_{xy}')
|
||||
xlabel(xl)
|
||||
if exist ('niceaxes') ~= 0
|
||||
niceaxes(findall(gcf,'Type','axes'));
|
||||
end
|
||||
elseif nargout ==1,
|
||||
Spec = P;
|
||||
elseif nargout ==2,
|
||||
Spec = P;
|
||||
f = freq_vector;
|
||||
elseif nargout ==3,
|
||||
Spec = P;
|
||||
f = freq_vector;
|
||||
SpecConf = Pc;
|
||||
end
|
||||
|
||||
function [msg,x,y,nfft,noverlap,window,Fs,p,dflag] = specchk2(P)
|
||||
%SPECCHK Helper function for SPECTRUM
|
||||
% SPECCHK(P) takes the cell array P and uses each cell as
|
||||
% an input argument. Assumes P has between 1 and 7 elements.
|
||||
|
||||
msg = [];
|
||||
if size(P{1},1)<=1
|
||||
if max(size(P{1}))==1
|
||||
msg = 'Input data must be a vector, not a scalar.';
|
||||
else
|
||||
msg = 'Requires column vector input.';
|
||||
end
|
||||
x=[];
|
||||
y=[];
|
||||
elseif (length(P)>1),
|
||||
if (all(size(P{1},1)==size(P{2})) & (size(P{1},1)>1) ) | ...
|
||||
size(P{2},1)>1, % one signal or two present?
|
||||
% two signals, x and y, present
|
||||
x = P{1}; y = P{2};
|
||||
% shift parameters one left
|
||||
P(1) = [];
|
||||
else
|
||||
% only one signal, x, present
|
||||
x = P{1}; y = [];
|
||||
end
|
||||
else % length(P) == 1
|
||||
% only one signal, x, present
|
||||
x = P{1}; y = [];
|
||||
end
|
||||
|
||||
% now x and y are defined; let's get the rest
|
||||
|
||||
if length(P) == 1
|
||||
nfft = min(size(x,1),256);
|
||||
window = hanning(nfft);
|
||||
noverlap = 0;
|
||||
Fs = 2;
|
||||
p = [];
|
||||
dflag = 'linear';
|
||||
elseif length(P) == 2
|
||||
if isempty(P{2}), dflag = 'linear'; nfft = min(size(x,1),256);
|
||||
elseif isstr(P{2}), dflag = P{2}; nfft = min(size(x,1),256);
|
||||
else dflag = 'linear'; nfft = P{2}; end
|
||||
window = hanning(nfft);
|
||||
noverlap = 0;
|
||||
Fs = 2;
|
||||
p = [];
|
||||
elseif length(P) == 3
|
||||
if isempty(P{2}), nfft = min(size(x,1),256); else nfft=P{2}; end
|
||||
if isempty(P{3}), dflag = 'linear'; noverlap = 0;
|
||||
elseif isstr(P{3}), dflag = P{3}; noverlap = 0;
|
||||
else dflag = 'linear'; noverlap = P{3}; end
|
||||
window = hanning(nfft);
|
||||
Fs = 2;
|
||||
p = [];
|
||||
elseif length(P) == 4
|
||||
if isempty(P{2}), nfft = min(size(x,1),256); else nfft=P{2}; end
|
||||
if isstr(P{4})
|
||||
dflag = P{4};
|
||||
window = hanning(nfft);
|
||||
else
|
||||
dflag = 'linear';
|
||||
window = P{4}; window = window(:); % force window to be a column
|
||||
if length(window) == 1, window = hanning(window); end
|
||||
if isempty(window), window = hanning(nfft); end
|
||||
end
|
||||
if isempty(P{3}), noverlap = 0; else noverlap=P{3}; end
|
||||
Fs = 2;
|
||||
p = [];
|
||||
elseif length(P) == 5
|
||||
if isempty(P{2}), nfft = min(size(x,1),256); else nfft=P{2}; end
|
||||
window = P{4}; window = window(:); % force window to be a column
|
||||
if length(window) == 1, window = hanning(window); end
|
||||
if isempty(window), window = hanning(nfft); end
|
||||
if isempty(P{3}), noverlap = 0; else noverlap=P{3}; end
|
||||
if isstr(P{5})
|
||||
dflag = P{5};
|
||||
Fs = 2;
|
||||
else
|
||||
dflag = 'linear';
|
||||
if isempty(P{5}), Fs = 2; else Fs = P{5}; end
|
||||
end
|
||||
p = [];
|
||||
elseif length(P) == 6
|
||||
if isempty(P{2}), nfft = min(size(x,1),256); else nfft=P{2}; end
|
||||
window = P{4}; window = window(:); % force window to be a column
|
||||
if length(window) == 1, window = hanning(window); end
|
||||
if isempty(window), window = hanning(nfft); end
|
||||
if isempty(P{3}), noverlap = 0; else noverlap=P{3}; end
|
||||
if isempty(P{5}), Fs = 2; else Fs = P{5}; end
|
||||
if isstr(P{6})
|
||||
dflag = P{6};
|
||||
p = [];
|
||||
else
|
||||
dflag = 'linear';
|
||||
if isempty(P{6}), p = .95; else p = P{6}; end
|
||||
end
|
||||
elseif length(P) == 7
|
||||
if isempty(P{2}), nfft = min(size(x,1),256); else nfft=P{2}; end
|
||||
window = P{4}; window = window(:); % force window to be a column
|
||||
if length(window) == 1, window = hanning(window); end
|
||||
if isempty(window), window = hanning(nfft); end
|
||||
if isempty(P{3}), noverlap = 0; else noverlap=P{3}; end
|
||||
if isempty(P{5}), Fs = 2; else Fs = P{5}; end
|
||||
if isempty(P{6}), p = .95; else p = P{6}; end
|
||||
if isstr(P{7})
|
||||
dflag = P{7};
|
||||
else
|
||||
msg = 'DFLAG parameter must be a string.'; return
|
||||
end
|
||||
end
|
||||
|
||||
% NOW do error checking
|
||||
if isempty(msg)
|
||||
if (nfft<length(window)),
|
||||
msg = 'Requires window''s length to be no greater than the FFT length.';
|
||||
end
|
||||
if (noverlap >= length(window)),
|
||||
msg = 'Requires NOVERLAP to be strictly less than the window length.';
|
||||
end
|
||||
if (nfft ~= abs(round(nfft)))|(noverlap ~= abs(round(noverlap))),
|
||||
msg = 'Requires positive integer values for NFFT and NOVERLAP.';
|
||||
end
|
||||
if ~isempty(p),
|
||||
if (prod(size(p))>1)|(p(1,1)>1)|(p(1,1)<0),
|
||||
msg = 'Requires confidence parameter to be a scalar between 0 and 1.';
|
||||
end
|
||||
end
|
||||
if (min(size(y))~=1)&(~isempty(y)),
|
||||
msg = 'Requires column vector input as second signal.';
|
||||
end
|
||||
if (size(x,1)~=length(y))&(~isempty(y)),
|
||||
msg = 'Requires X and Y to have the same number of rows.';
|
||||
end
|
||||
end
|
BIN
struct2csv - Snelkoppeling.lnk
Normal file
Binary file not shown.