// NOTE(review): design sketch in Java-style pseudo-code, NOT compilable C++
// (ArrayList, raw jagged arrays, "K:labelNum" slice syntax, and "..." placeholders).
// Intent, as far as this file shows: split one flat list of labelled training vectors
// into per-class groups (classData[k][m] = m'th vector of class k) and record each
// class's label value in classLabel[k]. Kept verbatim pending a real implementation.
void divide_one_class_by_labels(ArrayList <double []> TrainingSample, double[][][] classData, double[] classLabel) {
// Number of distinct labels (K) — never initialized here.
int labelNum;
// NOTE(review): shadows the classData parameter — presumably the parameter itself
// was meant to be (re)allocated instead; confirm with the author.
double [][][] classData = new double[labelNum][][];//K m num
//get each label vector count and attribu number
// Pseudo-code: for every class k in 0..labelNum, allocate [sampleCount_k][attributeNum].
classData[K:labelNum] = new [k][attributeNum];
// labels all ready created before training. here just a 0..k ---> reshape labels.
// Pseudo-code placeholder: store the actual label value for class index k.
classLabel[k]=...
}
// TRAINING FUNCTIONS //

/**
 @brief Trains one DTW template per class from the labelled time-series data.

 For each class label in the training data this selects, via train_NDDTW, the
 single training example that best represents the class (minimum average DTW
 distance to all other examples of that class) and stores it as the class
 template. Also sizes the realtime-prediction buffers.

 @param data the labelled time-series training data (must contain >= 1 sample;
        classes with a single example are only accepted when null rejection is off)
 @return true if all templates were trained, false otherwise
*/
bool DTW::train_(TimeSeriesClassificationData &data){

    UINT bestIndex = 0;

    //We can't train a model without any training samples
    if( data.getNumSamples() == 0 ){
        return false;
    }

    //Get the number of classes and the input dimensionality directly from the training data
    const UINT numClasses = data.getNumClasses();
    numTemplates = numClasses;
    numInputDimensions = data.getNumDimensions();

    //Resize the model buffers so templatesBuffer[k]/classLabels[k] below are valid
    templatesBuffer.resize( numClasses );
    classLabels.resize( numClasses );
    nullRejectionThresholds.resize( numClasses );
    averageTemplateLength = 0;

    //For each class, run a one-to-one DTW and find the template that best describes the data
    for(UINT k=0; k<numTemplates; k++){
        //Get the class label for the kth class
        const UINT classLabel = data.getClassTracker()[k].classLabel;

        //Get just the training samples that belong to this class
        TimeSeriesClassificationData classData_k = data.getClassData( classLabel );
        const UINT numExamples = classData_k.getNumSamples();
        bestIndex = 0;

        //Set the class label of this template
        templatesBuffer[k].classLabel = classLabel;

        //Set the kth class label
        classLabels[k] = classLabel;

        trainingLog << "Training Template: " << k << " Class: " << classLabel << std::endl;

        //Check to make sure we actually have some training examples
        if( numExamples < 1 ){
            return false;
        }

        //Null rejection needs at least two examples to estimate a rejection threshold
        if( numExamples == 1 && useNullRejection ){
            return false;
        }

        if( numExamples == 1 ){//If we have just one training example then we have to use it as the template
            bestIndex = 0;
            nullRejectionThresholds[k] = 0.0;//TODO-We need a better way of calculating this!
        }else{
            //Search for the best training example for this class
            if( !train_NDDTW(classData_k,templatesBuffer[k],bestIndex) ){
                return false;
            }
        }

        //Add the template with the best index to the buffer
        templatesBuffer[k].timeSeries = classData_k[bestIndex].getData();

        //Add the average length of the training examples for this template to the overall averageTemplateLength
        averageTemplateLength += templatesBuffer[k].averageTemplateLength;
    }

    //Flag that the models have been trained
    trained = true;
    converged = true;
    averageTemplateLength = averageTemplateLength/numTemplates;

    //Recompute the null rejection thresholds
    recomputeNullRejectionThresholds();

    //Resize the prediction results to make sure it is setup for realtime prediction
    continuousInputDataBuffer.clear();
    continuousInputDataBuffer.resize(averageTemplateLength,VectorFloat(numInputDimensions,0));
    classLikelihoods.resize(numTemplates,DEFAULT_NULL_LIKELIHOOD_VALUE);
    classDistances.resize(numTemplates,0);
    predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
    maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;

    //Training complete
    return trained;
}
/**
 @brief Finds the best template for one class via an all-pairs N-dimensional DTW search.

 Computes the DTW distance between every pair of training examples, picks the
 example with the minimum average distance to all others as the template
 (returned via bestIndex), and estimates the trainingMu/trainingSigma used for
 null-rejection thresholding.

 @param trainingData the training examples of a single class
 @param dtwTemplate output: template stats (averageTemplateLength, trainingMu, trainingSigma)
 @param bestIndex output: index of the winning training example
 @return true on success, false if there are no training examples
*/
bool DTW::train_NDDTW(TimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex){

    const UINT numExamples = trainingData.getNumSamples();

    //Guard against an empty class — results[0] below would be out of bounds
    if( numExamples == 0 ){
        return false;
    }

    //results[m] accumulates the distance from example m to every other example
    VectorFloat results(numExamples,0.0);
    MatrixFloat distanceResults(numExamples,numExamples);
    dtwTemplate.averageTemplateLength = 0;

    for(UINT m=0; m<numExamples; m++){

        MatrixFloat templateA; //The m'th template
        MatrixFloat templateB; //The n'th template
        dtwTemplate.averageTemplateLength += trainingData[m].getLength();

        //Smooth the data if required
        if( useSmoothing ) smoothData(trainingData[m].getData(),smoothingFactor,templateA);
        else templateA = trainingData[m].getData();

        for(UINT n=0; n<numExamples; n++){
            if(m!=n){
                //Smooth the data if required
                if( useSmoothing ) smoothData(trainingData[n].getData(),smoothingFactor,templateB);
                else templateB = trainingData[n].getData();

                //Compute the distance between the two time series
                MatrixFloat distanceMatrix(templateA.getNumRows(),templateB.getNumRows());
                Vector< IndexDist > warpPath;
                Float dist = computeDistance(templateA,templateB,distanceMatrix,warpPath);

                trainingLog << "Template: " << m << " Timeseries: " << n << " Dist: " << dist << std::endl;

                //Update the results values
                distanceResults[m][n] = dist;
                results[m] += dist;
            }else distanceResults[m][n] = 0; //The distance is zero because the two timeseries are the same
        }
    }

    //Average each example's total distance (guard the single-example case against /0)
    if( numExamples > 1 ){
        for(UINT m=0; m<numExamples; m++) results[m] /= (numExamples-1);
    }

    //Find the best average result, this is the result with the minimum value
    bestIndex = 0;
    Float bestAverage = results[0];
    for(UINT m=1; m<numExamples; m++){
        if( results[m] < bestAverage ){
            bestAverage = results[m];
            bestIndex = m;
        }
    }

    if( numExamples > 2 ){
        //Work out the null-rejection threshold stats for the best template
        dtwTemplate.trainingMu = results[bestIndex];
        dtwTemplate.trainingSigma = 0.0;
        for(UINT n=0; n<numExamples; n++){
            if(n!=bestIndex){
                dtwTemplate.trainingSigma += SQR( distanceResults[ bestIndex ][n] - dtwTemplate.trainingMu );
            }
        }
        dtwTemplate.trainingSigma = sqrt( dtwTemplate.trainingSigma / Float(numExamples-2) );
    }else{
        warningLog << __GRT_LOG__ << " There are not enough examples to compute the trainingMu and trainingSigma for the template for class " << dtwTemplate.classLabel << std::endl;
        dtwTemplate.trainingMu = 0.0;
        dtwTemplate.trainingSigma = 0.0;
    }

    //Set the average length of the training examples
    dtwTemplate.averageTemplateLength = (UINT) (dtwTemplate.averageTemplateLength/Float(numExamples));

    trainingLog << "AverageTemplateLength: " << dtwTemplate.averageTemplateLength << std::endl;

    //Flag that the training was successful
    return true;
}
// NOTE(review): stray duplicate — these lines are a second copy of the body of the
// divide_one_class_by_labels sketch from the top of the file, pasted here WITHOUT its
// function header. The fragment is not valid C++ and should be deleted outright.
int labelNum;
double [][][] classData = new double[labelNum][][];//K m num
//get each label vector count and attribu number
classData[K:labelNum] = new [k][attributeNum];
// labels all ready created before training. here just a 0..k ---> reshape labels.
classLabel[k]=...
}
// TRAINING FUNCTIONS //
// NOTE(review): duplicate definition — this function is a byte-for-byte copy of the
// DTW::train_ that already appears earlier in this file. Two definitions of the same
// function cannot compile/link; one copy must be deleted. Annotating only, because
// which copy to keep is an authorial decision.
// Intent: train one DTW template per class from labelled time-series data.
bool DTW::train_(TimeSeriesClassificationData &data){
// NOTE(review): ArrayList/double[] are Java constructs, not C++, and the `data`
// parameter is never used — presumably TrainingSample was meant to come from `data`.
ArrayList <double []> TrainingSample;
ArrayList <double []> trainingData = TrainingSample;
double [][][] classData = null;//K m num
UINT bestIndex = 0;
//Assign, define a new function to get Num classes
// NOTE(review): divide_one_class_by_labels is declared void (with three parameters)
// at the top of this file — using its return value and passing two args is inconsistent.
int numClasses = divide_one_class_by_labels(TrainingSample,classData);
//Assign, define a new function to get Num classes
numTemplates = numClasses;
//Assign, define a new function to get Num attribute
// Input dimensionality = length of the first training vector.
numInputDimensions = TrainingSample.get(0).length;
//Assign, define a new function to set nullRejectionThresholds number
nullRejectionThresholds.resize( numClasses );
averageTemplateLength = 0;
//For each class, run a one-to-one DTW and find the template the best describes the data
for(UINT k=0; k<numTemplates; k++){
//Get the class label for the cth class
// NOTE(review): classLabel is used here (and below) but never declared, and classData
// is an array, not callable — this line cannot compile as written.
TimeSeriesClassificationData classData_k = classData( classLabel );
UINT numExamples = classData[k].size();
bestIndex = 0;
//Set the class label of this template
// NOTE(review): templatesBuffer/classLabels are indexed without being resized first.
templatesBuffer[k].classLabel = classLabel;
//Set the kth class label
classLabels[k] = classLabel;
trainingLog << "Training Template: " << k << " Class: " << classLabel << std::endl;
//Check to make sure we actually have some training examples
if( numExamples < 1 ){
return false;
}
// Null rejection needs at least two examples to estimate a rejection threshold.
if( numExamples == 1 && useNullRejection ){
return false;
}
if( numExamples == 1 ){//If we have just one training example then we have to use it as the template
bestIndex = 0;
nullRejectionThresholds[k] = 0.0;//TODO-We need a better way of calculating this!
}else{
//Search for the best training example for this class
if( !train_NDDTW(classData_k,templatesBuffer[k],bestIndex) ){
return false;
}
}
//Add the template with the best index to the buffer
// NOTE(review): missing right-hand side — syntax error; the commented-out expression
// is presumably the intended assignment.
templatesBuffer[k].timeSeries = ;//classData_k[bestIndex].getData();
//Add the average length of the training examples for this template to the overall averageTemplateLength
averageTemplateLength += templatesBuffer[k].averageTemplateLength;
}
//Flag that the models have been trained
trained = true;
converged = true;
averageTemplateLength = averageTemplateLength/numTemplates;
//Recompute the null rejection thresholds
recomputeNullRejectionThresholds();
//Resize the prediction results to make sure it is setup for realtime prediction
continuousInputDataBuffer.clear();
continuousInputDataBuffer.resize(averageTemplateLength,VectorFloat(numInputDimensions,0));
classLikelihoods.resize(numTemplates,DEFAULT_NULL_LIKELIHOOD_VALUE);
classDistances.resize(numTemplates,0);
predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
maxLikelihood = DEFAULT_NULL_LIKELIHOOD_VALUE;
//Training complete
return trained;
}
// NOTE(review): duplicate definition — this function is a byte-for-byte copy of the
// DTW::train_NDDTW that already appears earlier in this file; one copy must be deleted.
// Intent: all-pairs DTW search to pick the training example with the minimum average
// distance to the others as the class template, plus mu/sigma for null rejection.
bool DTW::train_NDDTW(TimeSeriesClassificationData &trainingData,DTWTemplate &dtwTemplate,UINT &bestIndex){
UINT numExamples = trainingData.getNumSamples();
// NOTE(review): immediately overwrites the value above; TimeSeriesClassificationData
// has no size() member visible here — this line cannot compile and should be removed.
numExamples = trainingData.size();// length of D1
VectorFloat results(numExamples,0.0);
// NOTE(review): Java-style allocation, not valid C++ — a MatrixFloat(numExamples,
// numExamples) is presumably intended (getNumRows/operator[] are used on it below).
double[][] distanceResults = new double(numExamples,numExamples);
dtwTemplate.averageTemplateLength = 0;
for(UINT m=0; m<numExamples; m++){
// NOTE(review): Java allocation with hard-coded size 45; vectorA/reshape below are
// pseudo-code sketching a flat-vector-to-[15][3] conversion — not compilable.
double[] vectorA = new double[45];
double[][] templateA; //The m'th template
double[][] templateB; //The n'th template
dtwTemplate.averageTemplateLength += trainingData[m].getLength();
//Smooth the data if required
if( useSmoothing ) smoothData(trainingData[m].getData(),smoothingFactor,templateA);
else templateA = trainingData[m].getData();
vectorA = trainingData[m];// each element is a vector
templateA = reshape(vectorA)// turn 45 to [15][3]
for(UINT n=0; n<numExamples; n++){
if(m!=n){
// NOTE(review): orphan `else` with no matching `if` — the "if( useSmoothing )
// smoothData(...)" branch (as in the m-loop above) appears to have been deleted.
else templateB = trainingData[n].getData();
// NOTE(review): vectorB is never declared; missing semicolon on the next line.
vectorB = trainingData[n];// each element is a vector
templateB = reshape(vectorB)// turn 45 to [15][3]
//Compute the distance between the two time series
MatrixFloat distanceMatrix(templateA.getNumRows(),templateB.getNumRows());
Vector< IndexDist > warpPath;
Float dist = computeDistance(templateA,templateB,distanceMatrix,warpPath);
trainingLog << "Template: " << m << " Timeseries: " << n << " Dist: " << dist << std::endl;
//Update the results values
distanceResults[m][n] = dist;
results[m] += dist;
}else distanceResults[m][n] = 0; //The distance is zero because the two timeseries are the same
}
}
// NOTE(review): divides by zero when numExamples == 1.
for(UINT m=0; m<numExamples; m++) results[m]/=(numExamples-1);
//Find the best average result, this is the result with the minimum value
bestIndex = 0;
// NOTE(review): results[0] is out of bounds when numExamples == 0.
Float bestAverage = results[0];
for(UINT m=1; m<numExamples; m++){
if( results[m] < bestAverage ){
bestAverage = results[m];
bestIndex = m;
}
}
if( numExamples > 2 ){
//Work out the threshold value for the best template
dtwTemplate.trainingMu = results[bestIndex];
dtwTemplate.trainingSigma = 0.0;
for(UINT n=0; n<numExamples; n++){
if(n!=bestIndex){
dtwTemplate.trainingSigma += SQR( distanceResults[ bestIndex ][n] - dtwTemplate.trainingMu );
}
}
dtwTemplate.trainingSigma = sqrt( dtwTemplate.trainingSigma / Float(numExamples-2) );
}else{
warningLog << __GRT_LOG__ << " There are not enough examples to compute the trainingMu and trainingSigma for the template for class " << dtwTemplate.classLabel << std::endl;
dtwTemplate.trainingMu = 0.0;
dtwTemplate.trainingSigma = 0.0;
}
//Set the average length of the training examples
dtwTemplate.averageTemplateLength = (UINT) (dtwTemplate.averageTemplateLength/Float(numExamples));
trainingLog << "AverageTemplateLength: " << dtwTemplate.averageTemplateLength << std::endl;
//Flag that the training was successfull
return true;
}