[GRASS-SVN] r63337 - in grass-addons/grass7/imagery/i.pr: PRLIB i.pr_blob i.pr_classify i.pr_features i.pr_features_additional i.pr_features_extract i.pr_features_selection i.pr_model i.pr_sites_aggregate i.pr_statistics i.pr_subsets i.pr_subsets/old i.pr_training i.pr_uxb

svn_grass at osgeo.org svn_grass at osgeo.org
Tue Dec 2 13:11:56 PST 2014


Author: zarch
Date: 2014-12-02 13:11:56 -0800 (Tue, 02 Dec 2014)
New Revision: 63337

Modified:
   grass-addons/grass7/imagery/i.pr/PRLIB/blob.c
   grass-addons/grass7/imagery/i.pr/PRLIB/bootstrap.c
   grass-addons/grass7/imagery/i.pr/PRLIB/dist.c
   grass-addons/grass7/imagery/i.pr/PRLIB/eigen.c
   grass-addons/grass7/imagery/i.pr/PRLIB/entropy.c
   grass-addons/grass7/imagery/i.pr/PRLIB/features.c
   grass-addons/grass7/imagery/i.pr/PRLIB/features_selection.c
   grass-addons/grass7/imagery/i.pr/PRLIB/getline.c
   grass-addons/grass7/imagery/i.pr/PRLIB/gm.c
   grass-addons/grass7/imagery/i.pr/PRLIB/integration.c
   grass-addons/grass7/imagery/i.pr/PRLIB/lu.c
   grass-addons/grass7/imagery/i.pr/PRLIB/matrix.c
   grass-addons/grass7/imagery/i.pr/PRLIB/min_quadratic.c
   grass-addons/grass7/imagery/i.pr/PRLIB/nn.c
   grass-addons/grass7/imagery/i.pr/PRLIB/open.c
   grass-addons/grass7/imagery/i.pr/PRLIB/pca.c
   grass-addons/grass7/imagery/i.pr/PRLIB/percent.c
   grass-addons/grass7/imagery/i.pr/PRLIB/random.c
   grass-addons/grass7/imagery/i.pr/PRLIB/read_models.c
   grass-addons/grass7/imagery/i.pr/PRLIB/soft_margin_boosting.c
   grass-addons/grass7/imagery/i.pr/PRLIB/sort.c
   grass-addons/grass7/imagery/i.pr/PRLIB/stats.c
   grass-addons/grass7/imagery/i.pr/PRLIB/svm.c
   grass-addons/grass7/imagery/i.pr/PRLIB/test.c
   grass-addons/grass7/imagery/i.pr/PRLIB/training.c
   grass-addons/grass7/imagery/i.pr/PRLIB/tree.c
   grass-addons/grass7/imagery/i.pr/PRLIB/write_matrix.c
   grass-addons/grass7/imagery/i.pr/i.pr_blob/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_classify/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_features/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_features_additional/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_features_extract/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_features_selection/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_model/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_sites_aggregate/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_statistics/cell.c
   grass-addons/grass7/imagery/i.pr/i.pr_statistics/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_subsets/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig.c
   grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig_2.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/conv.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/graphics.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/main.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/mouse.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/points.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/sites.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/title.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/view.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/write_map.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/zoom.c
   grass-addons/grass7/imagery/i.pr/i.pr_training/zoom2.c
   grass-addons/grass7/imagery/i.pr/i.pr_uxb/main.c
Log:
i.pr: Fix the missing code

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/blob.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/blob.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/blob.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,293 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   Blob and BlobSites structure management
+ */
+
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <grass/gis.h>
+#include "global.h"
+
+static void add_points_to_blob(Blob *** blobs, int *npoints, int nblobs,
+			       double **matrix, int r, int c, int i, int j,
+			       double tm, double tM);
+static int in_blob(int row, int col, Blob * blobs, int npoints);
+
+void extract_sites_from_blob(Blob * blobs, int npoints, int nblobs,
+			     struct Cell_head *cellhd, BlobSites * sites,
+			     double **matrix)
+
+     /*extract geographical coordinates of the blob centers
+        and store results in a BlobSites structure, containing the minimum
+        and maximum values of the blob too (computation based on matrix) */
+{
+    int index, j, intindex;
+    double barix, bariy;
+
+    index = 0;
+    for (j = 0; j < nblobs; j++) {
+	if (index < npoints) {
+	    barix = .0;
+	    bariy = .0;
+	    intindex = 0;
+	    sites[j].max = sites[j].min =
+		matrix[blobs[index].row][blobs[index].col];
+	    while (blobs[index].number == j) {
+		if (matrix[blobs[index].row][blobs[index].col] > sites[j].max)
+		    sites[j].max = matrix[blobs[index].row][blobs[index].col];
+		if (matrix[blobs[index].row][blobs[index].col] < sites[j].min)
+		    sites[j].min = matrix[blobs[index].row][blobs[index].col];
+		barix +=
+		    G_col_to_easting((double)blobs[index].col + .5, cellhd);
+		bariy +=
+		    G_row_to_northing((double)blobs[index].row + .5, cellhd);
+		index += 1;
+		intindex += 1;
+	    }
+	    sites[j].east = barix / intindex;
+	    sites[j].north = bariy / intindex;
+	    sites[j].n = intindex;
+	}
+    }
+}
+
+static int in_blob(int row, int col, Blob * blobs, int npoints)
+{
+
+    while (npoints > 0) {
+	if (blobs[npoints - 1].row == row)
+	    if (blobs[npoints - 1].col == col)
+		return TRUE;
+	npoints -= 1;
+    }
+    return FALSE;
+}
+
+void find_blob(double **matrix, int r, int c, Blob ** blobs, int *npoints,
+	       int *nblobs, double tm, double tM)
+
+     /*find blobs within a matrix and add them to the blobs structure. A
+        blob is a set of contiguous cells of a matrix, all of them with
+        value <= tM and >= tm. npoints counts the cells belonging to blobs;
+        nblobs is the total number of blobs. */
+{
+    int i, j;
+
+    for (i = 0; i < r; i++) {
+	for (j = 0; j < c; j++) {
+	    if ((matrix[i][j] <= tM) && (matrix[i][j] >= tm)) {
+		if (!in_blob(i, j, *blobs, *npoints)) {
+		    if (*npoints == 0)
+			*blobs =
+			    (Blob *) G_calloc(*npoints + 1, sizeof(Blob));
+		    else
+			*blobs =
+			    (Blob *) G_realloc(*blobs,
+					       (*npoints + 1) * sizeof(Blob));
+		    (*blobs)[*npoints].row = i;
+		    (*blobs)[*npoints].col = j;
+		    (*blobs)[*npoints].number = *nblobs;
+		    (*npoints) += 1;
+		    add_points_to_blob(&blobs, npoints, *nblobs, matrix, r, c,
+				       i, j, tm, tM);
+		    (*nblobs) += 1;
+		}
+	    }
+	}
+    }
+}
+
+static void add_points_to_blob(Blob *** blobs, int *npoints, int nblobs,
+			       double **matrix, int r, int c, int i, int j,
+			       double tm, double tM)
+{
+    int s;
+    int points_in_blob;
+    int *row, *col;
+
+    points_in_blob = 0;
+    row = (int *)G_calloc(points_in_blob + 1, sizeof(int));
+    col = (int *)G_calloc(points_in_blob + 1, sizeof(int));
+
+    row[points_in_blob] = i;
+    col[points_in_blob] = j;
+
+    for (s = 0; s <= points_in_blob; s++) {
+	if (row[s] > 0 && row[s] < r - 1 && col[s] > 0 && col[s] < c - 1) {
+	    if ((matrix[row[s] - 1][col[s] - 1] <= tM) &&
+		(matrix[row[s] - 1][col[s] - 1] >= tm)) {
+		if (!in_blob(row[s] - 1, col[s] - 1, **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s] - 1;
+		    (**blobs)[*npoints].col = col[s] - 1;
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s] - 1;
+		    col[points_in_blob] = col[s] - 1;
+		}
+	    }
+	    if ((matrix[row[s] - 1][col[s]] <= tM) &&
+		(matrix[row[s] - 1][col[s]] >= tm)) {
+		if (!in_blob(row[s] - 1, col[s], **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s] - 1;
+		    (**blobs)[*npoints].col = col[s];
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s] - 1;
+		    col[points_in_blob] = col[s];
+		}
+	    }
+	    if ((matrix[row[s] - 1][col[s] + 1] <= tM) &&
+		(matrix[row[s] - 1][col[s] + 1] >= tm)) {
+		if (!in_blob(row[s] - 1, col[s] + 1, **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s] - 1;
+		    (**blobs)[*npoints].col = col[s] + 1;
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s] - 1;
+		    col[points_in_blob] = col[s] + 1;
+		}
+	    }
+	    if ((matrix[row[s]][col[s] - 1] <= tM) &&
+		(matrix[row[s]][col[s] - 1] >= tm)) {
+		if (!in_blob(row[s], col[s] - 1, **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s];
+		    (**blobs)[*npoints].col = col[s] - 1;
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s];
+		    col[points_in_blob] = col[s] - 1;
+		}
+	    }
+	    if ((matrix[row[s]][col[s] + 1] <= tM) &&
+		(matrix[row[s]][col[s] + 1] >= tm)) {
+		if (!in_blob(row[s], col[s] + 1, **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s];
+		    (**blobs)[*npoints].col = col[s] + 1;
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s];
+		    col[points_in_blob] = col[s] + 1;
+		}
+	    }
+	    if ((matrix[row[s] + 1][col[s] - 1] <= tM) &&
+		(matrix[row[s] + 1][col[s] - 1] >= tm)) {
+		if (!in_blob(row[s] + 1, col[s] - 1, **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s] + 1;
+		    (**blobs)[*npoints].col = col[s] - 1;
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s] + 1;
+		    col[points_in_blob] = col[s] - 1;
+		}
+	    }
+	    if ((matrix[row[s] + 1][col[s]] <= tM) &&
+		(matrix[row[s] + 1][col[s]] >= tm)) {
+		if (!in_blob(row[s] + 1, col[s], **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s] + 1;
+		    (**blobs)[*npoints].col = col[s];
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s] + 1;
+		    col[points_in_blob] = col[s];
+		}
+	    }
+	    if ((matrix[row[s] + 1][col[s] + 1] <= tM) &&
+		(matrix[row[s] + 1][col[s] + 1] >= tm)) {
+		if (!in_blob(row[s] + 1, col[s] + 1, **blobs, *npoints)) {
+		    **blobs =
+			(Blob *) G_realloc(**blobs,
+					   (*npoints + 1) * sizeof(Blob));
+		    (**blobs)[*npoints].row = row[s] + 1;
+		    (**blobs)[*npoints].col = col[s] + 1;
+		    (**blobs)[*npoints].number = nblobs;
+		    *npoints += 1;
+		    points_in_blob += 1;
+		    row =
+			(int *)G_realloc(row,
+					 (points_in_blob + 1) * sizeof(int));
+		    col =
+			(int *)G_realloc(col,
+					 (points_in_blob + 1) * sizeof(int));
+		    row[points_in_blob] = row[s] + 1;
+		    col[points_in_blob] = col[s] + 1;
+		}
+	    }
+	}
+    }
+    G_free(row);
+    G_free(col);
+}

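For reference, a minimal sketch of how the two public blob routines chain
together (blob_example, the thresholds and the allocations are illustrative;
Blob and BlobSites come from global.h):

    #include <grass/gis.h>
    #include "global.h"

    void blob_example(double **matrix, int rows, int cols,
                      struct Cell_head *cellhd)
    {
        Blob *blobs = NULL;
        BlobSites *sites;
        int npoints = 0, nblobs = 0;
        double tm = 0.5, tM = 1.0;  /* keep cells with tm <= value <= tM */

        /* group 8-connected cells with values in [tm, tM] into blobs */
        find_blob(matrix, rows, cols, &blobs, &npoints, &nblobs, tm, tM);
        sites = (BlobSites *) G_calloc(nblobs, sizeof(BlobSites));
        /* one entry per blob: center coordinates, cell count, min/max */
        extract_sites_from_blob(blobs, npoints, nblobs, cellhd, sites, matrix);
    }
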
Modified: grass-addons/grass7/imagery/i.pr/PRLIB/bootstrap.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/bootstrap.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/bootstrap.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,113 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   probability based extraction of bootstrap samples
+ */
+
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <grass/gis.h>
+#include "global.h"
+
+void Bootsamples_rseed(int n, double *prob, int *random_labels, int *idum)
+
+     /*
+        given an array of probabilities of length n, extract a bootstrap sample
+        of n elements according to the vector of probabilities
+      */
+{
+    int i, j;
+    int *random_labels_flag;
+    double *random;
+    double *cumprob;
+    double probtot = .0;
+
+    for (i = 0; i < n; i++)
+	probtot += prob[i];
+    for (i = 0; i < n; i++)
+	prob[i] /= probtot;
+
+    random_labels_flag = (int *)G_calloc(n, sizeof(int));
+    random = (double *)G_calloc(n, sizeof(double));
+    cumprob = (double *)G_calloc(n, sizeof(double));
+
+    for (i = 0; i < n; ++i) {
+	random[i] = (double)ran1(idum);
+	random_labels[i] = n - 1;
+	random_labels_flag[i] = 0;
+    }
+
+
+    for (i = 0; i < n; i++) {
+	if (i > 0)
+	    cumprob[i] = cumprob[i - 1] + prob[i];
+	else
+	    cumprob[0] = prob[0];
+
+	for (j = 0; j < n; j++) {
+
+	    if (random[j] < cumprob[i])
+		if (random_labels_flag[j] == 0) {
+		    random_labels[j] = i;
+		    random_labels_flag[j] = 1;
+		}
+	}
+    }
+
+    G_free(random);
+    G_free(cumprob);
+    G_free(random_labels_flag);
+}
+
+void Bootsamples(int n, double *prob, int *random_labels)
+
+     /*
+        given an array of probabilities of length n, extract a bootstrap sample
+        of n elements according to the vector of probabilities
+      */
+{
+    int i, j;
+    int *random_labels_flag;
+    double *random;
+    double *cumprob;
+    double probtot = .0;
+
+    for (i = 0; i < n; i++)
+	probtot += prob[i];
+    for (i = 0; i < n; i++)
+	prob[i] /= probtot;
+
+    random_labels_flag = (int *)G_calloc(n, sizeof(int));
+    random = (double *)G_calloc(n, sizeof(double));
+    cumprob = (double *)G_calloc(n, sizeof(double));
+
+    for (i = 0; i < n; ++i) {
+	random[i] = (double)drand48();
+	random_labels[i] = n - 1;
+	random_labels_flag[i] = 0;
+    }
+
+
+    for (i = 0; i < n; i++) {
+	if (i > 0)
+	    cumprob[i] = cumprob[i - 1] + prob[i];
+	else
+	    cumprob[0] = prob[0];
+
+	for (j = 0; j < n; j++) {
+
+	    if (random[j] < cumprob[i])
+		if (random_labels_flag[j] == 0) {
+		    random_labels[j] = i;
+		    random_labels_flag[j] = 1;
+		}
+	}
+    }
+
+    G_free(random);
+    G_free(cumprob);
+    G_free(random_labels_flag);
+}

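For reference, a minimal sketch of a Bootsamples call (the sample size and
the uniform weights are made up for illustration; note that the routine
normalizes prob in place):

    #include <grass/gis.h>
    #include "global.h"

    void bootstrap_example(void)
    {
        int n = 100, i;
        double *prob = (double *)G_calloc(n, sizeof(double));
        int *labels = (int *)G_calloc(n, sizeof(int));

        for (i = 0; i < n; i++)
            prob[i] = 1.0;      /* equal weights: plain bootstrap */
        /* labels[j] receives the index of the element drawn at position j */
        Bootsamples(n, prob, labels);
        G_free(prob);
        G_free(labels);
    }

Bootsamples_rseed differs only in drawing from ran1(idum) instead of
drand48(), so results can be reproduced by fixing the seed.
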
Modified: grass-addons/grass7/imagery/i.pr/PRLIB/dist.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/dist.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/dist.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,77 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   computation of distances between arrays
+ */
+
+#include <math.h>
+#include "global.h"
+
+
+double squared_distance(double *x, double *y, int n)
+
+     /*
+        squared euclidean distance between vectors x and y of length n
+      */
+{
+    int j;
+    double out = 0.0;
+    double tmp;
+
+
+    for (j = 0; j < n; j++) {
+	tmp = x[j] - y[j];
+	out += tmp * tmp;
+    }
+
+
+    return out;
+}
+
+double euclidean_distance(double *x, double *y, int n)
+
+     /*
+        euclidean distance between vectors x and y of length n
+      */
+{
+    int j;
+    double out = 0.0;
+    double tmp;
+
+
+    for (j = 0; j < n; j++) {
+	tmp = x[j] - y[j];
+	out += tmp * tmp;
+    }
+
+
+    return sqrt(out);
+}
+
+
+double scalar_product(double *x, double *y, int n)
+
+     /*
+        scalar product between vector x and y of length n
+      */
+{
+    double out;
+    int i;
+
+    out = 0.0;
+    for (i = 0; i < n; i++)
+	out += x[i] * y[i];
+
+    return out;
+}
+
+double euclidean_norm(double *x, int n)
+
+     /*
+        euclidean norm of a vector x of length n
+      */
+{
+    return sqrt(scalar_product(x, x, n));
+}

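As a quick sanity check of the four routines above: for x = (1, 2, 2) and
y = (1, 0, 0) with n = 3, squared_distance returns 8, euclidean_distance
returns sqrt(8) (about 2.83), scalar_product returns 1, and
euclidean_norm(x, 3) returns 3.
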
Modified: grass-addons/grass7/imagery/i.pr/PRLIB/eigen.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/eigen.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/eigen.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,237 @@
+/*
+   Some of the following routines are borrowed from "Numerical Recipes in C",
+   others are written and tested by Stefano Merler
+
+   for
+
+   Eigenvalues and eigenvectors of symmetric matrices: computation
+   and management
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <grass/gis.h>
+
+
+#define MAX_ITERS 30
+#define SIGN(a,b) ((b)<0 ? -fabs(a) : fabs(a))
+
+
+
+void tred2(double **a, int n, double d[], double e[])
+
+     /*
+        Householder method for reduction of a symmetric n x n matrix a to
+        tridiagonal form: on output a contains the orthogonal transformation
+        matrix, d contains the diagonal elements, and e the off-diagonal.
+      */
+{
+    int l, k, j, i;
+    double scale, hh, h, g, f;
+
+    for (i = n - 1; i >= 1; i--) {
+	l = i - 1;
+	h = scale = 0.0;
+	if (l > 0) {
+	    for (k = 0; k <= l; k++)
+		scale += fabs(a[i][k]);
+	    if (scale == 0.0)
+		e[i] = a[i][l];
+	    else {
+		for (k = 0; k <= l; k++) {
+		    a[i][k] /= scale;
+		    h += a[i][k] * a[i][k];
+		}
+		f = a[i][l];
+		g = f > 0 ? -sqrt(h) : sqrt(h);
+		e[i] = scale * g;
+		h -= f * g;
+		a[i][l] = f - g;
+		f = 0.0;
+		for (j = 0; j <= l; j++) {
+		    /* Next statement can be omitted if eigenvectors not wanted */
+		    a[j][i] = a[i][j] / h;
+		    g = 0.0;
+		    for (k = 0; k <= j; k++)
+			g += a[j][k] * a[i][k];
+		    for (k = j + 1; k <= l; k++)
+			g += a[k][j] * a[i][k];
+		    e[j] = g / h;
+		    f += e[j] * a[i][j];
+		}
+		hh = f / (h + h);
+		for (j = 0; j <= l; j++) {
+		    f = a[i][j];
+		    e[j] = g = e[j] - hh * f;
+		    for (k = 0; k <= j; k++)
+			a[j][k] -= (f * e[k] + g * a[i][k]);
+		}
+	    }
+	}
+	else
+	    e[i] = a[i][l];
+	d[i] = h;
+    }
+    /* Next statement can be omitted if eigenvectors not wanted */
+    d[0] = 0.0;
+    e[0] = 0.0;
+    /* Contents of this loop can be omitted if eigenvectors not
+       wanted except for statement d[i]=a[i][i]; */
+    for (i = 0; i < n; i++) {
+	l = i - 1;
+	if (d[i]) {
+	    for (j = 0; j <= l; j++) {
+		g = 0.0;
+		for (k = 0; k <= l; k++)
+		    g += a[i][k] * a[k][j];
+		for (k = 0; k <= l; k++)
+		    a[k][j] -= g * a[k][i];
+	    }
+	}
+	d[i] = a[i][i];
+	a[i][i] = 1.0;
+	for (j = 0; j <= l; j++)
+	    a[j][i] = a[i][j] = 0.0;
+    }
+}
+
+
+int tqli(double d[], double e[], int n, double **z)
+
+     /*
+        QL algorithm: compute eigenvalues and eigenvectors
+        of a symmetric tridiagonal matrix. On input, d holds the diagonal
+        and e the off-diagonal elements of the matrix (usually the
+        output of tred2), z the matrix output of tred2.
+        On output d holds the eigenvalues, z the eigenvectors.
+      */
+{
+    int m, l, iter, i, k;
+    double s, r, p, g, f, dd, c, b;
+
+    for (i = 1; i < n; i++)
+	e[i - 1] = e[i];
+    e[n - 1] = 0.0;
+    for (l = 0; l < n; l++) {
+	iter = 0;
+	do {
+	    for (m = l; m < n - 1; m++) {
+		dd = fabs(d[m]) + fabs(d[m + 1]);
+		if (fabs(e[m]) + dd == dd)
+		    break;
+	    }
+	    if (m != l) {
+		if (iter++ == MAX_ITERS)
+		    return 0;	/* Too many iterations in TQLI */
+		g = (d[l + 1] - d[l]) / (2.0 * e[l]);
+		r = sqrt((g * g) + 1.0);
+		g = d[m] - d[l] + e[l] / (g + SIGN(r, g));
+		s = c = 1.0;
+		p = 0.0;
+		for (i = m - 1; i >= l; i--) {
+		    f = s * e[i];
+		    b = c * e[i];
+		    if (fabs(f) >= fabs(g)) {
+			c = g / f;
+			r = sqrt((c * c) + 1.0);
+			e[i + 1] = f * r;
+			c *= (s = 1.0 / r);
+		    }
+		    else {
+			s = f / g;
+			r = sqrt((s * s) + 1.0);
+			e[i + 1] = g * r;
+			s *= (c = 1.0 / r);
+		    }
+		    g = d[i + 1] - p;
+		    r = (d[i] - g) * s + 2.0 * c * b;
+		    p = s * r;
+		    d[i + 1] = g + p;
+		    g = c * r - b;
+		    /* Next loop can be omitted if eigenvectors not wanted */
+		    for (k = 0; k < n; k++) {
+			f = z[k][i + 1];
+			z[k][i + 1] = s * z[k][i] + c * f;
+			z[k][i] = c * z[k][i] - s * f;
+		    }
+		}
+		d[l] = d[l] - p;
+		e[l] = g;
+		e[m] = 0.0;
+	    }
+	} while (m != l);
+    }
+    return 1;
+}
+
+
+
+
+void eigen_of_double_matrix(double **M, double **Vectors, double *lambda,
+			    int n)
+
+     /*
+        Computes eigenvalues (and eigenvectors if desired) for a
+        symmetric matrix M of dimension n x n.
+        Vectors and lambda contain the (obvious) output.
+      */
+{
+    int i, j;
+    double **a, *e;
+
+    a = (double **)G_calloc(n, sizeof(double *));
+    for (i = 0; i < n; i++)
+	a[i] = (double *)G_calloc(n, sizeof(double));
+
+    e = (double *)G_calloc(n, sizeof(double));
+
+    for (i = 0; i < n; i++)
+	for (j = 0; j < n; j++)
+	    a[i][j] = M[i][j];
+
+    tred2(a, n, lambda, e);
+    tqli(lambda, e, n, a);
+
+    /* Returns eigenvectors       */
+    for (i = 0; i < n; i++)
+	for (j = 0; j < n; j++)
+	    Vectors[i][j] = a[i][j];
+
+    for (i = 0; i < n; i++)
+	G_free(a[i]);
+    G_free(a);
+    G_free(e);
+}
+
+void eigsrt(double *d, double **z, int bands)
+
+     /* sorts eigenvalues d in descending order, and rearranges
+        eigenvectors z correspondingly. bands contains the number
+        of eigenvalues = rows of z = cols of z
+      */
+{
+    double p;
+    long i, j, k;
+
+    for (i = 0; i < (bands - 1); i++) {
+	p = d[k = i];
+	for (j = i + 1; j < bands; j++)
+	    if (d[j] >= p)
+		p = d[k = j];
+	/*
+	   interchange eigen values i and k and corresponding eigen vectors
+	 */
+	if (k != i) {
+	    d[k] = d[i];
+	    d[i] = p;
+	    for (j = 0; j < bands; j++) {
+		p = z[j][i];
+		z[j][i] = z[j][k];
+		z[j][k] = p;
+	    }
+	}
+    }
+}
+
+
+#undef MAX_ITERS

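For reference, a minimal sketch of the intended call sequence (eigen_example
and the allocations are illustrative; the prototypes are assumed to be
available from global.h as for the rest of PRLIB, and M must be symmetric):

    #include <grass/gis.h>
    #include "global.h"

    void eigen_example(double **M, int n)
    {
        int i;
        double **Vectors = (double **)G_calloc(n, sizeof(double *));
        double *lambda = (double *)G_calloc(n, sizeof(double));

        for (i = 0; i < n; i++)
            Vectors[i] = (double *)G_calloc(n, sizeof(double));

        /* tred2 + tqli run internally on a copy of M */
        eigen_of_double_matrix(M, Vectors, lambda, n);
        /* sort eigenvalues, and the matching columns of Vectors,
           in descending order */
        eigsrt(lambda, Vectors, n);
    }
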
Modified: grass-addons/grass7/imagery/i.pr/PRLIB/entropy.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/entropy.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/entropy.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,69 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   Entropy management
+ */
+
+#include "func.h"
+#include <math.h>
+
+double Entropy(double *data, int n, double zero)
+{
+    int i;
+    double sum;
+    double e;
+
+    sum = 0.0;
+    for (i = 0; i < n; i++)
+	sum += data[i];
+    if (fabs(sum - 1.0) > zero)
+	return (-1.0);
+    else {
+	e = 0.0;
+	for (i = 0; i < n; i++)
+	    e += Clog(data[i], zero);
+    }
+    return (e);
+}
+
+double Clog(double x, double zero)
+{
+    if (x - zero > 0.0)
+	return (-1.0 * x * log(x) / log(2));
+    else
+	return (0.0);
+}
+
+void histo(double *data, int n, double *h, int nbin)
+{
+    int i;
+
+    for (i = 0; i < nbin; i++)
+	h[i] = 0.0;
+    for (i = 0; i < n; i++) {
+	if (data[i] == 1.0)	/* data[i] == 1.0 would index h[nbin] */
+	    h[nbin - 1] += 1.0;
+	else
+	    h[(int)floor(data[i] * nbin)] += 1.0;
+    }
+    for (i = 0; i < nbin; i++)
+	h[i] /= (double)n;
+}
+
+
+void histo1(double *data, int n, int *h, int nbin)
+{
+    int j;
+
+    for (j = 0; j < nbin; j++)
+	h[j] = 0;
+    for (j = 0; j < n; j++) {
+	if (data[j] == 1.0) {
+	    h[nbin - 1] += 1;
+	}
+	else
+	    h[(int)floor(data[j] * nbin)] += 1;
+    }
+}

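Worked example for the routines above: with zero = 1e-10, Entropy on
p = (0.5, 0.5) returns 1 (Clog takes logs base 2, so a uniform two-way
split is one bit), p = (1.0, 0.0) returns 0, and any p that does not sum
to 1 within the zero tolerance returns -1.
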
Modified: grass-addons/grass7/imagery/i.pr/PRLIB/features.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/features.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/features.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,1200 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structure Feature management
+ */
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+
+void compute_features(Features * features)
+
+     /*
+        given a training structure in input, fill the features structure
+        according to specifications like
+        features->f_mean
+        features->f_variance
+        features->f_pca
+        features->f_texture (not yet implemented)
+      */
+{
+    int i, j, k, l;
+    char *mapset;
+    int fp;
+    struct Cell_head cellhd;
+    DCELL *rowbuf;
+    DCELL *tf;
+    DCELL **matrix;
+    int r, c;
+    char tempbuf[500];
+    int *compute_features;
+    int dim;
+    DCELL *mean = NULL, *sd = NULL;
+    int corrent_feature;
+    int *space_for_each_layer;
+    DCELL *projected = NULL;
+    DCELL **pca_matrix = NULL;
+    int ndata_for_pca = 0;
+    int index;
+    int thisclassok;
+    int addclass;
+
+    compute_features =
+	(int *)G_calloc(features->training.nlayers, sizeof(int));
+
+    features->nexamples = features->training.nexamples;
+    dim = features->training.rows * features->training.cols;
+
+    /*compute space */
+    space_for_each_layer = (int *)G_calloc(features->training.nlayers,
+					   sizeof(int));
+
+    features->examples_dim = 0;
+    for (j = 0; j < features->training.nlayers; j++) {
+	if (features->f_mean[0]) {
+	    for (i = 2; i < 2 + features->f_mean[1]; i++) {
+		if (features->f_mean[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features->f_variance[0]) {
+	    for (i = 2; i < 2 + features->f_variance[1]; i++) {
+		if (features->f_variance[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features->f_pca[0]) {
+	    for (i = 2; i < 2 + features->f_pca[1]; i++) {
+		if (features->f_pca[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += dim;
+		}
+	    }
+	}
+	if (space_for_each_layer[j] == 0) {
+	    space_for_each_layer[j] = dim;
+	}
+	features->examples_dim += space_for_each_layer[j];
+    }
+
+    /*alloc memory */
+    features->value =
+	(double **)G_calloc(features->nexamples, sizeof(double *));
+    for (i = 0; i < features->nexamples; i++) {
+	features->value[i] =
+	    (double *)G_calloc(features->examples_dim, sizeof(double));
+    }
+    features->class = (int *)G_calloc(features->nexamples, sizeof(int));
+
+    matrix = (double **)G_calloc(features->nexamples, sizeof(double *));
+    for (i = 0; i < features->nexamples; i++) {
+	matrix[i] = (double *)G_calloc(dim, sizeof(double));
+    }
+
+    mean = (double *)G_calloc(features->nexamples, sizeof(double));
+    sd = (double *)G_calloc(features->nexamples, sizeof(double));
+
+    /*copy classes */
+    for (i = 0; i < features->nexamples; i++) {
+	features->class[i] = features->training.class[i];
+    }
+
+    /*compute p_classes */
+    features->p_classes = (int *)G_calloc(1, sizeof(int));
+    features->nclasses = 1;
+    features->p_classes[0] = features->class[0];
+    for (i = 1; i < features->nexamples; i++) {
+	addclass = TRUE;
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		addclass = FALSE;
+	    }
+	}
+	if (addclass) {
+	    features->nclasses += 1;
+	    features->p_classes = (int *)G_realloc(features->p_classes,
+						   features->nclasses *
+						   sizeof(int));
+	    features->p_classes[features->nclasses - 1] = features->class[i];
+	}
+    }
+
+    /*space for pca */
+    if (features->f_pca[0]) {
+	features->pca =
+	    (Pca *) G_calloc(features->training.nlayers, sizeof(Pca));
+	for (i = 0; i < features->training.nlayers; i++) {
+	    inizialize_pca(&(features->pca[i]), dim);
+	}
+	projected = (double *)G_calloc(dim, sizeof(double));
+
+	if (features->pca_class[0] == 0) {
+	    fprintf(stderr,
+		    "principal components computed on all training data\n");
+	}
+	else {
+	    fprintf(stderr,
+		    "principal components computed on data of classes");
+	    for (l = 1; l <= features->pca_class[1]; l++) {
+		thisclassok = FALSE;
+		for (k = 0; k < features->nclasses; k++) {
+		    if (features->pca_class[1 + l] == features->p_classes[k]) {
+			thisclassok = TRUE;
+			fprintf(stderr, " %d", features->p_classes[k]);
+			break;
+		    }
+		}
+		if (!thisclassok) {
+		    sprintf(tempbuf,
+			    "compute_features-> Class %d for pc not recognized",
+			    features->pca_class[1 + l]);
+		    G_fatal_error(tempbuf);
+		}
+	    }
+	    fprintf(stderr, "\n");
+	}
+
+	ndata_for_pca = 0;
+	for (l = 0; l < features->nexamples; l++) {
+	    for (r = 2; r < (2 + features->pca_class[1]); r++) {
+		if (features->class[l] == features->pca_class[r]) {
+		    ndata_for_pca += 1;
+		}
+	    }
+	}
+	pca_matrix = (double **)G_calloc(ndata_for_pca, sizeof(double *));
+	for (l = 0; l < ndata_for_pca; l++) {
+	    pca_matrix[l] = (double *)G_calloc(dim, sizeof(double));
+	}
+    }
+
+
+    corrent_feature = 0;
+    for (j = 0; j < features->training.nlayers; j++) {
+	for (i = 0; i < features->nexamples; i++) {
+	    switch (features->training.data_type) {
+	    case GRASS_data:
+		fprintf(stdout, "%s\n", features->training.mapnames[i][j]);
+		if ((mapset =
+		     G_find_cell(features->training.mapnames[i][j],
+				 "")) == NULL) {
+		    sprintf(tempbuf,
+			    "compute_features-> Can't find raster map <%s>",
+			    features->training.mapnames[i][j]);
+		    G_fatal_error(tempbuf);
+		}
+		if ((fp =
+		     G_open_cell_old(features->training.mapnames[i][j],
+				     mapset)) < 0) {
+		    sprintf(tempbuf,
+			    "compute_features-> Can't open raster map <%s> for reading",
+			    features->training.mapnames[i][j]);
+		    G_fatal_error(tempbuf);
+		}
+
+		G_get_cellhd(features->training.mapnames[i][j], mapset,
+			     &cellhd);
+		G_set_window(&cellhd);
+		if ((cellhd.rows != features->training.rows) ||
+		    (cellhd.cols != features->training.cols)) {
+		    sprintf(tempbuf, "compute_features-> Dimension Error");
+		    G_fatal_error(tempbuf);
+		}
+		rowbuf = (DCELL *) G_calloc(dim, sizeof(DCELL));
+		tf = rowbuf;
+
+
+		for (r = 0; r < features->training.rows; r++) {
+		    G_get_d_raster_row(fp, tf, r);
+		    for (c = 0; c < features->training.cols; c++) {
+			if (G_is_d_null_value(tf))
+			    *tf = 0.0;
+			matrix[i][c + (r * features->training.cols)] = *tf;
+			tf++;
+		    }
+		}
+		G_free(rowbuf);
+
+		G_close_cell(fp);
+
+		break;
+	    case TABLE_data:
+		matrix[i] = features->training.data[i];
+		break;
+	    default:
+		sprintf(tempbuf, "compute_features-> Format not recognized");
+		G_fatal_error(tempbuf);
+		break;
+	    }
+	}
+
+	for (k = 0; k < features->nexamples; k++) {
+	    mean[k] = sd[k] = 0.0;
+	}
+	mean_and_sd_of_double_matrix_by_row(matrix, features->nexamples,
+					    dim, mean, sd);
+
+	if (features->f_normalize[0]) {
+	    for (i = 2; i < 2 + features->f_normalize[1]; i++) {
+		if (features->f_normalize[i] == j) {
+		    for (k = 0; k < features->nexamples; k++) {
+			for (r = 0; r < dim; r++) {
+			    matrix[k][r] = (matrix[k][r] - mean[k]) / sd[k];
+			}
+		    }
+		}
+	    }
+	}
+
+
+	if (!compute_features[j]) {
+	    for (i = 0; i < features->nexamples; i++) {
+		for (r = 0; r < dim; r++) {
+		    features->value[i][corrent_feature + r] = matrix[i][r];
+		}
+	    }
+	    corrent_feature += dim;
+	}
+	else {
+	    if (features->f_mean[0]) {
+		for (i = 2; i < 2 + features->f_mean[1]; i++) {
+		    if (features->f_mean[i] == j) {
+			for (k = 0; k < features->nexamples; k++) {
+			    features->value[k][corrent_feature] = mean[k];
+			}
+			corrent_feature += 1;
+		    }
+		}
+	    }
+
+	    if (features->f_variance[0]) {
+		for (i = 2; i < 2 + features->f_variance[1]; i++) {
+		    if (features->f_variance[i] == j) {
+			for (k = 0; k < features->nexamples; k++) {
+			    features->value[k][corrent_feature] =
+				sd[k] * sd[k];
+			}
+			corrent_feature += 1;
+		    }
+		}
+	    }
+
+	    if (features->f_pca[0]) {
+		for (i = 2; i < 2 + features->f_pca[1]; i++) {
+		    if (features->f_pca[i] == j) {
+			if (features->pca_class[0] == 0) {
+			    covariance_of_double_matrix(matrix,
+							features->nexamples,
+							dim,
+							features->pca[j].
+							covar);
+			}
+			else {
+			    index = 0;
+			    for (l = 0; l < features->nexamples; l++) {
+				for (r = 2; r < (2 + features->pca_class[1]);
+				     r++) {
+				    if (features->training.class[l] ==
+					features->pca_class[r]) {
+					pca_matrix[index++] = matrix[l];
+				    }
+				}
+			    }
+			    covariance_of_double_matrix(pca_matrix,
+							ndata_for_pca, dim,
+							features->pca[j].
+							covar);
+			}
+			eigen_of_double_matrix(features->pca[j].covar,
+					       features->pca[j].eigmat,
+					       features->pca[j].eigval, dim);
+			eigsrt(features->pca[j].eigval,
+			       features->pca[j].eigmat, dim);
+			for (l = 0; l < features->nexamples; l++) {
+			    product_double_vector_double_matrix(features->pca
+								[j].eigmat,
+								matrix[l],
+								dim, dim,
+								projected);
+			    for (r = 0; r < dim; r++) {
+				features->value[l][corrent_feature + r] =
+				    projected[r];
+			    }
+			}
+			corrent_feature += dim;
+		    }
+		}
+	    }
+	}
+    }
+
+
+    G_free(mean);
+    G_free(sd);
+    G_free(compute_features);
+}
+
+void write_features(char *file, Features * features)
+
+     /*
+        write the features into a file
+      */
+{
+    FILE *fp;
+    int i, j, l, k;
+    int dim;
+    char tempbuf[500];
+    int write_x;
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "write_features-> Can't open file %s for writing",
+		file);
+	G_fatal_error(tempbuf);
+    }
+
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "TRAINING: (%s)\n", features->training.file);
+    fprintf(fp, "#####################\n");
+
+    fprintf(fp, "Type of data:\n");
+    fprintf(fp, "%d\n", features->training.data_type);
+
+    fprintf(fp, "Number of layers:\n");
+    fprintf(fp, "%d\n", features->training.nlayers);
+
+    fprintf(fp, "Training dimensions:\n");
+    fprintf(fp, "%d\t%d\n", features->training.rows, features->training.cols);
+
+    dim = features->training.rows * features->training.cols;
+
+    fprintf(fp, "EW-res\tNS-res\n");
+    fprintf(fp, "%f\t%f\n", features->training.ew_res,
+	    features->training.ns_res);
+
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "FEATURES:\n");
+    fprintf(fp, "#####################\n");
+
+    fprintf(fp, "normalize:\n");
+    if (features->f_normalize[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_normalize[0],
+		features->f_normalize[1], features->f_normalize[2] + 1);
+	for (i = 3; i < 2 + features->f_normalize[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_normalize[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "standardize:\n");
+    if (features->f_standardize[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_standardize[0],
+		features->f_standardize[1], features->f_standardize[2] + 1);
+	for (i = 3; i < 2 + features->f_standardize[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_standardize[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "mean:\n");
+    if (features->f_mean[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_mean[0], features->f_mean[1],
+		features->f_mean[2] + 1);
+	for (i = 3; i < 2 + features->f_mean[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_mean[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "variance:\n");
+    if (features->f_variance[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_variance[0],
+		features->f_variance[1], features->f_variance[2] + 1);
+	for (i = 3; i < 2 + features->f_variance[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_variance[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "pca:\n");
+    if (features->f_pca[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_pca[0], features->f_pca[1],
+		features->f_pca[2] + 1);
+	for (i = 3; i < 2 + features->f_pca[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_pca[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "Number of classes:\n");
+    fprintf(fp, "%d\n", features->nclasses);
+
+    fprintf(fp, "Classes:\n");
+    fprintf(fp, "%d", features->p_classes[0]);
+    for (i = 1; i < features->nclasses; i++) {
+	fprintf(fp, "\t%d", features->p_classes[i]);
+    }
+    fprintf(fp, "\n");
+
+    fprintf(fp, "Standardization values:\n");
+    if (features->f_standardize[0]) {
+	fprintf(fp, "%f", features->mean[0]);
+	for (i = 1; i < features->f_standardize[1]; i++) {
+	    fprintf(fp, "\t%f", features->mean[i]);
+	}
+	fprintf(fp, "\n");
+	fprintf(fp, "%f", features->sd[0]);
+	for (i = 1; i < features->f_standardize[1]; i++) {
+	    fprintf(fp, "\t%f", features->sd[i]);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "NULL\n");
+	fprintf(fp, "NULL\n");
+    }
+
+    fprintf(fp, "Features dimensions:\n");
+    fprintf(fp, "%d\t%d\n", features->nexamples, features->examples_dim);
+
+    fprintf(fp, "Features:\n");
+
+    for (i = 0; i < features->training.nlayers; i++) {
+	write_x = TRUE;
+	if (features->f_mean[0]) {
+	    for (j = 2; j < 2 + features->f_mean[1]; j++) {
+		if (features->f_mean[j] == i) {
+		    write_x = FALSE;
+		    fprintf(fp, "l%d_mean\t", i + 1);
+		}
+	    }
+	}
+	if (features->f_variance[0]) {
+	    for (j = 2; j < 2 + features->f_variance[1]; j++) {
+		if (features->f_variance[j] == i) {
+		    write_x = FALSE;
+		    fprintf(fp, "l%d_variance\t", i + 1);
+		}
+	    }
+	}
+	if (features->f_pca[0]) {
+	    for (j = 2; j < 2 + features->f_pca[1]; j++) {
+		if (features->f_pca[j] == i) {
+		    write_x = FALSE;
+		    for (k = 0; k < dim; k++) {
+			fprintf(fp, "l%d_pc%d\t", i + 1, k + 1);
+		    }
+		}
+	    }
+	}
+
+	if (write_x) {
+	    for (j = 0; j < dim; j++) {
+		fprintf(fp, "l%d_x%d\t", i + 1, j + 1);
+	    }
+	}
+	if ((i + 1) == features->training.nlayers) {
+	    fprintf(fp, "class\n");
+	}
+    }
+
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->examples_dim; j++) {
+	    fprintf(fp, "%f\t", features->value[i][j]);
+	}
+	fprintf(fp, "%d\n", features->class[i]);
+    }
+
+    if (features->f_pca[0]) {
+
+	fprintf(fp, "#####################\n");
+	fprintf(fp, "PRINC. COMP.:");
+	if (features->pca_class[0] == 0) {
+	    fprintf(fp, " all classes used\n");
+	}
+	else {
+	    for (i = 0; i < features->pca_class[1]; i++) {
+		fprintf(fp, " %d", features->pca_class[2 + i]);
+	    }
+	    fprintf(fp, " classes used\n");
+	}
+	fprintf(fp, "#####################\n");
+	for (l = 2; l < 2 + features->f_pca[1]; l++) {
+	    fprintf(fp, "PCA: Layer %d\n", features->f_pca[l] + 1);
+	    write_pca(fp, &(features->pca[l - 2]));
+	}
+    }
+    fclose(fp);
+}
+
+void standardize_features(Features * features)
+
+     /*
+        standardize features according to the features->f_standardize array
+      */
+{
+    int j, k;
+
+    double *tmparray;
+    char tempbuf[500];
+
+    for (j = 2; j < 2 + features->f_standardize[1]; j++) {
+	if ((features->f_standardize[j] < 0) ||
+	    (features->f_standardize[j] >= features->examples_dim)) {
+	    sprintf(tempbuf,
+		    "standardize_features-> Can't standardize var number %d: no such variable",
+		    features->f_standardize[j] + 1);
+	    G_fatal_error(tempbuf);
+	}
+    }
+
+    tmparray = (double *)G_calloc(features->nexamples, sizeof(double));
+
+    features->mean =
+	(double *)G_calloc(features->f_standardize[1], sizeof(double));
+    features->sd =
+	(double *)G_calloc(features->f_standardize[1], sizeof(double));
+
+    for (j = 2; j < 2 + features->f_standardize[1]; j++) {
+	for (k = 0; k < features->nexamples; k++) {
+	    tmparray[k] = features->value[k][features->f_standardize[j]];
+	}
+	features->mean[j - 2] =
+	    mean_of_double_array(tmparray, features->nexamples);
+	features->sd[j - 2] =
+	    sd_of_double_array_given_mean(tmparray, features->nexamples,
+					  features->mean[j - 2]);
+	if (features->sd[j - 2] == 0) {
+	    sprintf(tempbuf,
+		    "standardize_features-> Can't standardize var number %d: sd=0",
+		    features->f_standardize[j] + 1);
+	    G_fatal_error(tempbuf);
+	}
+	for (k = 0; k < features->nexamples; k++) {
+	    features->value[k][features->f_standardize[j]] =
+		(tmparray[k] - features->mean[j - 2]) / features->sd[j - 2];
+	}
+    }
+
+    G_free(tmparray);
+}
+
+
+void write_header_features(FILE * fp, Features * features)
+
+     /*
+        write the features header into the given file
+      */
+{
+    int i;
+    int dim;
+
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "TRAINING:\n");
+    fprintf(fp, "#####################\n");
+
+    fprintf(fp, "Type of data:\n");
+    fprintf(fp, "%d\n", features->training.data_type);
+
+    fprintf(fp, "Number of layers:\n");
+    fprintf(fp, "%d\n", features->training.nlayers);
+
+    fprintf(fp, "Training dimensions:\n");
+    fprintf(fp, "%d\t%d\n", features->training.rows, features->training.cols);
+
+    dim = features->training.rows * features->training.cols;
+
+    fprintf(fp, "EW-res\tNS-res\n");
+    fprintf(fp, "%f\t%f\n", features->training.ew_res,
+	    features->training.ns_res);
+
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "FEATURES: (%s)\n", features->file);
+    fprintf(fp, "#####################\n");
+
+    fprintf(fp, "normalize:\n");
+    if (features->f_normalize[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_normalize[0],
+		features->f_normalize[1], features->f_normalize[2] + 1);
+	for (i = 3; i < 2 + features->f_normalize[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_normalize[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "standardize:\n");
+    if (features->f_standardize[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_standardize[0],
+		features->f_standardize[1], features->f_standardize[2] + 1);
+	for (i = 3; i < 2 + features->f_standardize[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_standardize[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "mean:\n");
+    if (features->f_mean[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_mean[0], features->f_mean[1],
+		features->f_mean[2] + 1);
+	for (i = 3; i < 2 + features->f_mean[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_mean[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "variance:\n");
+    if (features->f_variance[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_variance[0],
+		features->f_variance[1], features->f_variance[2] + 1);
+	for (i = 3; i < 2 + features->f_variance[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_variance[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "pca:\n");
+    if (features->f_pca[0]) {
+	fprintf(fp, "%d\t%d\t%d", features->f_pca[0], features->f_pca[1],
+		features->f_pca[2] + 1);
+	for (i = 3; i < 2 + features->f_pca[1]; i++) {
+	    fprintf(fp, "\t%d", features->f_pca[i] + 1);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "0\n");
+    }
+
+    fprintf(fp, "Number of classes:\n");
+    fprintf(fp, "%d\n", features->nclasses);
+
+    fprintf(fp, "Classes:\n");
+    fprintf(fp, "%d", features->p_classes[0]);
+    for (i = 1; i < features->nclasses; i++) {
+	fprintf(fp, "\t%d", features->p_classes[i]);
+    }
+    fprintf(fp, "\n");
+
+    fprintf(fp, "Standardization values:\n");
+    if (features->f_standardize[0]) {
+	fprintf(fp, "%f", features->mean[0]);
+	for (i = 1; i < features->f_standardize[1]; i++) {
+	    fprintf(fp, "\t%f", features->mean[i]);
+	}
+	fprintf(fp, "\n");
+	fprintf(fp, "%f", features->sd[0]);
+	for (i = 1; i < features->f_standardize[1]; i++) {
+	    fprintf(fp, "\t%f", features->sd[i]);
+	}
+	fprintf(fp, "\n");
+    }
+    else {
+	fprintf(fp, "NULL\n");
+	fprintf(fp, "NULL\n");
+    }
+
+
+    fprintf(fp, "Features dimensions:\n");
+    fprintf(fp, "%d\t%d\n", features->nexamples, features->examples_dim);
+
+
+}
+
+
+
+void read_features(char *file, Features * features, int npc)
+
+     /*
+        read the features from a file. If a pca structure is contained
+        within features, only the first npc components are loaded. If npc < 0
+        all the principal components are loaded.
+      */
+{
+    FILE *fp;
+    char tempbuf[500];
+    char *line = NULL;
+    int i, j, l, r;
+    int dim;
+    int *features_to_be_loaded;
+    int orig_dim;
+    int index;
+    int corrent_feature;
+
+    fp = fopen(file, "r");
+    if (fp == NULL) {
+	sprintf(tempbuf, "read_features-> Can't open file %s for reading",
+		file);
+	G_fatal_error(tempbuf);
+    }
+
+    features->file = file;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &(features->training.data_type));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &(features->training.nlayers));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d%d", &(features->training.rows),
+	   &(features->training.cols));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf%lf", &(features->training.ew_res),
+	   &(features->training.ns_res));
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_normalize = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_normalize[0]));
+    if (features->f_normalize[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_normalize[1]));
+	features->f_normalize = (int *)G_realloc(features->f_normalize,
+						 (2 +
+						  features->f_normalize[1]) *
+						 sizeof(int));
+	for (i = 0; i < features->f_normalize[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_normalize[i + 2]));
+	    features->f_normalize[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_standardize = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_standardize[0]));
+    if (features->f_standardize[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_standardize[1]));
+	features->f_standardize = (int *)G_realloc(features->f_standardize,
+						   (2 +
+						    features->f_standardize
+						    [1]) * sizeof(int));
+	for (i = 0; i < features->f_standardize[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_standardize[i + 2]));
+	    features->f_standardize[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_mean = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_mean[0]));
+    if (features->f_mean[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_mean[1]));
+	features->f_mean = (int *)G_realloc(features->f_mean,
+					    (2 +
+					     features->f_mean[1]) *
+					    sizeof(int));
+	for (i = 0; i < features->f_mean[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_mean[i + 2]));
+	    features->f_mean[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_variance = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_variance[0]));
+    if (features->f_variance[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_variance[1]));
+	features->f_variance = (int *)G_realloc(features->f_variance,
+						(2 +
+						 features->f_variance[1]) *
+						sizeof(int));
+	for (i = 0; i < features->f_variance[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_variance[i + 2]));
+	    features->f_variance[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_pca = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_pca[0]));
+    if (features->f_pca[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_pca[1]));
+	features->f_pca = (int *)G_realloc(features->f_pca,
+					   (2 +
+					    features->f_pca[1]) *
+					   sizeof(int));
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_pca[i + 2]));
+	    features->f_pca[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &(features->nclasses));
+    features->p_classes = (int *)G_calloc(features->nclasses, sizeof(int));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < features->nclasses; i++) {
+	sscanf(line, "%d", &(features->p_classes[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+
+    if (!features->f_standardize[0]) {
+	line = GetLine(fp);
+	line = GetLine(fp);
+	line = GetLine(fp);
+    }
+    else {
+	features->mean =
+	    (double *)G_calloc(features->f_standardize[1], sizeof(double));
+	features->sd =
+	    (double *)G_calloc(features->f_standardize[1], sizeof(double));
+	line = GetLine(fp);
+	line = GetLine(fp);
+	for (i = 0; i < features->f_standardize[1]; i++) {
+	    sscanf(line, "%lf", &(features->mean[i]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+	line = GetLine(fp);
+	for (i = 0; i < features->f_standardize[1]; i++) {
+	    sscanf(line, "%lf", &(features->sd[i]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+    }
+
+
+    if (features->f_pca[0]) {
+	features->pca = (Pca *) G_calloc(features->f_pca[1], sizeof(Pca));
+    }
+
+
+    if (npc > (features->training.rows * features->training.cols)) {
+	npc = features->training.rows * features->training.cols;
+    }
+
+    dim = features->training.rows * features->training.cols;
+    line = GetLine(fp);
+    line = GetLine(fp);
+    if ((!features->f_pca[0]) || (features->f_pca[0] && npc < 0)) {
+	sscanf(line, "%d%d", &(features->nexamples),
+	       &(features->examples_dim));
+
+	features->value =
+	    (double **)G_calloc(features->nexamples, sizeof(double *));
+	for (i = 0; i < features->nexamples; i++) {
+	    features->value[i] =
+		(double *)G_calloc(features->examples_dim, sizeof(double));
+	}
+	features->class = (int *)G_calloc(features->nexamples, sizeof(int));
+
+	line = GetLine(fp);
+	line = GetLine(fp);
+	for (i = 0; i < features->nexamples; i++) {
+	    line = GetLine(fp);
+	    for (j = 0; j < features->examples_dim; j++) {
+		sscanf(line, "%lf", &(features->value[i][j]));
+		line = (char *)strchr(line, '\t');
+		line++;
+	    }
+	    sscanf(line, "%d", &(features->class[i]));
+	}
+    }
+    else {
+	sscanf(line, "%d%d", &(features->nexamples), &orig_dim);
+
+	features_to_be_loaded = (int *)G_calloc(orig_dim, sizeof(int));
+
+	corrent_feature = 0;
+	features->examples_dim = 0;
+	for (i = 0; i < features->training.nlayers; i++) {
+	    if (features->f_mean[0]) {
+		for (j = 2; j < 2 + features->f_mean[1]; j++) {
+		    if (features->f_mean[j] == i) {
+			features_to_be_loaded[corrent_feature] = 1;
+			corrent_feature += 1;
+			features->examples_dim += 1;
+		    }
+		}
+	    }
+	    if (features->f_variance[0]) {
+		for (j = 2; j < 2 + features->f_variance[1]; j++) {
+		    if (features->f_variance[j] == i) {
+			features_to_be_loaded[corrent_feature] = 1;
+			corrent_feature += 1;
+			features->examples_dim += 1;
+		    }
+		}
+	    }
+	    if (features->f_pca[0]) {
+		for (j = 2; j < 2 + features->f_pca[1]; j++) {
+		    if (features->f_pca[j] == i) {
+			for (r = 0; r < npc; r++) {
+			    features_to_be_loaded[corrent_feature + r] = 1;
+			}
+			corrent_feature += dim;
+			features->examples_dim += npc;
+		    }
+		}
+	    }
+
+	}
+
+	features->value =
+	    (double **)G_calloc(features->nexamples, sizeof(double *));
+	for (i = 0; i < features->nexamples; i++) {
+	    features->value[i] = (double *)G_calloc(features->examples_dim,
+						    sizeof(double));
+	}
+	features->class = (int *)G_calloc(features->nexamples, sizeof(int));
+
+	line = GetLine(fp);
+	line = GetLine(fp);
+	for (i = 0; i < features->nexamples; i++) {
+	    line = GetLine(fp);
+	    index = 0;
+	    for (j = 0; j < orig_dim; j++) {
+		if (features_to_be_loaded[j] == 1) {
+		    sscanf(line, "%lf", &(features->value[i][index++]));
+		}
+		line = (char *)strchr(line, '\t');
+		line++;
+	    }
+	    sscanf(line, "%d", &(features->class[i]));
+	}
+
+    }
+    if (features->f_pca[0]) {
+	line = GetLine(fp);
+
+	for (l = 0; l < features->f_pca[1]; l++) {
+	    features->pca[l].n =
+		features->training.rows * features->training.cols;
+	    read_pca(fp, &(features->pca[l]));
+	}
+    }
+    fclose(fp);
+}
+
+
+void read_header_features(FILE * fp, Features * features)
+
+     /*
+        read the features header from the given file
+      */
+{
+    char *line = NULL;
+    int i;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &(features->training.data_type));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &(features->training.nlayers));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d%d", &(features->training.rows),
+	   &(features->training.cols));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf%lf", &(features->training.ew_res),
+	   &(features->training.ns_res));
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_normalize = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_normalize[0]));
+    if (features->f_normalize[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_normalize[1]));
+	features->f_normalize = (int *)G_realloc(features->f_normalize,
+						 (2 +
+						  features->f_normalize[1]) *
+						 sizeof(int));
+	for (i = 0; i < features->f_normalize[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_normalize[i + 2]));
+	    features->f_normalize[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_standardize = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_standardize[0]));
+    if (features->f_standardize[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_standardize[1]));
+	features->f_standardize = (int *)G_realloc(features->f_standardize,
+						   (2 +
+						    features->f_standardize
+						    [1]) * sizeof(int));
+	for (i = 0; i < features->f_standardize[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_standardize[i + 2]));
+	    features->f_standardize[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_mean = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_mean[0]));
+    if (features->f_mean[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_mean[1]));
+	features->f_mean = (int *)G_realloc(features->f_mean,
+					    (2 +
+					     features->f_mean[1]) *
+					    sizeof(int));
+	for (i = 0; i < features->f_mean[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_mean[i + 2]));
+	    features->f_mean[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_variance = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_variance[0]));
+    if (features->f_variance[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_variance[1]));
+	features->f_variance = (int *)G_realloc(features->f_variance,
+						(2 +
+						 features->f_variance[1]) *
+						sizeof(int));
+	for (i = 0; i < features->f_variance[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_variance[i + 2]));
+	    features->f_variance[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    features->f_pca = (int *)G_calloc(2, sizeof(int));
+    sscanf(line, "%d", &(features->f_pca[0]));
+    if (features->f_pca[0]) {
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &(features->f_pca[1]));
+	features->f_pca = (int *)G_realloc(features->f_pca,
+					   (2 +
+					    features->f_pca[1]) *
+					   sizeof(int));
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &(features->f_pca[i + 2]));
+	    features->f_pca[i + 2] -= 1;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &(features->nclasses));
+    features->p_classes = (int *)G_calloc(features->nclasses, sizeof(int));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < features->nclasses; i++) {
+	sscanf(line, "%d", &(features->p_classes[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+
+    if (!features->f_standardize[0]) {
+	line = GetLine(fp);
+	line = GetLine(fp);
+	line = GetLine(fp);
+    }
+    else {
+	features->mean =
+	    (double *)G_calloc(features->f_standardize[1], sizeof(double));
+	features->sd =
+	    (double *)G_calloc(features->f_standardize[1], sizeof(double));
+	line = GetLine(fp);
+	line = GetLine(fp);
+	for (i = 0; i < features->f_standardize[1]; i++) {
+	    sscanf(line, "%lf", &(features->mean[i]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+	line = GetLine(fp);
+	for (i = 0; i < features->f_standardize[1]; i++) {
+	    sscanf(line, "%lf", &(features->sd[i]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d%d", &(features->nexamples), &(features->examples_dim));
+}
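+
+#ifdef PR_EXAMPLE
+/* A minimal usage sketch of the header reader, assuming a features file
+   produced by write_header_features(); the block is not compiled by
+   default and the file name is caller-supplied.  The f_* arrays follow
+   the convention f_X[0] = on/off flag, f_X[1] = number of layers,
+   f_X[2..] = 0-based layer indices. */
+static void example_read_header(char *file)
+{
+    Features features;
+    FILE *fp;
+
+    fp = fopen(file, "r");
+    if (fp == NULL)
+	return;
+    read_header_features(fp, &features);
+    fclose(fp);
+    fprintf(stdout, "examples: %d\tdim: %d\n", features.nexamples,
+	    features.examples_dim);
+}
+#endif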

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/features_selection.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/features_selection.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/features_selection.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,713 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   and Maria Serafini
+
+   for
+
+   Feature Selection with SVM
+ */
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+
+void compute_valoriDJ(svm, features, H_tot, H_tmp, valoriDJ)
+     SupportVectorMachine *svm;
+     Features *features;
+     double **H_tot, **H_tmp, **valoriDJ;
+{
+    double diag, resto;
+    int i, j, t;
+    double DJ;
+
+    (*valoriDJ) = (double *)G_calloc(svm->d, sizeof(double));
+
+
+    diag = 0;
+    resto = 0;
+
+    for (i = 0; i < features->nexamples; i++) {
+	if (svm->alph[i] != 0) {
+	    diag = diag + svm->alph[i] * svm->alph[i] * H_tot[i][i];
+	    for (j = i + 1; j < features->nexamples; j++) {
+		if (svm->alph[j] != 0) {
+		    resto = resto + svm->alph[i] * svm->alph[j] * H_tot[i][j];
+		}
+	    }
+	}
+    }
+
+    DJ = 0.5 * diag + resto;
+
+    for (i = 0; i < svm->d; i++) {
+	compute_H_perdiff(H_tot, H_tmp, features->value,
+			  features->nexamples, svm->two_sigma_squared, i);
+
+	(*valoriDJ)[i] = 0;
+
+	diag = 0;
+	resto = 0;
+
+	for (t = 0; t < features->nexamples; t++) {
+	    if (svm->alph[t] != 0) {
+		diag = diag + svm->alph[t] * svm->alph[t] * H_tmp[t][t];
+		for (j = t + 1; j < features->nexamples; j++) {
+		    if (svm->alph[j] != 0) {
+			resto =
+			    resto + svm->alph[t] * svm->alph[j] * H_tmp[t][j];
+		    }
+		}
+	    }
+	}
+
+	(*valoriDJ)[i] = 0.5 * diag + resto;
+
+	(*valoriDJ)[i] = DJ - (*valoriDJ)[i];
+    }
+
+}
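+
+#ifdef PR_EXAMPLE
+/* A sketch of the ranking criterion computed above, under the stated
+   convention: DJ = 0.5 * sum_{i,j} alpha_i alpha_j H[i][j] is the SVM
+   dual objective term (H already carries the labels), and
+   valoriDJ[f] = DJ - DJ(-f) is the decrease obtained when feature f is
+   removed from the Gaussian kernel.  Typical call sequence, assuming
+   H_tot and H_tmp are preallocated nexamples x nexamples matrices and
+   svm is a trained Gaussian-kernel machine; not compiled by default. */
+static void example_valoriDJ(SupportVectorMachine *svm, Features *features,
+			     double **H_tot, double **H_tmp)
+{
+    double *valoriDJ;
+
+    compute_H(H_tot, features->value, features->class, features->nexamples,
+	      features->examples_dim, svm->two_sigma_squared);
+    compute_valoriDJ(svm, features, H_tot, H_tmp, &valoriDJ);
+    G_free(valoriDJ);
+}
+#endif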
+
+void free_svm(svm)
+     SupportVectorMachine *svm;
+{
+    int j;
+
+    for (j = 0; j < svm->N; j++) {
+	G_free(svm->dense_points[j]);
+    }
+    G_free(svm->dense_points);
+    G_free(svm->target);
+    G_free(svm->Cw);
+    G_free(svm->alph);
+    G_free(svm->w);
+    G_free(svm->error_cache);
+    G_free(svm->precomputed_self_dot_product);
+}
+
+
+void e_rfe_lin(svm, features, names, selected, i, rimanenti, fp_fs_w,
+	       fp_fs_stats)
+     SupportVectorMachine *svm;
+     Features *features;
+     int *names, *selected;
+     int i;
+     int *rimanenti;
+     FILE *fp_fs_w, *fp_fs_stats;
+{
+    double *wsquare;
+    int j;
+    int *h;
+    int nbin;
+    double *pi;
+    double entro;
+    double maxentro, media;
+    int nel;
+    int *eliminati;
+    double *valoriDJ;
+    int index;
+    double lim;
+    double *wlog;
+    int minorimedia;
+    double mediamod;
+    int conv;
+    int *sortindex, *indexordinati;
+    int k, t;
+
+    wsquare = (double *)G_calloc(*rimanenti, sizeof(double));
+
+    for (j = 0; j < *rimanenti; j++) {
+	wsquare[j] = svm->w[j] * svm->w[j];
+    }
+
+
+    if (fp_fs_w != NULL) {
+	fprintf(fp_fs_w, "%6.10f", wsquare[0]);
+	for (j = 1; j < *rimanenti; j++) {
+	    fprintf(fp_fs_w, "\t%f", wsquare[j]);
+	}
+	fprintf(fp_fs_w, "\n");
+    }
+
+    nbin = (int)floor(sqrt(*rimanenti));
+    traslo(wsquare, *rimanenti);
+    h = (int *)G_calloc(nbin, sizeof(int));
+    histo1(wsquare, *rimanenti, h, nbin);
+    pi = (double *)G_calloc(nbin, sizeof(double));
+    for (j = 0; j < nbin; j++) {
+	pi[j] = (double)h[j] / (*rimanenti);
+    }
+    entro = Entropy(pi, nbin, 0.000001);
+
+    if (entro < 0.0) {
+	fprintf(stderr, "problem computing the entropy\n");
+	exit(1);
+    }
+
+    maxentro = log(nbin) / log(2);
+
+    media = 0.0;
+    for (j = 0; j < *rimanenti; j++) {
+	media += wsquare[j];
+    }
+    media /= *rimanenti;
+
+    if (entro > 0.5 * maxentro && media > 0.2) {
+
+	nel = h[0];
+
+	if (fp_fs_stats != NULL)
+	    fprintf(fp_fs_stats, "%d\tunif\t%f\t%f\t%d\n",
+		    *rimanenti, entro, maxentro, nel);
+
+	eliminati = (int *)G_calloc(nel, sizeof(int));
+	valoriDJ = (double *)G_calloc(nel, sizeof(double));
+
+	index = 0;
+
+	lim = (double)1 / nbin;
+
+	for (j = 0; j < *rimanenti; j++) {
+	    if (wsquare[j] <= lim) {
+		eliminati[index] = j;
+		valoriDJ[index] = wsquare[j];
+		index += 1;
+	    }
+	}
+
+    }
+    else {
+
+	wlog = (double *)G_calloc(*rimanenti, sizeof(double));
+
+	for (j = 0; j < *rimanenti; j++) {
+	    wlog[j] = log(wsquare[j] + 1.0);
+	}
+
+	media = 0.0;
+
+	for (j = 0; j < *rimanenti; j++) {
+	    media += wlog[j];
+	}
+
+	media /= *rimanenti;
+
+	minorimedia = 0;
+
+	for (j = 0; j < *rimanenti; j++) {
+	    if (wlog[j] < media) {
+		minorimedia += 1;
+	    }
+	}
+
+	mediamod = media;
+
+	conv = 0;
+
+	while (conv == 0) {
+	    nel = 0;
+	    mediamod = 0.5 * mediamod;
+
+	    for (j = 0; j < *rimanenti; j++) {
+		if (wlog[j] < mediamod) {
+		    nel += 1;
+		}
+	    }
+
+	    if (nel <= 0.5 * minorimedia && nel != 0) {
+		conv = 1;
+
+		eliminati = (int *)G_calloc(nel, sizeof(int));
+		valoriDJ = (double *)G_calloc(nel, sizeof(double));
+
+		index = 0;
+
+		for (j = 0; j < *rimanenti; j++) {
+		    if (wlog[j] < mediamod) {
+			eliminati[index] = j;
+			valoriDJ[index] = wlog[j];
+			index += 1;
+		    }
+		}
+
+	    }
+	}
+
+	if (fp_fs_stats != NULL)
+	    fprintf(fp_fs_stats, "%d\tnon-unif\t%f\t%f\t%d\n",
+		    *rimanenti, entro, maxentro, nel);
+
+	G_free(wlog);
+    }
+
+    sortindex = (int *)G_calloc(nel, sizeof(int));
+
+    indexx_1(nel, valoriDJ, sortindex);
+
+    indexordinati = (int *)G_calloc(nel, sizeof(int));
+
+    for (j = 0; j < nel; j++) {
+	indexordinati[j] = eliminati[sortindex[j]];
+    }
+
+    for (j = 0; j < nel; j++) {
+	selected[*rimanenti - j - 1] = names[indexordinati[j]];
+    }
+
+    for (j = 0; j < nel; j++) {
+	for (k = eliminati[j]; k < (*rimanenti - 1); k++) {
+	    for (t = 0; t < features->nexamples; t++) {
+		features->value[t][k] = features->value[t][k + 1];
+	    }
+	    names[k] = names[k + 1];
+	}
+	for (k = j + 1; k < nel; k++) {
+	    eliminati[k]--;
+	}
+	(*rimanenti)--;
+    }
+
+    G_free(sortindex);
+    G_free(indexordinati);
+    G_free(pi);
+    G_free(h);
+    G_free(eliminati);
+    G_free(valoriDJ);
+    G_free(wsquare);
+
+}
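+
+/* Note on the elimination strategy used by e_rfe_lin (and by e_rfe_gauss
+   below): the scores are rescaled to [0,1] (traslo) and histogrammed into
+   floor(sqrt(n)) bins; if the histogram is close to uniform (entropy above
+   half of the maximum log2(nbin) and mean above 0.2) the whole first bin
+   is eliminated, otherwise a threshold obtained by repeatedly halving the
+   mean of the log-scores is used, eliminating at most half of the
+   below-mean features per step. */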
+
+void e_rfe_gauss(valoriDJ, features, names, selected, i, H_tot, H_tmp,
+		 rimanenti, svm_kp, fp_fs_w, fp_fs_stats)
+     double *valoriDJ;
+     Features *features;
+     double **H_tot, **H_tmp;
+     int *names, *selected;
+     int i;
+     int *rimanenti;
+     double svm_kp;
+     FILE *fp_fs_w, *fp_fs_stats;
+{
+    int j;
+    int *h;
+    int nbin;
+    double *pi;
+    double entro;
+    double maxentro, media;
+    int nel;
+    int *eliminati;
+    double *valorieliminati;
+    int index;
+    double lim;
+    double *wlog;
+    int minorimedia;
+    double mediamod;
+    int conv;
+    int *sortindex, *indexordinati;
+    int k, t;
+
+
+    if (fp_fs_w != NULL) {
+	fprintf(fp_fs_w, "%6.10f", valoriDJ[0]);
+	for (j = 1; j < *rimanenti; j++) {
+	    fprintf(fp_fs_w, "\t%6.10f", valoriDJ[j]);
+	}
+	fprintf(fp_fs_w, "\n");
+    }
+
+    nbin = (int)floor(sqrt(*rimanenti));
+    traslo(valoriDJ, *rimanenti);
+    h = (int *)G_calloc(nbin, sizeof(int));
+    histo1(valoriDJ, *rimanenti, h, nbin);
+    pi = (double *)G_calloc(nbin, sizeof(double));
+    for (j = 0; j < nbin; j++) {
+	pi[j] = (double)h[j] / (*rimanenti);
+    }
+    entro = Entropy(pi, nbin, 0.000001);
+
+    if (entro < 0.0) {
+	fprintf(stderr, "problem computing the entropy\n");
+	exit(1);
+    }
+
+    maxentro = log(nbin) / log(2);
+    media = 0.0;
+    for (j = 0; j < *rimanenti; j++) {
+	media += valoriDJ[j];
+    }
+    media /= *rimanenti;
+
+    if (entro > 0.5 * maxentro && media > 0.2) {
+
+	nel = h[0];
+
+	if (fp_fs_stats != NULL)
+	    fprintf(fp_fs_stats, "%d\tunif\t%f\t%f\t%d\n",
+		    *rimanenti, entro, maxentro, nel);
+
+	eliminati = (int *)G_calloc(nel, sizeof(int));
+	valorieliminati = (double *)G_calloc(nel, sizeof(double));
+
+	index = 0;
+
+	lim = (double)1 / nbin;
+
+	for (j = 0; j < *rimanenti; j++) {
+	    if (valoriDJ[j] <= lim) {
+		eliminati[index] = j;
+		valorieliminati[index] = valoriDJ[j];
+		index += 1;
+	    }
+	}
+
+    }
+    else {
+
+	wlog = (double *)G_calloc(*rimanenti, sizeof(double));
+
+	for (j = 0; j < *rimanenti; j++) {
+	    wlog[j] = log(valoriDJ[j] + 1.0);
+	}
+
+	media = 0.0;
+
+	for (j = 0; j < *rimanenti; j++) {
+	    media += wlog[j];
+	}
+
+	media /= *rimanenti;
+
+	minorimedia = 0;
+
+	for (j = 0; j < *rimanenti; j++) {
+	    if (wlog[j] < media) {
+		minorimedia += 1;
+	    }
+	}
+
+	mediamod = media;
+
+	conv = 0;
+
+	while (conv == 0) {
+	    nel = 0;
+	    mediamod = 0.5 * mediamod;
+
+	    for (j = 0; j < *rimanenti; j++) {
+		if (wlog[j] < mediamod) {
+		    nel += 1;
+		}
+	    }
+
+	    if (nel <= 0.5 * minorimedia && nel != 0) {
+		conv = 1;
+
+		eliminati = (int *)G_calloc(nel, sizeof(int));
+		valorieliminati = (double *)G_calloc(nel, sizeof(double));
+
+		index = 0;
+
+		for (j = 0; j < *rimanenti; j++) {
+		    if (wlog[j] < mediamod) {
+			eliminati[index] = j;
+			valorieliminati[index] = wlog[j];
+			index += 1;
+		    }
+		}
+	    }
+	}
+
+
+	if (fp_fs_stats != NULL)
+	    fprintf(fp_fs_stats, "%d\tnon-unif\t%f\t%f\t%d\n",
+		    *rimanenti, entro, maxentro, nel);
+
+
+	G_free(wlog);
+    }
+
+    sortindex = (int *)G_calloc(nel, sizeof(int));
+
+    indexx_1(nel, valorieliminati, sortindex);
+
+    indexordinati = (int *)G_calloc(nel, sizeof(int));
+
+    for (j = 0; j < nel; j++) {
+	indexordinati[j] = eliminati[sortindex[j]];
+    }
+
+    for (j = 0; j < nel; j++) {
+	selected[*rimanenti - j - 1] = names[indexordinati[j]];
+    }
+
+
+    for (k = 0; k < nel; k++) {
+
+	compute_H_perdiff(H_tot, H_tmp, features->value, features->nexamples,
+			  svm_kp, eliminati[k]);
+
+	for (j = 0; j < features->nexamples; j++) {
+	    for (t = 0; t < features->nexamples; t++) {
+		H_tot[j][t] = H_tmp[j][t];
+	    }
+	}
+    }
+
+
+    for (j = 0; j < nel; j++) {
+	for (k = eliminati[j]; k < (*rimanenti - 1); k++) {
+	    for (t = 0; t < features->nexamples; t++) {
+		features->value[t][k] = features->value[t][k + 1];
+	    }
+	    names[k] = names[k + 1];
+	}
+	for (k = j + 1; k < nel; k++) {
+	    eliminati[k]--;
+	}
+	(*rimanenti)--;
+    }
+
+    G_free(sortindex);
+    G_free(indexordinati);
+    G_free(pi);
+    G_free(h);
+    G_free(eliminati);
+    G_free(valorieliminati);
+
+}
+
+
+void one_rfe_lin(svm, names, selected, fp_fs_w)
+     SupportVectorMachine *svm;
+     int *names, *selected;
+     FILE *fp_fs_w;
+{
+    double *wsquare;
+    int i, j;
+    int *sortindex;
+
+    wsquare = (double *)G_calloc(svm->d, sizeof(double));
+    sortindex = (int *)G_calloc(svm->d, sizeof(int));
+
+
+    for (j = 0; j < svm->d; j++) {
+	wsquare[j] = svm->w[j] * svm->w[j];
+    }
+
+    if (fp_fs_w != NULL) {
+	fprintf(fp_fs_w, "%6.10f", wsquare[0]);
+	for (j = 1; j < svm->d; j++) {
+	    fprintf(fp_fs_w, "\t%6.10f", wsquare[j]);
+	}
+	fprintf(fp_fs_w, "\n");
+    }
+
+    indexx_1(svm->d, wsquare, sortindex);
+
+    for (i = 0; i < svm->d; i++) {
+	selected[svm->d - i - 1] = names[sortindex[i]];
+    }
+
+    G_free(wsquare);
+    G_free(sortindex);
+}
+
+void one_rfe_gauss(valoriDJ, names, selected, n, fp_fs_w)
+     double *valoriDJ;
+     int *names, *selected;
+     int n;
+     FILE *fp_fs_w;
+{
+    int i, j;
+    int *sortindex;
+
+    if (fp_fs_w != NULL) {
+	fprintf(fp_fs_w, "%6.10f", valoriDJ[0]);
+	for (j = 1; j < n; j++) {
+	    fprintf(fp_fs_w, "\t%6.10f", valoriDJ[j]);
+	}
+	fprintf(fp_fs_w, "\n");
+    }
+
+    sortindex = (int *)G_calloc(n, sizeof(int));
+
+
+    indexx_1(n, valoriDJ, sortindex);
+
+    for (i = 0; i < n; i++) {
+	selected[n - i - 1] = names[sortindex[i]];
+    }
+
+    G_free(sortindex);
+}
+
+
+void rfe_lin(svm, features, names, selected, i, fp_fs_w)
+     SupportVectorMachine *svm;
+     Features *features;
+     int *names, *selected;
+     int i;
+     FILE *fp_fs_w;
+{
+    double *wsquare;
+    double wmin;
+    int wmin_index;
+    int j, t;
+
+
+    wsquare = (double *)G_calloc(svm->d, sizeof(double));
+
+    for (j = 0; j < svm->d; j++) {
+	wsquare[j] = svm->w[j] * svm->w[j];
+    }
+
+
+    if (fp_fs_w != NULL) {
+	fprintf(fp_fs_w, "%6.10f", wsquare[0]);
+	for (j = 1; j < svm->d; j++) {
+	    fprintf(fp_fs_w, "\t%6.10f", wsquare[j]);
+	}
+	fprintf(fp_fs_w, "\n");
+    }
+
+    wmin = wsquare[0];
+    wmin_index = 0;
+
+    for (j = 1; j < svm->d; j++) {
+	if (wmin > wsquare[j]) {
+	    wmin = wsquare[j];
+	    wmin_index = j;
+	}
+    }
+
+    selected[features->examples_dim - i - 1] = names[wmin_index];
+
+    /* shift the remaining features left by one; i features have already
+       been eliminated, as in rfe_gauss below */
+    for (j = wmin_index; j < features->examples_dim - i - 1; j++) {
+	for (t = 0; t < features->nexamples; t++) {
+	    features->value[t][j] = features->value[t][j + 1];
+	}
+	names[j] = names[j + 1];
+    }
+    G_free(wsquare);
+}
+
+void rfe_gauss(valoriDJ, features, names, selected, i, H_tot, H_tmp, svm_kp,
+	       fp_fs_w)
+     Features *features;
+     double *valoriDJ;
+     int *names, *selected;
+     double **H_tot, **H_tmp;
+     int i;
+     double svm_kp;
+     FILE *fp_fs_w;
+{
+    double wmin;
+    int wmin_index;
+    int j, t;
+
+
+    if (fp_fs_w != NULL) {
+	fprintf(fp_fs_w, "%6.10f", valoriDJ[0]);
+	for (j = 1; j < features->examples_dim - i; j++) {
+	    fprintf(fp_fs_w, "\t%6.10f", valoriDJ[j]);
+	}
+	fprintf(fp_fs_w, "\n");
+    }
+
+    wmin = valoriDJ[0];
+    wmin_index = 0;
+
+    for (j = 1; j < features->examples_dim - i; j++) {
+	if (wmin > valoriDJ[j]) {
+	    wmin = valoriDJ[j];
+	    wmin_index = j;
+	}
+    }
+
+    selected[features->examples_dim - i - 1] = names[wmin_index];
+
+
+    compute_H_perdiff(H_tot, H_tmp, features->value, features->nexamples,
+		      svm_kp, wmin_index);
+
+    for (j = 0; j < features->nexamples; j++) {
+	for (t = 0; t < features->nexamples; t++) {
+	    H_tot[j][t] = H_tmp[j][t];
+	}
+    }
+
+
+    for (j = wmin_index; j < features->examples_dim - i - 1; j++) {
+	for (t = 0; t < features->nexamples; t++) {
+	    features->value[t][j] = features->value[t][j + 1];
+	}
+	names[j] = names[j + 1];
+    }
+
+}
+
+void compute_H(matrix, XX, y, ndati, nfeat, sigma)
+     double **matrix, **XX;
+     int *y;
+     double sigma;
+     int ndati, nfeat;
+{
+    int r, s;
+
+
+    for (r = 0; r < ndati; r++) {
+	for (s = r; s < ndati; s++) {
+	    matrix[r][s] =
+		y[r] * y[s] * squared_gaussian_kernel(XX[r], XX[s], nfeat,
+						      sigma);
+	    matrix[s][r] = matrix[r][s];
+	}
+
+    }
+}
+
+void compute_H_perdiff(Hvecchia, Hnuova, XX, ndati, sigma, featdaelim)
+     double **Hvecchia, **Hnuova, **XX;
+     double sigma;
+     int ndati, featdaelim;
+
+     /* featdaelim is indexed the C way (0...nfeat-1) */
+{
+    int r, s;
+
+    for (r = 0; r < ndati; r++) {
+	for (s = r; s < ndati; s++) {
+	    Hnuova[r][s] =
+		Hvecchia[r][s] *
+		(exp
+		 ((XX[r][featdaelim] -
+		   XX[s][featdaelim]) * (XX[r][featdaelim] -
+					 XX[s][featdaelim]) / sigma));
+	    Hnuova[s][r] = Hnuova[r][s];
+	}
+    }
+}
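+
+/* The update above relies on the product form of the Gaussian kernel:
+   since K(x,y) = exp(-sum_f (x_f - y_f)^2 / sigma), removing feature f
+   from the kernel amounts to multiplying each entry of H by
+   exp((x_f - y_f)^2 / sigma), which is much cheaper than recomputing the
+   kernel from scratch. */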
+
+
+
+void traslo(x, n)
+     double *x;
+     int n;
+{
+    int j;
+    double m, M;
+
+    m = min(x, n);
+    M = max(x, n);
+    if (m == M) {
+	fprintf(stderr,
+		"all the weights are equal: feature selection is not possible\n");
+	exit(1);
+    }
+    for (j = 0; j < n; j++) {
+	x[j] = (x[j] - m) / (M - m);
+    }
+}

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/getline.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/getline.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/getline.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,36 @@
+/*
+   The following routine is written and tested by Stefano Merler
+
+   for
+
+   getting a line from a buffered stream
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include "global.h"
+
+char *GetLine(fp)
+     /*
+        get a line from a buffered stream (pointed to by fp)
+      */
+     FILE *fp;
+{
+    char line[BUFFSIZE], *p = NULL;
+
+    strcpy(line, "");
+
+    while (strlen(line) == 0 && !feof(fp)) {
+	p = fgets(line, BUFFSIZE, fp);
+	if (*line == '#' || strlen(line) == 1)
+	    strcpy(line, "");
+    }
+
+    /* if the file ends with a comment or an empty line, line is empty
+       here: guard before indexing at strlen(line) - 1 */
+    if (p && line[0] != '\0') {
+	if (line[strlen(line) - 1] == '\n')
+	    line[strlen(line) - 1] = '\0';
+	return strdup(line);
+    }
+    else
+	return NULL;
+
+}
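+
+#ifdef PR_EXAMPLE
+/* A minimal usage sketch, not compiled by default: GetLine() skips
+   '#'-comment and empty lines and returns a strdup'ed copy, so the
+   caller owns (and should free) the returned buffer. */
+#include <stdlib.h>
+static void example_getline(char *file)
+{
+    FILE *fp;
+    char *line;
+
+    fp = fopen(file, "r");
+    if (fp == NULL)
+	return;
+    while ((line = GetLine(fp)) != NULL) {
+	printf("%s\n", line);
+	free(line);
+    }
+    fclose(fp);
+}
+#endif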

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/gm.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/gm.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/gm.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,441 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structure GaussianMixture management
+ */
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+static void compute_covar();
+static void compute_mean();
+
+void compute_gm(GaussianMixture * gm, int nsamples, int nvar, double **data,
+		int *data_class, int nclasses, int *classes)
+
+     /*
+        Compute gm model, given a matrix of examples data of dimension
+        nsamples x nvar. Classes of each example are contained in data_class.
+        the array classes (of length nclasses) shall contain all the possible
+        The array classes (of length nclasses) shall contain all the possible
+      */
+{
+    double ***tmpMat;
+    int *index2;
+    int i, j, k;
+
+    gm->classes = classes;
+    gm->nclasses = nclasses;
+
+    gm->npoints_for_class = (int *)G_calloc(gm->nclasses, sizeof(int));
+    for (i = 0; i < nsamples; i++) {
+	for (j = 0; j < gm->nclasses; j++) {
+	    if (data_class[i] == gm->classes[j]) {
+		gm->npoints_for_class[j] += 1;
+	    }
+	}
+    }
+
+    gm->nvars = nvar;
+    gm->priors = (double *)G_calloc(gm->nclasses, sizeof(double));
+    gm->mean = (double **)G_calloc(gm->nclasses, sizeof(double *));
+    for (i = 0; i < gm->nclasses; i++)
+	gm->mean[i] = (double *)G_calloc(gm->nvars, sizeof(double));
+    gm->det = (double *)G_calloc(gm->nclasses, sizeof(double));
+    gm->covar = (double ***)G_calloc(gm->nclasses, sizeof(double **));
+    for (i = 0; i < gm->nclasses; i++) {
+	gm->covar[i] = (double **)G_calloc(gm->nvars, sizeof(double *));
+	for (j = 0; j < gm->nvars; j++)
+	    gm->covar[i][j] = (double *)G_calloc(gm->nvars, sizeof(double));
+    }
+    tmpMat = (double ***)G_calloc(gm->nclasses, sizeof(double **));
+    for (i = 0; i < gm->nclasses; i++) {
+	tmpMat[i] =
+	    (double **)G_calloc(gm->npoints_for_class[i], sizeof(double *));
+	for (j = 0; j < gm->npoints_for_class[i]; j++)
+	    tmpMat[i][j] = (double *)G_calloc(gm->nvars, sizeof(double));
+    }
+
+    index2 = (int *)G_calloc(gm->nclasses, sizeof(int));
+    for (i = 0; i < nsamples; i++)
+	for (j = 0; j < gm->nclasses; j++)
+	    if (data_class[i] == gm->classes[j]) {
+		for (k = 0; k < gm->nvars; k++)
+		    tmpMat[j][index2[j]][k] = data[i][k];
+		index2[j] += 1;
+	    }
+
+    for (i = 0; i < gm->nclasses; i++)
+	compute_mean(tmpMat, gm, i);
+
+    for (i = 0; i < gm->nclasses; i++)
+	compute_covar(tmpMat, gm, i);
+
+    for (i = 0; i < gm->nclasses; i++)
+	gm->priors[i] = (double)gm->npoints_for_class[i] / (double)nsamples;
+
+    for (i = 0; i < gm->nclasses; i++)
+	for (j = 0; j < gm->npoints_for_class[i]; j++)
+	    G_free(tmpMat[i][j]);
+    G_free(tmpMat);
+    G_free(index2);
+}
+
+
+static void compute_covar(double ***mat, GaussianMixture * gm, int class)
+
+     /*
+        compute the covariance matrix of the given class
+      */
+{
+    int i, j, k;
+
+    for (i = 0; i < gm->nvars; i++)
+	for (j = i; j < gm->nvars; j++) {
+	    for (k = 0; k < gm->npoints_for_class[class]; k++) {
+		gm->covar[class][i][j] +=
+		    (mat[class][k][i] -
+		     gm->mean[class][i]) * (mat[class][k][j] -
+					    gm->mean[class][j]);
+	    }
+	    gm->covar[class][j][i] = gm->covar[class][i][j];
+	}
+    for (i = 0; i < gm->nvars; i++)
+	for (j = 0; j < gm->nvars; j++)
+	    gm->covar[class][i][j] /= ((double)gm->npoints_for_class[class] -
+				       1.);
+}
+
+static void compute_mean(double ***mat, GaussianMixture * gm, int class)
+
+     /*
+        compute the mean of each variable for the given class
+      */
+{
+    int i, j;
+
+    for (i = 0; i < gm->nvars; i++)
+	for (j = 0; j < gm->npoints_for_class[class]; j++)
+	    gm->mean[class][i] += mat[class][j][i];
+
+    for (i = 0; i < gm->nvars; i++)
+	gm->mean[class][i] /= gm->npoints_for_class[class];
+}
+
+void write_gm(char *file, GaussianMixture * gm, Features * features)
+
+     /*
+        write gm structure to a file 
+      */
+{
+    FILE *fpout;
+    int i, j, k;
+    char tempbuf[500];
+
+    fpout = fopen(file, "w");
+    if (fpout == NULL) {
+	sprintf(tempbuf, "write_gm-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    write_header_features(fpout, features);
+    fprintf(fpout, "#####################\n");
+    fprintf(fpout, "MODEL:\n");
+    fprintf(fpout, "#####################\n");
+
+    fprintf(fpout, "Model:\n");
+    fprintf(fpout, "GaussianMixture\n");
+    fprintf(fpout, "nclasses:\n");
+    fprintf(fpout, "%d\n", gm->nclasses);
+
+    fprintf(fpout, "nvars:\n");
+    fprintf(fpout, "%d\n", gm->nvars);
+
+    fprintf(fpout, "classes:\n");
+    fprintf(fpout, "%d", gm->classes[0]);
+    for (i = 1; i < gm->nclasses; i++)
+	fprintf(fpout, "\t%d", gm->classes[i]);
+    fprintf(fpout, "\n");
+
+    fprintf(fpout, "priors:\n");
+    fprintf(fpout, "%f", gm->priors[0]);
+    for (i = 1; i < gm->nclasses; i++)
+	fprintf(fpout, "\t%f", gm->priors[i]);
+    fprintf(fpout, "\n");
+
+    for (i = 0; i < gm->nclasses; i++) {
+	fprintf(fpout, "CLASS %d:\n", gm->classes[i]);
+	fprintf(fpout, "mean:\n");
+	fprintf(fpout, "%f", gm->mean[i][0]);
+	for (j = 1; j < gm->nvars; j++)
+	    fprintf(fpout, "\t%f", gm->mean[i][j]);
+	fprintf(fpout, "\n");
+	fprintf(fpout, "covar:\n");
+	for (j = 0; j < gm->nvars; j++) {
+	    fprintf(fpout, "%f", gm->covar[i][j][0]);
+	    for (k = 1; k < gm->nvars; k++)
+		fprintf(fpout, "\t%f", gm->covar[i][j][k]);
+	    fprintf(fpout, "\n");
+	}
+    }
+
+    if (features->f_pca[0]) {
+	fprintf(fpout, "#####################\n");
+	fprintf(fpout, "PRINC. COMP.:\n");
+	fprintf(fpout, "#####################\n");
+
+	fprintf(fpout, "Number of pc:\n");
+	fprintf(fpout, "%d\n", features->npc);
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    fprintf(fpout, "PCA: Layer %d\n", i + 1);
+	    write_pca(fpout, &(features->pca[i]));
+	}
+    }
+
+    fclose(fpout);
+}
+
+
+void test_gm(GaussianMixture * gm, Features * features, char *file)
+
+     /*
+        test the gm model on a set of data (features) and write the results
+        into a file; accuracy and per-class errors go to standard output
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    int predI;
+    double predD;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_gm-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if (features->nclasses == 2) {
+		    if ((predD =
+			 predict_gm_2class(gm,
+					   features->value[i])) *
+			features->class[i] <= 0) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%f\n", features->class[i], predD);
+		}
+		else {
+		    if ((predI =
+			 predict_gm_multiclass(gm,
+					       features->value[i])) !=
+			features->class[i]) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%d\n", features->class[i], predI);
+		}
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+
+void compute_test_gm(GaussianMixture * gm)
+
+     /*
+        compute inverse and determinant of each covariance matrix of a gm model
+      */
+{
+    int i, j;
+
+    gm->det = (double *)G_calloc(gm->nclasses, sizeof(double));
+    gm->inv_covar = (double ***)G_calloc(gm->nclasses, sizeof(double **));
+    for (i = 0; i < gm->nclasses; i++) {
+	gm->inv_covar[i] = (double **)G_calloc(gm->nvars, sizeof(double *));
+	for (j = 0; j < gm->nvars; j++)
+	    gm->inv_covar[i][j] =
+		(double *)G_calloc(gm->nvars, sizeof(double));
+    }
+
+    for (j = 0; j < gm->nclasses; j++) {
+	gm->det[j] = determinant_of_double_matrix(gm->covar[j], gm->nvars);
+	inverse_of_double_matrix(gm->covar[j], gm->inv_covar[j], gm->nvars);
+    }
+}
+
+
+int predict_gm_multiclass(GaussianMixture * gm, double *x)
+
+     /* 
+        multiclass problems: given a gm model, return the predicted class 
+        of a test point x
+      */
+{
+    int i, j, c;
+    double *tmpVect;
+    double *distmean;
+    double *posteriors;
+    double delta;
+    double max_posterior;
+    int max_posterior_index;
+    char tempbuf[500];
+
+    tmpVect = (double *)G_calloc(gm->nvars, sizeof(double));
+    distmean = (double *)G_calloc(gm->nvars, sizeof(double));
+    posteriors = (double *)G_calloc(gm->nclasses, sizeof(double));
+
+    for (c = 0; c < gm->nclasses; c++) {
+	for (i = 0; i < gm->nvars; i++)
+	    distmean[i] = x[i] - gm->mean[c][i];
+
+	for (i = 0; i < gm->nvars; i++)
+	    tmpVect[i] = 0.0;
+
+	for (i = 0; i < gm->nvars; i++)
+	    for (j = 0; j < gm->nvars; j++)
+		tmpVect[i] += distmean[j] * gm->inv_covar[c][j][i];
+
+	delta = 0.0;
+	for (i = 0; i < gm->nvars; i++)
+	    delta += tmpVect[i] * distmean[i];
+
+	if (gm->det[c] > 0.0) {
+	    posteriors[c] = exp(-0.5 * delta) / sqrt(gm->det[c]);
+	}
+	else {
+	    sprintf(tempbuf,
+		    "predict_gm_multiclass-> det. of cov. matrix of class %d = 0",
+		    c);
+	    G_fatal_error(tempbuf);
+	}
+	posteriors[c] = posteriors[c] * gm->priors[c];
+    }
+
+    max_posterior = 0.0;
+    max_posterior_index = 0;
+    for (c = 0; c < gm->nclasses; c++)
+	if (posteriors[c] > max_posterior) {
+	    max_posterior = posteriors[c];
+	    max_posterior_index = c;
+	}
+
+    G_free(tmpVect);
+    G_free(distmean);
+    G_free(posteriors);
+
+    return gm->classes[max_posterior_index];
+
+}
+
+double predict_gm_2class(GaussianMixture * gm, double *x)
+
+     /* 
+        2 class problems: given a gm model, return the posterior of the
+        predicted class (with sign) for a test point x
+      */
+{
+    int i, j, c;
+    double *tmpVect;
+    double *distmean;
+    double *posteriors;
+    double delta, res;
+    char tempbuf[500];
+
+    tmpVect = (double *)G_calloc(gm->nvars, sizeof(double));
+    distmean = (double *)G_calloc(gm->nvars, sizeof(double));
+    posteriors = (double *)G_calloc(gm->nclasses, sizeof(double));
+
+    for (c = 0; c < gm->nclasses; c++) {
+	for (i = 0; i < gm->nvars; i++)
+	    distmean[i] = x[i] - gm->mean[c][i];
+
+	for (i = 0; i < gm->nvars; i++)
+	    tmpVect[i] = 0.0;
+
+	for (i = 0; i < gm->nvars; i++)
+	    for (j = 0; j < gm->nvars; j++)
+		tmpVect[i] += distmean[j] * gm->inv_covar[c][j][i];
+
+	delta = 0.0;
+	for (i = 0; i < gm->nvars; i++)
+	    delta += tmpVect[i] * distmean[i];
+
+	if (gm->det[c] > 0.0) {
+	    posteriors[c] = exp(-0.5 * delta) / sqrt(gm->det[c]);
+	}
+	else {
+	    sprintf(tempbuf,
+		    "predict_gm_2class-> det. of cov. matrix of class %d = 0",
+		    c);
+	    G_fatal_error(tempbuf);
+	}
+	posteriors[c] = posteriors[c] * gm->priors[c];
+    }
+
+
+    G_free(tmpVect);
+    G_free(distmean);
+
+    /* keep the result before freeing posteriors */
+    if (posteriors[0] > posteriors[1]) {
+	res = posteriors[0] / (posteriors[0] +
+			       posteriors[1]) * gm->classes[0];
+    }
+    else {
+	res = posteriors[1] / (posteriors[0] +
+			       posteriors[1]) * gm->classes[1];
+    }
+    G_free(posteriors);
+
+    return res;
+}
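+
+#ifdef PR_EXAMPLE
+/* A sketch of the typical life cycle of a GaussianMixture model, not
+   compiled by default: compute_test_gm() must be called after
+   compute_gm() (or after loading a model) to fill the determinants and
+   inverse covariance matrices used by the predict functions.  The data
+   arguments are assumed to be preloaded. */
+static int example_gm(double **data, int *data_class, int nsamples,
+		      int nvar, double *x)
+{
+    GaussianMixture gm;
+    static int classes[2] = { -1, 1 };
+
+    compute_gm(&gm, nsamples, nvar, data, data_class, 2, classes);
+    compute_test_gm(&gm);
+    return predict_gm_multiclass(&gm, x);
+}
+#endif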

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/integration.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/integration.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/integration.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,185 @@
+/*
+   Some of the following routines are borrowed from "Numerical Recipes in C",
+   others are written and tested by Stefano Merler
+
+   for
+
+   integration of functions using the trapezoidal rule
+
+   Supported cases:
+   - functions without free parameters
+   - functions depending on 1 parameter
+   - functions depending on 2 parameters
+ */
+
+
+#include <math.h>
+#include <stdio.h>
+
+#define FUNC(x) ((*func)(x))
+#define EPS 1.0e-5
+#define JMAX 1000
+
+double trapzd(func, a, b, n)
+     /* 
+        trapezoidal rule for func=func(x) on interval [a,b]
+        n = steps number
+      */
+     double a, b;
+     double (*func) ();
+     int n;
+{
+    double x, tnm, sum, del;
+    double s;
+    int j;
+
+    if (n == 1) {
+	return (s = 0.5 * (b - a) * (FUNC(a) + FUNC(b)));
+    }
+    else {
+	tnm = n;
+	del = (b - a) / tnm;
+	x = a + 0.5 * del;
+	for (sum = 0.0, j = 1; j <= n; j++, x += del)
+	    sum += FUNC(x);
+	s = (b - a) * sum / tnm;
+	return s;
+    }
+}
+
+double trapzd1(func, p1, a, b, n)
+     /* 
+        trapezoidal rule for func=func(x; p1) on interval [a,b]
+        p1 free parameter
+        n = steps number
+      */
+     double a, b;
+     double p1;
+     double (*func) ();
+     int n;
+{
+    double x, tnm, sum, del;
+    double s;
+    int j;
+
+    if (n == 1) {
+	return (s = 0.5 * (b - a) * (func(a, p1) + func(b, p1)));
+    }
+    else {
+	tnm = n;
+	del = (b - a) / tnm;
+	x = a + 0.5 * del;
+	for (sum = 0.0, j = 1; j <= n; j++, x += del)
+	    sum += func(x, p1);
+	s = (b - a) * sum / tnm;
+	return s;
+    }
+}
+
+double trapzd2(func, p1, p2, a, b, n)
+     /* 
+        trapezoidal rule for func=func(x; p1,p2) on interval [a,b]
+        p1 and p2 free parameters
+        n = steps number
+      */
+     double a, b;
+     double p1, p2;
+     double (*func) ();
+     int n;
+{
+    double x, tnm, sum, del;
+    double s;
+    int j;
+
+    if (n == 1) {
+	return (s = 0.5 * (b - a) * (func(a, p1, p2) + func(b, p1, p2)));
+    }
+    else {
+	tnm = n;
+	del = (b - a) / tnm;
+	x = a + 0.5 * del;
+	for (sum = 0.0, j = 1; j <= n; j++, x += del)
+	    sum += func(x, p1, p2);
+	s = (b - a) * sum / tnm;
+	return s;
+    }
+}
+
+double qtrap(func, a, b)
+     /* 
+        trapezoidal rule for func=func(x) with stopping rule
+      */
+     double a, b;
+     double (*func) ();
+{
+    int j;
+    double s, olds;
+
+    olds = -1.0e-30;
+
+    for (j = 1; j <= JMAX; j++) {
+	s = trapzd(func, a, b, j);
+	if (fabs(s - olds) < EPS * fabs(olds))
+	    return s;
+	olds = s;
+    }
+
+    fprintf(stderr, "Too many steps in routine qtrap\n");
+    return s;
+}
+
+
+double qtrap1(func, p1, a, b)
+     /* 
+        trapezoidal rule for func=func(x; p1) on interval [a,b]
+        with internal stopping rule
+        p1 free parameter
+      */
+     double a, b;
+     double p1;
+     double (*func) ();
+{
+    int j;
+    double s, olds;
+
+    olds = -1.0e-30;
+
+    for (j = 1; j <= JMAX; j++) {
+	s = trapzd1(func, p1, a, b, j);
+	if (fabs(s - olds) < EPS * fabs(olds))
+	    return s;
+	olds = s;
+    }
+
+    fprintf(stderr, "Too many steps in routine qtrap1\n");
+    return s;
+}
+
+double qtrap2(func, p1, p2, a, b)
+     /* 
+        trapezoidal rule for func=func(x; p1, p2) on interval [a,b]
+        with internal stopping rule
+        p1 and p2 free parameters
+      */
+     double a, b;
+     double p1, p2;
+     double (*func) ();
+{
+    int j;
+    double s, olds;
+
+    olds = -1.0e-30;
+
+    for (j = 1; j <= JMAX; j++) {
+	s = trapzd2(func, p1, p2, a, b, j);
+	if (fabs(s - olds) < EPS * fabs(olds))
+	    return s;
+	olds = s;
+    }
+
+    fprintf(stderr, "Too many steps in routine qtrap2\n");
+    return s;
+}
+
+#undef EPS
+#undef JMAX
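+
+#ifdef PR_EXAMPLE
+/* A minimal usage sketch, not compiled by default: integrating
+   f(x) = x * x over [0,1] with the adaptive driver; the exact value
+   is 1/3. */
+static double example_square(double x)
+{
+    return x * x;
+}
+
+static double example_integral(void)
+{
+    return qtrap(example_square, 0.0, 1.0);
+}
+#endif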

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/lu.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/lu.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/lu.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,185 @@
+/*
+   Some of the following routines are borrowed from "Numerical Recipes in C",
+   others are written and tested by Stefano Merler
+
+   for
+
+   LU matrix decomposition, linear equation solution (Ax=b), inversion
+   of matrices and determinant computation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <grass/gis.h>
+
+
+#define CTINY 1.0e-32
+
+void ludcmp(double **a, int n, int *indx, double *d)
+
+     /*
+        LU decomposition of n x n matrix a.
+      */
+{
+    int i, imax = 0, j, k;
+    double big, dum, sum, temp;
+    double *vv;
+
+    vv = (double *)G_calloc(n, sizeof(double));
+    *d = 1.0;
+    for (i = 0; i < n; i++) {
+	big = 0;
+	for (j = 0; j < n; j++)
+	    if ((temp = fabs(a[i][j])) > big)
+		big = temp;
+	if (big == 0.0) {
+	    fprintf(stderr, "Singular matrix in routine ludcmp\n");
+	    exit(1);
+	}
+	vv[i] = 1.0 / big;
+    }
+    for (j = 0; j < n; j++) {
+	for (i = 0; i < j; i++) {
+	    sum = a[i][j];
+	    for (k = 0; k < i; k++)
+		sum -= a[i][k] * a[k][j];
+	    a[i][j] = sum;
+	}
+	big = 0.0;
+	for (i = j; i < n; i++) {
+	    sum = a[i][j];
+	    for (k = 0; k < j; k++)
+		sum -= a[i][k] * a[k][j];
+	    a[i][j] = sum;
+	    if ((dum = vv[i] * fabs(sum)) >= big) {
+		big = dum;
+		imax = i;
+	    }
+	}
+	if (j != imax) {
+	    for (k = 0; k < n; k++) {
+		dum = a[imax][k];
+		a[imax][k] = a[j][k];
+		a[j][k] = dum;
+	    }
+	    *d = -(*d);
+	    vv[imax] = vv[j];
+	}
+	indx[j] = imax;
+	if (a[j][j] == 0.0)
+	    a[j][j] = CTINY;
+	if (j != n - 1) {
+	    dum = 1.0 / a[j][j];
+	    for (i = j + 1; i < n; i++)
+		a[i][j] *= dum;
+	}
+    }
+    G_free(vv);
+}
+
+#undef CTINY
+
+
+void lubksb(double **a, int n, int *indx, double b[])
+
+     /* 
+        Solve the linear system Ax=b.
+        a must be an LU-decomposed n x n matrix, and indx
+        is usually the output of ludcmp.
+        On output, b contains the solution
+      */
+{
+    int i, ii = -1, ip, j;
+    double sum;
+
+    for (i = 0; i < n; i++) {
+	ip = indx[i];
+	sum = b[ip];
+	b[ip] = b[i];
+	if (ii >= 0)
+	    for (j = ii; j <= i - 1; j++)
+		sum -= a[i][j] * b[j];
+	else if (sum != 0.0)
+	    ii = i;
+	b[i] = sum;
+    }
+    for (i = n - 1; i >= 0; i--) {
+	sum = b[i];
+	for (j = i + 1; j < n; j++)
+	    sum -= a[i][j] * b[j];
+	b[i] = sum / a[i][i];
+    }
+}
+
+void inverse_of_double_matrix(double **A, double **inv_A, int n)
+
+     /* 
+        Inverse of a matrix A of dimension n x n.
+        Output stored in inv_A
+      */
+{
+    double d, *col, **tmpA;
+    int i, j, *indx;
+
+    tmpA = (double **)G_calloc(n, sizeof(double *));
+    for (j = 0; j < n; j++)
+	tmpA[j] = (double *)G_calloc(n, sizeof(double));
+
+    for (j = 0; j < n; j++)
+	for (i = 0; i < n; i++)
+	    tmpA[j][i] = A[j][i];
+
+    col = (double *)G_calloc(n, sizeof(double));
+    indx = (int *)G_calloc(n, sizeof(int));
+
+    ludcmp(tmpA, n, indx, &d);
+    for (j = 0; j < n; j++) {
+	for (i = 0; i < n; i++)
+	    col[i] = 0;
+	col[j] = 1;
+	lubksb(tmpA, n, indx, col);
+	for (i = 0; i < n; i++)
+	    inv_A[i][j] = col[i];
+    }
+
+    G_free(col);
+    G_free(indx);
+    for (j = 0; j < n; j++)
+	G_free(tmpA[j]);
+    G_free(tmpA);
+
+}
+
+double determinant_of_double_matrix(double **A, int n)
+
+     /* 
+        determinant of a double matrix A of dimension n x n
+      */
+{
+    double d, **tmpA;
+    int i, j, *indx;
+
+    tmpA = (double **)G_calloc(n, sizeof(double *));
+    for (j = 0; j < n; j++)
+	tmpA[j] = (double *)G_calloc(n, sizeof(double));
+
+    for (j = 0; j < n; j++)
+	for (i = 0; i < n; i++)
+	    tmpA[j][i] = A[j][i];
+
+    indx = (int *)G_calloc(n, sizeof(int));
+
+    ludcmp(tmpA, n, indx, &d);
+
+    for (j = 0; j < n; j++)
+	d *= tmpA[j][j];
+
+    G_free(indx);
+    for (j = 0; j < n; j++)
+	G_free(tmpA[j]);
+    G_free(tmpA);
+
+    return (d);
+
+}
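+
+#ifdef PR_EXAMPLE
+/* A minimal usage sketch, not compiled by default: inverting the 2 x 2
+   matrix ((2,1),(1,2)), whose determinant is 3 and whose inverse is
+   ((2,-1),(-1,2)) / 3.  Both routines work on internal copies, so A is
+   left untouched. */
+static void example_lu(void)
+{
+    double **A, **inv_A;
+    int i;
+
+    A = (double **)G_calloc(2, sizeof(double *));
+    inv_A = (double **)G_calloc(2, sizeof(double *));
+    for (i = 0; i < 2; i++) {
+	A[i] = (double *)G_calloc(2, sizeof(double));
+	inv_A[i] = (double *)G_calloc(2, sizeof(double));
+    }
+    A[0][0] = A[1][1] = 2.0;
+    A[0][1] = A[1][0] = 1.0;
+
+    inverse_of_double_matrix(A, inv_A, 2);
+    printf("det = %f\n", determinant_of_double_matrix(A, 2));
+
+    for (i = 0; i < 2; i++) {
+	G_free(A[i]);
+	G_free(inv_A[i]);
+    }
+    G_free(A);
+    G_free(inv_A);
+}
+#endif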

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/matrix.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/matrix.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/matrix.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,161 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   management of matrices and arrays
+
+   Supported functions:
+   - matrix-matrix and matrix-vector products
+   - matrix transposition
+   - conversion of a matrix to an array
+   - extraction of a portion of a matrix
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <grass/gis.h>
+
+void product_double_matrix_double_matrix(double **x, double **y, int r,
+					 int cr, int c, double **out)
+
+     /*
+        product of matrices x * y, 
+        r = rows of x 
+        cr= cols of x = rows of y
+        c = cols of y
+        out is the r x c matrix. 
+      */
+{
+    int i, j, h;
+
+    for (i = 0; i < r; i++)
+	for (j = 0; j < c; j++) {
+	    out[i][j] = .0;
+	    for (h = 0; h < cr; h++)
+		out[i][j] += x[i][h] * y[h][j];
+	}
+
+}
+
+
+void product_double_matrix_double_vector(double **x, double *y, int r, int cr,
+					 double *out)
+
+     /*
+        matrix-vector product x * y,
+        r = rows of x
+        cr = cols of x = elements of y
+        out is the output vector (r elements); memory for out must be
+        allocated by the caller
+      */
+{
+    int i, h;
+
+    for (i = 0; i < r; i++) {
+	out[i] = .0;
+	for (h = 0; h < cr; h++)
+	    out[i] += x[i][h] * y[h];
+    }
+
+}
+
+void product_double_vector_double_matrix(double **x, double *y, int rr, int c,
+					 double *out)
+
+     /*
+        vector-matrix product y * x,
+        rr = rows of x = elements of y
+        c = cols of x
+        out is the output vector (c elements); memory for out must be
+        allocated by the caller
+      */
+{
+    int i, h;
+
+    for (i = 0; i < c; i++) {
+	out[i] = .0;
+	for (h = 0; h < rr; h++)
+	    out[i] += x[h][i] * y[h];
+    }
+
+}
+
+
+void transpose_double_matrix(double **x, int n)
+
+     /*
+        transpose, and overwrite, the input matrix x 
+        of dimension n x n
+      */
+{
+    double **trans;
+    int i, j;
+
+    trans = (double **)G_calloc(n, sizeof(double *));
+    for (i = 0; i < n; i++)
+	trans[i] = (double *)G_calloc(n, sizeof(double));
+    for (i = 0; i < n; i++)
+	for (j = 0; j < n; j++)
+	    trans[j][i] = x[i][j];
+    for (i = 0; i < n; i++)
+	for (j = 0; j < n; j++)
+	    x[i][j] = trans[i][j];
+
+    for (i = 0; i < n; i++)
+	G_free(trans[i]);
+    G_free(trans);
+
+}
+
+void double_matrix_to_vector(double **mat, int rows, int cols, double *vect)
+
+     /*
+        transform matrix mat of dimension rows x cols into vector
+        vect of length rows x cols.
+        the matrix is scanned by row
+      */
+{
+    int i, j;
+
+    for (i = 0; i < rows; i++)
+	for (j = 0; j < cols; j++)
+	    vect[(i * cols) + j] = mat[i][j];
+}
+
+void extract_portion_of_double_matrix(int r, int c, int br, int bc,
+				      double **mat, double **wind)
+
+     /*
+        extract a rectangular portion of a matrix mat
+        given the indices of the center [r,c]
+        and the half-widths of the window [br,bc]
+        Output to matrix wind
+      */
+{
+    int i, j;
+
+    for (i = 0; i < 2 * br + 1; i++) {
+	for (j = 0; j < 2 * bc + 1; j++) {
+	    wind[i][j] = mat[r - br + i][c - bc + j];
+	}
+    }
+}
+
+void transpose_double_matrix_rectangular(double **x, int n, int m,
+					 double ***trans)
+
+     /*
+        transpose the input matrix x of dimension n x m;
+        output to the newly allocated matrix *trans
+      */
+{
+    int i, j;
+
+    (*trans) = (double **)G_calloc(m, sizeof(double *));
+    for (i = 0; i < m; i++)
+	(*trans)[i] = (double *)G_calloc(n, sizeof(double));
+
+    for (i = 0; i < m; i++)
+	for (j = 0; j < n; j++)
+	    (*trans)[i][j] = x[j][i];
+
+}
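+
+#ifdef PR_EXAMPLE
+/* A minimal usage sketch of the dimension conventions, not compiled by
+   default: multiplying a 2 x 3 matrix by a 3-element vector; out must be
+   preallocated with r = 2 entries by the caller. */
+static void example_product(double **x, double *y)
+{
+    double out[2];
+
+    /* x is 2 x 3, y has 3 elements, out receives x * y */
+    product_double_matrix_double_vector(x, y, 2, 3, out);
+    printf("%f %f\n", out[0], out[1]);
+}
+#endif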

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/min_quadratic.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/min_quadratic.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/min_quadratic.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,304 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   quadratic programming
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <grass/gis.h>
+#include "func.h"
+
+void mqc(double **M, double *m, int n, double **H, double *h, int mH,
+	 double **K, double *k, int mK, double eps, double *x,
+	 double *minvalue)
+{
+    int i, j, l;
+
+    double **invM;
+    double **HM, **HMH, *tnH, **HMK, **KM, **KMK, *tnK, **tH, **tK;
+    double mMm;
+    double gap;
+    double *alpha, *beta;
+    double L, f;
+    double tmpalpha, tmpbeta, tmpL, tmpf;
+
+    /*alloc memory */
+    invM = (double **)G_calloc(n, sizeof(double *));
+    for (i = 0; i < n; i++)
+	invM[i] = (double *)G_calloc(n, sizeof(double));
+
+    if (mH > 0) {
+	HM = (double **)G_calloc(mH, sizeof(double *));
+	for (i = 0; i < mH; i++)
+	    HM[i] = (double *)G_calloc(n, sizeof(double));
+
+	HMH = (double **)G_calloc(mH, sizeof(double *));
+	for (i = 0; i < mH; i++)
+	    HMH[i] = (double *)G_calloc(mH, sizeof(double));
+
+	tnH = (double *)G_calloc(mH, sizeof(double));
+
+	tH = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    tH[i] = (double *)G_calloc(mH, sizeof(double));
+
+	for (i = 0; i < mH; i++)
+	    for (j = 0; j < n; j++)
+		tH[j][i] = H[i][j];
+    }
+
+    if (mH > 0 && mK > 0) {
+	HMK = (double **)G_calloc(mH, sizeof(double *));
+	for (i = 0; i < mH; i++)
+	    HMK[i] = (double *)G_calloc(mK, sizeof(double));
+    }
+
+    if (mK > 0) {
+	KM = (double **)G_calloc(mK, sizeof(double *));
+	for (i = 0; i < mK; i++)
+	    KM[i] = (double *)G_calloc(n, sizeof(double));
+
+	KMK = (double **)G_calloc(mK, sizeof(double *));
+	for (i = 0; i < mK; i++)
+	    KMK[i] = (double *)G_calloc(mK, sizeof(double));
+
+	tnK = (double *)G_calloc(mK, sizeof(double));
+
+	tK = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    tK[i] = (double *)G_calloc(mK, sizeof(double));
+
+	for (i = 0; i < mK; i++)
+	    for (j = 0; j < n; j++)
+		tK[j][i] = K[i][j];
+    }
+
+    /*compute inverse of M */
+    inverse_of_double_matrix(M, invM, n);
+
+    /*compute matrices products */
+    if (mH > 0) {
+	product_double_matrix_double_matrix(H, invM, mH, n, n, HM);
+	product_double_matrix_double_matrix(HM, tH, mH, n, mH, HMH);
+	product_double_matrix_double_vector(HM, m, mH, n, tnH);
+	for (i = 0; i < mH; i++)
+	    tnH[i] += 2. * h[i];
+    }
+
+    if (mH > 0 && mK > 0)
+	product_double_matrix_double_matrix(HM, tK, mH, n, mK, HMK);
+
+    if (mK > 0) {
+	product_double_matrix_double_matrix(K, invM, mK, n, n, KM);
+	product_double_matrix_double_matrix(KM, tK, mK, n, mK, KMK);
+	product_double_matrix_double_vector(KM, m, mK, n, tnK);
+	for (i = 0; i < mK; i++)
+	    tnK[i] += 2. * k[i];
+    }
+
+
+    mMm = 0.0;
+    for (i = 0; i < n; i++)
+	for (j = 0; j < n; j++)
+	    mMm += m[i] * m[j] * invM[i][j];
+    mMm *= -.5;
+
+    if (mH > 0)
+	alpha = (double *)G_calloc(mH, sizeof(double));
+    if (mK > 0)
+	beta = (double *)G_calloc(mK, sizeof(double));
+
+    gap = eps + 1;
+    /*gradient ascendent on the dual Lagrangian */
+    while (gap > eps) {
+	if (mH > 0 && mK > 0) {
+	    for (l = 0; l < mH; l++) {
+
+		tmpalpha = .0;
+		for (i = 0; i < mH; i++)
+		    if (alpha[i] > 0)
+			tmpalpha += HMH[i][l] * alpha[i];
+
+		tmpalpha += tnH[l];
+
+
+		for (i = 0; i < mK; i++)
+		    tmpalpha += HMK[l][i] * beta[i];
+
+		alpha[l] -= tmpalpha / HMH[l][l];
+
+		if (alpha[l] < .0)
+		    alpha[l] = .0;
+	    }
+
+	    for (l = 0; l < mK; l++) {
+		tmpbeta = .0;
+		for (i = 0; i < mK; i++)
+		    tmpbeta += KMK[i][l] * beta[i];
+
+		tmpbeta += tnK[l];
+
+
+		for (i = 0; i < mH; i++)
+		    if (alpha[i] > 0)
+			tmpbeta += HMK[i][l] * alpha[i];
+
+		beta[l] -= tmpbeta / KMK[l][l];
+
+	    }
+	}
+	else if (mH > 0 && mK == 0) {
+	    for (l = 0; l < mH; l++) {
+
+		tmpalpha = .0;
+		for (i = 0; i < mH; i++)
+		    if (alpha[i] > 0)
+			tmpalpha += HMH[i][l] * alpha[i];
+
+		tmpalpha += tnH[l];
+
+		alpha[l] -= tmpalpha / HMH[l][l];
+		if (alpha[l] < .0)
+		    alpha[l] = .0;
+	    }
+	}
+	else if (mH == 0 && mK > 0) {
+	    for (l = 0; l < mK; l++) {
+		tmpbeta = .0;
+		for (i = 0; i < mK; i++)
+		    tmpbeta += KMK[i][l] * beta[i];
+
+		tmpbeta += tnK[l];
+
+		beta[l] -= tmpbeta / KMK[l][l];
+
+	    }
+	}
+
+	/*value of the dual Lagrangian */
+	L = mMm;
+
+	tmpL = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		for (j = 0; j < mH; j++)
+		    if (alpha[j] > 0)
+			tmpL += alpha[i] * alpha[j] * HMH[i][j];
+	L -= .5 * tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		tmpL += alpha[i] * tnH[i];
+	L -= tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mK; i++)
+	    for (j = 0; j < mK; j++)
+		tmpL += beta[i] * beta[j] * KMK[i][j];
+	L -= .5 * tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mK; i++)
+	    tmpL += beta[i] * tnK[i];
+	L -= tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		for (j = 0; j < mK; j++)
+		    tmpL += alpha[i] * beta[j] * HMK[i][j];
+	L -= tmpL;
+
+	L *= .5;
+
+	/*value of the objective function */
+	f = mMm - L;
+
+	tmpf = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		tmpf += alpha[i] * tnH[i];
+	f -= .5 * tmpf;
+
+	tmpf = .0;
+	for (i = 0; i < mK; i++)
+	    tmpf += beta[i] * tnK[i];
+	f -= .5 * tmpf;
+
+	/* gap between dual Lagrangian and objective function (stopping criteria) */
+	gap = fabs((f - L) / (f + 1.));
+	printf("%f\n", gap);
+
+    }
+
+    /*minimum */
+
+    for (l = 0; l < n; l++) {
+	x[l] = .0;
+
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		x[l] += HM[i][l] * alpha[i];
+
+	for (i = 0; i < mK; i++)
+	    x[l] += KM[i][l] * beta[i];
+
+	for (i = 0; i < n; i++)
+	    x[l] += invM[l][i] * m[i];
+
+	x[l] *= -.5;
+    }
+    for (i = 0; i < mH; i++)
+	printf("a[%d]=%f\n", i, alpha[i]);
+    for (i = 0; i < mK; i++)
+	printf("b[%d]=%f\n", i, beta[i]);
+
+    /*value of the function */
+    *minvalue = f;
+
+
+    /*free memory */
+    for (i = 0; i < n; i++)
+	G_free(invM[i]);
+    G_free(invM);
+
+    if (mH > 0) {
+	G_free(alpha);
+	G_free(tnH);
+	for (i = 0; i < mH; i++) {
+	    G_free(HM[i]);
+	    G_free(HMH[i]);
+	}
+	G_free(HM);
+	G_free(HMH);
+	for (i = 0; i < n; i++)
+	    G_free(tH[i]);
+	G_free(tH);
+    }
+
+    if (mK > 0) {
+	G_free(beta);
+	G_free(tnK);
+	for (i = 0; i < mK; i++) {
+	    G_free(KM[i]);
+	    G_free(KMK[i]);
+	}
+	G_free(KM);
+	G_free(KMK);
+	for (i = 0; i < n; i++)
+	    G_free(tK[i]);
+	G_free(tK);
+    }
+
+    if (mK > 0 && mH > 0) {
+	for (i = 0; i < mH; i++)
+	    G_free(HMK[i]);
+	G_free(HMK);
+    }
+
+}
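+
+#ifdef PR_EXAMPLE
+/* A sketch of the convention read off the stationarity condition used
+   above (2 M x + m + H'alpha + K'beta = 0 with alpha >= 0): mqc()
+   appears to minimize x'Mx + m'x subject to Hx + h <= 0 and Kx + k = 0.
+   One-variable instance, not compiled by default: minimize x^2 + 2x
+   subject to -x <= 0 (i.e. x >= 0); the constrained minimum is x = 0
+   with value 0. */
+static double example_mqc(void)
+{
+    double **M, *m, **H, *h;
+    double x[1], minvalue;
+
+    M = (double **)G_calloc(1, sizeof(double *));
+    M[0] = (double *)G_calloc(1, sizeof(double));
+    M[0][0] = 1.0;
+    m = (double *)G_calloc(1, sizeof(double));
+    m[0] = 2.0;
+    H = (double **)G_calloc(1, sizeof(double *));
+    H[0] = (double *)G_calloc(1, sizeof(double));
+    H[0][0] = -1.0;
+    h = (double *)G_calloc(1, sizeof(double));
+
+    mqc(M, m, 1, H, h, 1, NULL, NULL, 0, 1.0e-6, x, &minvalue);
+    return x[0];		/* expected: 0.0 */
+}
+#endif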

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/nn.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/nn.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/nn.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,306 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structure NearestNeighbor management
+ */
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+
+void compute_nn(NearestNeighbor * nn, int nsamples, int nvar, double **data,
+		int *data_class)
+
+     /*
+        Compute nn model, given a matrix of examples data of dimension
+        nsamples x nvar. Classes of each example are contained in data_class.
+      */
+{
+    int i, j;
+
+
+    nn->nsamples = nsamples;
+    nn->nvars = nvar;
+
+    nn->data = (double **)G_calloc(nn->nsamples, sizeof(double *));
+    for (i = 0; i < nn->nsamples; i++) {
+	nn->data[i] = (double *)G_calloc(nn->nvars, sizeof(double));
+    }
+    nn->class = (int *)G_calloc(nn->nsamples, sizeof(int));
+
+    for (i = 0; i < nn->nsamples; i++) {
+	for (j = 0; j < nn->nvars; j++) {
+	    nn->data[i][j] = data[i][j];
+	}
+	nn->class[i] = data_class[i];
+    }
+}
+
+
+void write_nn(char *file, NearestNeighbor * nn, Features * features)
+
+     /*
+        write nn structure to a file 
+      */
+{
+    FILE *fpout;
+    int i, j;
+    char tempbuf[500];
+
+    fpout = fopen(file, "w");
+    if (fpout == NULL) {
+	sprintf(tempbuf, "write_nn-> Can't open file <%s> for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    write_header_features(fpout, features);
+    fprintf(fpout, "#####################\n");
+    fprintf(fpout, "MODEL:\n");
+    fprintf(fpout, "#####################\n");
+
+    fprintf(fpout, "Model:\n");
+    fprintf(fpout, "NearestNeighbor\n");
+    fprintf(fpout, "k:\n");
+    fprintf(fpout, "%d\n", nn->k);
+    fprintf(fpout, "number of samples:\n");
+    fprintf(fpout, "%d\n", nn->nsamples);
+
+    fprintf(fpout, "number of variables:\n");
+    fprintf(fpout, "%d\n", nn->nvars);
+
+    for (i = 0; i < nn->nsamples; i++) {
+	for (j = 0; j < nn->nvars; j++) {
+	    fprintf(fpout, "%f\t", nn->data[i][j]);
+	}
+	fprintf(fpout, "%d\n", nn->class[i]);
+    }
+
+    if (features->f_pca[0]) {
+	fprintf(fpout, "#####################\n");
+	fprintf(fpout, "PRINC. COMP.:\n");
+	fprintf(fpout, "#####################\n");
+
+	fprintf(fpout, "Number of pc:\n");
+	fprintf(fpout, "%d\n", features->npc);
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    fprintf(fpout, "PCA: Layer %d\n", i + 1);
+	    write_pca(fpout, &(features->pca[i]));
+	}
+    }
+
+    fclose(fpout);
+}
+
+
+int predict_nn_multiclass(NearestNeighbor * nn, double *x, int k,
+			  int nclasses, int *classes)
+
+     /* 
+        multiclass problems: given a nn model, return the predicted class
+        of a test point x using the k-nearest neighbors for the prediction.
+        The array classes (of length nclasses) shall contain all the
+        possible classes to be predicted
+{
+    int i, j;
+    double *dist;
+    int *pres_class, *pred_class, *index;
+    int max_class;
+    int max;
+
+    dist = (double *)G_calloc(nn->nsamples, sizeof(double));
+    index = (int *)G_calloc(nn->nsamples, sizeof(int));
+    pred_class = (int *)G_calloc(k, sizeof(int));
+    pres_class = (int *)G_calloc(nclasses, sizeof(int));
+
+
+    for (i = 0; i < nn->nsamples; i++) {
+	dist[i] = squared_distance(x, nn->data[i], nn->nvars);
+    }
+
+    indexx_1(nn->nsamples, dist, index);
+
+    for (i = 0; i < k; i++) {
+	pred_class[i] = nn->class[index[i]];
+    }
+
+    for (j = 0; j < k; j++) {
+	for (i = 0; i < nclasses; i++) {
+	    if (pred_class[j] == classes[i]) {
+		pres_class[i] += 1;
+		break;
+	    }
+	}
+    }
+
+
+    max = 0;
+    max_class = 0;
+    for (i = 0; i < nclasses; i++) {
+	if (pres_class[i] > max) {
+	    max = pres_class[i];
+	    max_class = i;
+	}
+    }
+
+    G_free(dist);
+    G_free(index);
+    G_free(pred_class);
+    G_free(pres_class);
+
+    return classes[max_class];
+
+}
+
+
+double predict_nn_2class(NearestNeighbor * nn, double *x, int k, int nclasses,
+			 int *classes)
+
+     /* 
+        2 class problems: given a nn model, return the fraction of the
+        majority class (with sign) for a test point x using the k-nearest
+        neighbors for the prediction. The array classes (of length
+        nclasses) shall contain all the possible classes to be predicted
+      */
+{
+    int i, j;
+    double res;
+    double *dist;
+    int *pres_class, *pred_class, *index;
+
+    dist = (double *)G_calloc(nn->nsamples, sizeof(double));
+    index = (int *)G_calloc(nn->nsamples, sizeof(int));
+    pred_class = (int *)G_calloc(k, sizeof(int));
+    pres_class = (int *)G_calloc(nclasses, sizeof(int));
+
+
+    for (i = 0; i < nn->nsamples; i++) {
+	dist[i] = squared_distance(x, nn->data[i], nn->nvars);
+    }
+
+    indexx_1(nn->nsamples, dist, index);
+
+    for (i = 0; i < k; i++) {
+	pred_class[i] = nn->class[index[i]];
+    }
+
+    for (j = 0; j < k; j++) {
+	for (i = 0; i < nclasses; i++) {
+	    if (pred_class[j] == classes[i]) {
+		pres_class[i] += 1;
+		break;
+	    }
+	}
+    }
+
+
+    G_free(dist);
+    G_free(index);
+    G_free(pred_class);
+
+    {
+	double vote;
+
+	if (pres_class[0] > pres_class[1]) {
+	    vote = (double)pres_class[0] / (double)(k * classes[0]);
+	}
+	else {
+	    vote = (double)pres_class[1] / (double)(k * classes[1]);
+	}
+	G_free(pres_class);
+
+	return vote;
+    }
+}
+
+
+
+void test_nn(NearestNeighbor * nn, Features * features, int k, char *file)
+
+     /*
+        test nn model on a set of data (features) using the k-nearest
+        neighbors and write the results into a file. Accuracy and per-class
+        errors are printed to standard output
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    int predI;
+    double predD;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_nn-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if (features->nclasses == 2) {
+		    if ((predD =
+			 predict_nn_2class(nn, features->value[i], k,
+					   features->nclasses,
+					   features->p_classes)) *
+			features->class[i] <= 0) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%f\n", features->class[i], predD);
+		}
+		else {
+		    if ((predI =
+			 predict_nn_multiclass(nn, features->value[i], k,
+					       features->nclasses,
+					       features->p_classes)) !=
+			features->class[i]) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%d\n", features->class[i], predI);
+		}
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
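
For readers new to the voting scheme above: predict_nn_multiclass() orders all training samples by squared distance to x and tallies the labels of the k closest. A minimal standalone sketch of the same idea, with toy data and no PRLIB or GRASS dependencies assumed:

    #include <stdio.h>

    /* squared Euclidean distance between two vectors of length d */
    static double sqdist(const double *a, const double *b, int d)
    {
        double s = 0.0, t;
        int j;

        for (j = 0; j < d; j++) {
            t = a[j] - b[j];
            s += t * t;
        }
        return s;
    }

    int main(void)
    {
        /* toy training set: 4 points in 1D with labels -1 / +1 */
        double data[4][1] = { {0.0}, {0.5}, {3.0}, {3.5} };
        int label[4] = { -1, -1, 1, 1 };
        double x[1] = { 0.7 };              /* test point */
        int k = 3, votes = 0, i, j, used[4] = { 0 };

        /* pick the k smallest distances (selection, in place of indexx_1) */
        for (i = 0; i < k; i++) {
            int best = -1;

            for (j = 0; j < 4; j++)
                if (!used[j] && (best < 0 ||
                                 sqdist(x, data[j], 1) < sqdist(x, data[best], 1)))
                    best = j;
            used[best] = 1;
            votes += label[best];
        }
        printf("predicted class: %d\n", votes > 0 ? 1 : -1);  /* prints -1 */
        return 0;
    }
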

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/open.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/open.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/open.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,60 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   opening new raster maps
+ */
+
+#include <grass/gis.h>
+#include <stdlib.h>
+
+int open_new_CELL(char *name)
+
+     /*
+        open a new raster map named name in CELL format
+      */
+{
+    int fd;
+    char err[400];
+
+    if (G_legal_filename(name) < 0) {
+	sprintf(err, "open_new_CELL-> %s - ** illegal name **", name);
+	G_fatal_error(err);
+	exit(1);
+    }
+
+    fd = G_open_raster_new(name, CELL_TYPE);
+    if (fd < 0) {
+	sprintf(err, "open_new_CELL-> failed in attempt to open %s\n", name);
+	G_fatal_error(err);
+	exit(1);
+    }
+
+    return fd;
+}
+
+int open_new_DCELL(char *name)
+
+     /*
+        open a new raster map named name in DCELL format
+      */
+{
+    int fd;
+    char err[400];
+
+    if (G_legal_filename(name) < 0) {
+	sprintf(err, "open_new_DCELL-> %s - ** illegal name **", name);
+	G_fatal_error(err);
+	exit(1);
+    }
+
+    fd = G_open_raster_new(name, DCELL_TYPE);
+    if (fd < 0) {
+	sprintf(err, "open_new_DCELL-> failed in attempt to open %s\n", name);
+	G_fatal_error(err);
+	exit(1);
+    }
+
+    return fd;
+}

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/pca.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/pca.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/pca.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,87 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structure Pca management
+ */
+
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+
+
+void inizialize_pca(Pca * pca, int dim)
+
+     /* 
+        alloc memory for Pca structure pca of dimension dim
+      */
+{
+    int i;
+
+    pca->n = dim;
+    pca->mean = (double *)G_calloc(dim, sizeof(double));
+    pca->sd = (double *)G_calloc(dim, sizeof(double));
+    pca->covar = (double **)G_calloc(dim, sizeof(double *));
+    for (i = 0; i < dim; i++)
+	pca->covar[i] = (double *)G_calloc(dim, sizeof(double));
+    pca->eigmat = (double **)G_calloc(dim, sizeof(double *));
+    for (i = 0; i < dim; i++)
+	pca->eigmat[i] = (double *)G_calloc(dim, sizeof(double));
+    pca->eigval = (double *)G_calloc(dim, sizeof(double));
+}
+
+void write_pca(FILE * fp, Pca * pca)
+
+     /* write a pca structure to the file pointed to by fp */
+{
+    int i, j;
+
+    fprintf(fp, "eigenvalues:\n");
+    fprintf(fp, "%f", pca->eigval[0]);
+    for (i = 1; i < pca->n; i++) {
+	fprintf(fp, "\t%f", pca->eigval[i]);
+    }
+    fprintf(fp, "\n");
+    fprintf(fp, "eigenvectors (by column):\n");
+    for (i = 0; i < pca->n; i++) {
+	fprintf(fp, "%f", pca->eigmat[i][0]);
+	for (j = 1; j < pca->n; j++)
+	    fprintf(fp, "\t%f", pca->eigmat[i][j]);
+	fprintf(fp, "\n");
+    }
+}
+
+void read_pca(FILE * fp, Pca * pca)
+
+     /* read a pca structure from the file pointed to by fp */
+{
+    int i, j;
+    char *line = NULL;
+
+    pca->eigval = (double *)G_calloc(pca->n, sizeof(double));
+    pca->eigmat = (double **)G_calloc(pca->n, sizeof(double *));
+    for (i = 0; i < pca->n; i++) {
+	pca->eigmat[i] = (double *)G_calloc(pca->n, sizeof(double));
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < pca->n; i++) {
+	sscanf(line, "%lf", &(pca->eigval[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+    line = GetLine(fp);
+    for (j = 0; j < pca->n; j++) {
+	line = GetLine(fp);
+	for (i = 0; i < pca->n; i++) {
+	    sscanf(line, "%lf", &(pca->eigmat[j][i]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+    }
+}
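
The read_pca() parser above walks each tab-separated line with repeated sscanf/strchr calls. A small self-contained illustration of that idiom, with an added NULL check that the library omits (the input line here is hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char buf[] = "1.5\t2.25\t-0.75";
        char *line = buf;
        double v[3];
        int i;

        for (i = 0; i < 3; i++) {
            sscanf(line, "%lf", &v[i]);     /* read the leading field */
            line = strchr(line, '\t');      /* NULL after the last field */
            if (line == NULL)
                break;
            line++;                         /* skip the tab itself */
        }
        printf("%g %g %g\n", v[0], v[1], v[2]);
        return 0;
    }
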

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/percent.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/percent.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/percent.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,39 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   status of a loop computation
+ */
+
+#include <stdio.h>
+
+static int prev = -1;
+
+void percent(int n, int d, int s)
+
+     /*
+        compute the percentage (and print it to stderr)
+        of work done within a loop.
+        n is the current count, d the total number, s the step
+      */
+{
+    register int x;
+
+    if (d <= 0 || s <= 0)
+	x = 100;
+    else {
+	x = n * 100 / d;
+	if (x % s)
+	    return;
+    }
+    if (n <= 0 || n >= d || x != prev) {
+	prev = x;
+	fprintf(stderr, "%4d%%\b\b\b\b\b", x);
+	fflush(stderr);
+    }
+    if (x >= 100) {
+	fprintf(stderr, "\n");
+	prev = -1;
+    }
+}
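
A sketch of how percent() is typically driven from a loop; the surrounding program is hypothetical, only the declaration mirrors the function above:

    #include <stdio.h>

    void percent(int n, int d, int s);   /* from percent.c above */

    int main(void)
    {
        int i, n = 1000;

        for (i = 0; i < n; i++) {
            /* ... per-iteration work ... */
            percent(i, n, 10);   /* prints 0%, 10%, ..., overwriting in place */
        }
        percent(n, n, 10);       /* final 100% plus the trailing newline */
        return 0;
    }
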

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/random.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/random.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/random.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,146 @@
+/*
+   The following routines are borrowed from "Numerical Recipes in C"
+
+   for 
+
+   extraction of samples from normal and uniform distributions
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+#define M1 259200
+#define IA1 7141
+#define IC1 54773
+#define RM1 (1.0/M1)
+#define M2 134456
+#define IA2 8121
+#define IC2 28411
+#define RM2  (1.0/M2)
+#define M3 243000
+#define IA3 4561
+#define IC3 51349
+
+double ran1(int *idum)
+
+     /*
+        return a double from a uniform distribution over [0,1].
+        idum initializes the procedure
+      */
+{
+    static long ix1, ix2, ix3;
+    static double r[98];
+    double temp;
+    static int iff = 0;
+    int j;
+
+    if (*idum < 0 || iff == 0) {
+	iff = 1;
+	ix1 = (IC1 - (*idum)) % M1;
+	ix1 = (IA1 * ix1 + IC1) % M1;
+	ix2 = ix1 % M2;
+	ix1 = (IA1 * ix1 + IC1) % M1;
+	ix3 = ix1 % M3;
+	for (j = 1; j <= 97; j++) {
+	    ix1 = (IA1 * ix1 + IC1) % M1;
+	    ix2 = (IA2 * ix2 + IC2) % M2;
+	    r[j] = (ix1 + ix2 * RM2) * RM1;
+	}
+	*idum = 1;
+    }
+    ix1 = (IA1 * ix1 + IC1) % M1;
+    ix2 = (IA2 * ix2 + IC2) % M2;
+    ix3 = (IA3 * ix3 + IC3) % M3;
+    j = 1 + ((97 * ix3) / M3);
+    if (j > 97 || j < 1) {
+	fprintf(stderr, "RAN1: this cannot happen\n");
+	exit(-1);
+    }
+    temp = r[j];
+    r[j] = (ix1 + ix2 * RM2) * RM1;
+    return temp;
+}
+
+
+double gasdev(int *idum)
+
+     /*
+        return a double from a normal distribution (m=0, v=1).
+        idum initializes the procedure
+      */
+{
+    static int iset = 0;
+    static double gset;
+    double fac, r, v1, v2;
+
+    if (iset == 0) {
+	do {
+	    v1 = 2.0 * ran1(idum) - 1.0;
+	    v2 = 2.0 * ran1(idum) - 1.0;
+	    r = v1 * v1 + v2 * v2;
+	} while (r >= 1.0 || r == 0.0);
+	fac = sqrt(-2.0 * log(r) / r);
+	gset = v1 * fac;
+	iset = 1;
+	return v2 * fac;
+    }
+    else {
+	iset = 0;
+	return gset;
+    }
+}
+
+double expdev(int *idum)
+{
+    return -log(ran1(idum));
+}
+
+
+double gamdev(double A, double B, int *idum)
+{
+    int j, ia;
+    double am, e, s, v1, v2, x, y, p;
+    const double exp_m1 = 0.36787944117144232159;
+
+    ia = (int)A;
+    if (ia < 1) {
+	e = 1.0 + exp_m1 * A;
+	for (;;) {
+	    p = e * ran1(idum);
+	    if (p >= 1.0) {
+		x = -log((e - p) / A);
+		if (expdev(idum) >= (1.0 - A) * log(x))
+		    break;
+	    }
+	    else {
+		x = exp(log(p) / A);
+		if (expdev(idum) >= x)
+		    break;
+	    }
+	}
+	return x * B;
+    }
+    if (ia < 6) {
+	x = 1.0;
+	for (j = 1; j <= ia; j++)
+	    x *= ran1(idum);
+	x = -log(x);
+    }
+    else {
+	do {
+	    do {
+		do {
+		    v1 = 2.0 * ran1(idum) - 1.0;
+		    v2 = 2.0 * ran1(idum) - 1.0;
+		} while (v1 * v1 + v2 * v2 > 1.0);
+		y = v2 / v1;
+		am = ia - 1;
+		s = sqrt(2.0 * am + 1.0);
+		x = s * y + am;
+	    } while (x <= 0.0);
+	    e = (1.0 + y * y) * exp(am * log(x / am) - s * y);
+	} while (ran1(idum) > e);
+    }
+    return x * B;
+}
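
gasdev() above is the polar (Marsaglia) variant of the Box-Muller transform: it rejects points outside the unit disc and converts each accepted pair into two independent N(0,1) deviates, caching the second. A standalone sketch of the same transform, substituting stdlib rand() for ran1() purely for illustration:

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>

    static double unit_rand(void)
    {
        return (double)rand() / ((double)RAND_MAX + 1.0);
    }

    int main(void)
    {
        double v1, v2, r, fac;
        int i;

        srand(42);
        for (i = 0; i < 5; i++) {
            do {                            /* rejection step: unit disc */
                v1 = 2.0 * unit_rand() - 1.0;
                v2 = 2.0 * unit_rand() - 1.0;
                r = v1 * v1 + v2 * v2;
            } while (r >= 1.0 || r == 0.0);
            fac = sqrt(-2.0 * log(r) / r);
            printf("%f %f\n", v1 * fac, v2 * fac);  /* two N(0,1) deviates */
        }
        return 0;
    }
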

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/read_models.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/read_models.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/read_models.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,427 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   loading different model types
+ */
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+
+
+static void read_bsvm(FILE * fp, BSupportVectorMachine ** bsvm);
+static void read_btree(FILE * fp, BTree ** btree);
+static void read_tree(FILE * fp, Tree ** tree);
+static void read_svm(FILE * fp, SupportVectorMachine ** svm);
+static void read_gm(FILE * fp, GaussianMixture ** gm);
+static void read_nn(FILE * fp, NearestNeighbor ** nn);
+
+int read_model(char *file, Features * features, NearestNeighbor * nn,
+	       GaussianMixture * gm, Tree * tree, SupportVectorMachine * svm,
+	       BTree * btree, BSupportVectorMachine * bsvm)
+
+     /* read a model from file and fill the corresponding structure according
+        to the model type. Moreover, load the features */
+{
+    int model_type;
+    FILE *fp;
+    char tempbuf[500];
+    char *line = NULL;
+    int i;
+
+    fp = fopen(file, "r");
+    if (fp == NULL) {
+	sprintf(tempbuf, "read_model-> Can't open file %s for reading", file);
+	G_fatal_error(tempbuf);
+    }
+
+    read_header_features(fp, features);
+
+    /* scan model type */
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    if (strcmp(line, "GaussianMixture") == 0) {
+	model_type = GM_model;
+    }
+    else if (strcmp(line, "NearestNeighbor") == 0) {
+	model_type = NN_model;
+    }
+    else if (strcmp(line, "ClassificationTree") == 0) {
+	model_type = CT_model;
+    }
+    else if (strcmp(line, "SupportVectorMachine") == 0) {
+	model_type = SVM_model;
+    }
+    else if (strcmp(line, "B-ClassificationTree") == 0) {
+	model_type = BCT_model;
+    }
+    else if (strcmp(line, "B-SupportVectorMachine") == 0) {
+	model_type = BSVM_model;
+    }
+    else {
+	return 0;
+    }
+
+    /* read model */
+    switch (model_type) {
+    case NN_model:
+	read_nn(fp, &nn);
+	break;
+    case GM_model:
+	read_gm(fp, &gm);
+	break;
+    case CT_model:
+	read_tree(fp, &tree);
+	break;
+    case SVM_model:
+	read_svm(fp, &svm);
+	break;
+    case BCT_model:
+	read_btree(fp, &btree);
+	break;
+    case BSVM_model:
+	read_bsvm(fp, &bsvm);
+	break;
+    case 0:
+	return 0;
+	break;
+    }
+
+    if (features->f_pca[0]) {
+	features->pca = (Pca *) G_calloc(features->f_pca[1], sizeof(Pca));
+
+	line = GetLine(fp);
+	line = GetLine(fp);
+	line = GetLine(fp);
+	sscanf(line, "%d", &(features->npc));
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    features->pca[i].n =
+		features->training.rows * features->training.cols;
+	    read_pca(fp, &(features->pca[i]));
+	}
+    }
+
+    fclose(fp);
+
+    return model_type;
+}
+
+
+static void read_bsvm(FILE * fp, BSupportVectorMachine ** bsvm)
+{
+    char *line = NULL;
+    int i;
+    SupportVectorMachine *tmp_svm;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf", &((*bsvm)->w));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*bsvm)->nsvm));
+
+    (*bsvm)->weights = (double *)G_calloc((*bsvm)->nsvm, sizeof(double));
+    (*bsvm)->svm = (SupportVectorMachine *)
+	G_calloc((*bsvm)->nsvm, sizeof(SupportVectorMachine));
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < (*bsvm)->nsvm; i++) {
+	sscanf(line, "%lf", &((*bsvm)->weights[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+
+    for (i = 0; i < (*bsvm)->nsvm; i++) {
+	tmp_svm = &((*bsvm)->svm[i]);
+	read_svm(fp, &tmp_svm);
+    }
+}
+
+
+static void read_btree(FILE * fp, BTree ** btree)
+{
+    char *line = NULL;
+    int i;
+    Tree *tmp_tree;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf", &((*btree)->w));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*btree)->ntrees));
+
+    (*btree)->weights = (double *)G_calloc((*btree)->ntrees, sizeof(double));
+    (*btree)->tree = (Tree *) G_calloc((*btree)->ntrees, sizeof(Tree));
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < (*btree)->ntrees; i++) {
+	sscanf(line, "%lf", &((*btree)->weights[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+
+    for (i = 0; i < (*btree)->ntrees; i++) {
+	tmp_tree = &((*btree)->tree[i]);
+	read_tree(fp, &tmp_tree);
+    }
+}
+
+static void read_tree(FILE * fp, Tree ** tree)
+{
+    char *line = NULL;
+    int i, j;
+    int nclasses;
+    int nvar;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*tree)->nnodes));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &nclasses);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &nvar);
+
+    (*tree)->node = (Node *) G_calloc((*tree)->nnodes, sizeof(Node));
+
+    for (i = 0; i < (*tree)->nnodes; i++) {
+	(*tree)->node[i].npoints_for_class =
+	    (int *)G_calloc(nclasses, sizeof(int));
+	(*tree)->node[i].priors =
+	    (double *)G_calloc(nclasses, sizeof(double));
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < (*tree)->nnodes; i++) {
+	line = GetLine(fp);
+	(*tree)->node[i].nclasses = nclasses;
+	(*tree)->node[i].nvar = nvar;
+	sscanf(line, "%d", &((*tree)->node[i].terminal));
+	line = (char *)strchr(line, '\t');
+	line++;
+	sscanf(line, "%d", &((*tree)->node[i].npoints));
+	line = (char *)strchr(line, '\t');
+	line++;
+	for (j = 0; j < nclasses; j++) {
+	    sscanf(line, "%d", &((*tree)->node[i].npoints_for_class[j]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+	for (j = 0; j < nclasses; j++) {
+	    sscanf(line, "%lf", &((*tree)->node[i].priors[j]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+	sscanf(line, "%d", &(*tree)->node[i].class);
+	line = (char *)strchr(line, '\t');
+	line++;
+	if (!(*tree)->node[i].terminal) {
+	    sscanf(line, "%d", &((*tree)->node[i].left));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &((*tree)->node[i].right));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%d", &((*tree)->node[i].var));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%lf", &((*tree)->node[i].value));
+	}
+    }
+
+}
+
+static void read_svm(FILE * fp, SupportVectorMachine ** svm)
+{
+    char *line = NULL;
+    int i, j;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*svm)->convergence));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    if (strcmp(line, "gaussian_kernel") == 0) {
+	(*svm)->kernel_type = SVM_KERNEL_GAUSSIAN;
+    }
+    else if (strcmp(line, "linear_kernel") == 0) {
+	(*svm)->kernel_type = SVM_KERNEL_LINEAR;
+    }
+    else {
+	G_fatal_error("kernel not recognized\n");
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf", &((*svm)->two_sigma_squared));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf", &((*svm)->C));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf", &((*svm)->cost));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf%lf%d", &((*svm)->tolerance), &((*svm)->eps),
+	   &((*svm)->maxloops));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*svm)->N));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*svm)->d));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%lf", &((*svm)->b));
+    line = GetLine(fp);
+
+
+    if ((*svm)->kernel_type == SVM_KERNEL_GAUSSIAN) {
+	(*svm)->dense_points =
+	    (double **)G_calloc((*svm)->N, sizeof(double *));
+	for (i = 0; i < (*svm)->N; i++) {
+	    (*svm)->dense_points[i] =
+		(double *)G_calloc((*svm)->d, sizeof(double));
+	}
+	(*svm)->target = (int *)G_calloc((*svm)->N, sizeof(int));
+	(*svm)->alph = (double *)G_calloc((*svm)->N, sizeof(double));
+
+	for (i = 0; i < (*svm)->N; i++) {
+	    line = GetLine(fp);
+	    for (j = 0; j < (*svm)->d; j++) {
+		sscanf(line, "%lf", &((*svm)->dense_points[i][j]));
+		line = (char *)strchr(line, '\t');
+		line++;
+	    }
+	    sscanf(line, "%d", &((*svm)->target[i]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%lf", &((*svm)->alph[i]));
+	}
+    }
+    if ((*svm)->kernel_type == SVM_KERNEL_LINEAR) {
+	(*svm)->w = (double *)G_calloc((*svm)->d, sizeof(double));
+	line = GetLine(fp);
+	for (j = 0; j < (*svm)->d; j++) {
+	    sscanf(line, "%lf", &((*svm)->w[j]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+    }
+
+}
+
+
+
+static void read_gm(FILE * fp, GaussianMixture ** gm)
+{
+    char *line = NULL;
+    int i, j, k;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*gm)->nclasses));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*gm)->nvars));
+
+    (*gm)->classes = (int *)G_calloc((*gm)->nclasses, sizeof(int));
+    (*gm)->priors = (double *)G_calloc((*gm)->nclasses, sizeof(double));
+
+
+    (*gm)->mean = (double **)G_calloc((*gm)->nclasses, sizeof(double *));
+    for (i = 0; i < (*gm)->nclasses; i++)
+	(*gm)->mean[i] = (double *)G_calloc((*gm)->nvars, sizeof(double));
+
+    (*gm)->covar = (double ***)G_calloc((*gm)->nclasses, sizeof(double **));
+    for (i = 0; i < (*gm)->nclasses; i++) {
+	(*gm)->covar[i] = (double **)G_calloc((*gm)->nvars, sizeof(double *));
+	for (j = 0; j < (*gm)->nvars; j++)
+	    (*gm)->covar[i][j] =
+		(double *)G_calloc((*gm)->nvars, sizeof(double));
+    }
+
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < (*gm)->nclasses; i++) {
+	sscanf(line, "%d", &((*gm)->classes[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    for (i = 0; i < (*gm)->nclasses; i++) {
+	sscanf(line, "%lf", &((*gm)->priors[i]));
+	line = (char *)strchr(line, '\t');
+	line++;
+    }
+
+
+    for (i = 0; i < (*gm)->nclasses; i++) {
+	line = GetLine(fp);
+	line = GetLine(fp);
+	line = GetLine(fp);
+	for (j = 0; j < (*gm)->nvars; j++) {
+	    sscanf(line, "%lf", &((*gm)->mean[i][j]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+	line = GetLine(fp);
+	for (k = 0; k < (*gm)->nvars; k++) {
+	    line = GetLine(fp);
+	    for (j = 0; j < (*gm)->nvars; j++) {
+		sscanf(line, "%lf", &((*gm)->covar[i][k][j]));
+		line = (char *)strchr(line, '\t');
+		line++;
+	    }
+	}
+    }
+
+
+}
+
+static void read_nn(FILE * fp, NearestNeighbor ** nn)
+{
+    char *line = NULL;
+    int i, j;
+
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*nn)->k));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*nn)->nsamples));
+    line = GetLine(fp);
+    line = GetLine(fp);
+    sscanf(line, "%d", &((*nn)->nvars));
+
+    (*nn)->data = (double **)G_calloc((*nn)->nsamples, sizeof(double *));
+    for (i = 0; i < (*nn)->nsamples; i++)
+	(*nn)->data[i] = (double *)G_calloc((*nn)->nvars, sizeof(double));
+
+    (*nn)->class = (int *)G_calloc((*nn)->nsamples, sizeof(int));
+
+    for (i = 0; i < (*nn)->nsamples; i++) {
+	line = GetLine(fp);
+	for (j = 0; j < (*nn)->nvars; j++) {
+	    sscanf(line, "%lf", &((*nn)->data[i][j]));
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	}
+	sscanf(line, "%d", &((*nn)->class[i]));
+    }
+
+}
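
All the read_* routines above rely on the file layout produced by the write_* functions: every scalar is stored as a label line followed by a value line, hence the paired GetLine() calls. A self-contained sketch of that convention, using an fgets-based stand-in for GetLine() and a hypothetical model file:

    #include <stdio.h>
    #include <string.h>

    /* minimal stand-in for PRLIB's GetLine(): one line, newline stripped */
    static char *get_line(char *buf, int sz, FILE *fp)
    {
        if (fgets(buf, sz, fp) == NULL)
            return NULL;
        buf[strcspn(buf, "\n")] = '\0';
        return buf;
    }

    int main(void)
    {
        char buf[256];
        int k = 0;
        FILE *fp = fopen("model.txt", "r");  /* hypothetical model file */

        if (fp == NULL)
            return 1;
        get_line(buf, sizeof(buf), fp);      /* label line, e.g. "k:" */
        get_line(buf, sizeof(buf), fp);      /* value line */
        sscanf(buf, "%d", &k);
        printf("k = %d\n", k);
        fclose(fp);
        return 0;
    }
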

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/soft_margin_boosting.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/soft_margin_boosting.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/soft_margin_boosting.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,114 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   Soft Boosting implementation (quadratic programming)
+ */
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+void maximize(double *alpha, int N, double *beta, int T, double **M)
+{
+    int i, l, t, s;
+    int convergence = FALSE;
+    double detetaalpha, detetabeta;
+    double *alpha_old;
+    double *beta_old;
+    double eps = 0.00001;
+    double *eta;
+    double tmp;
+
+    alpha_old = (double *)G_calloc(N, sizeof(double));
+    beta_old = (double *)G_calloc(T, sizeof(double));
+    eta = (double *)G_calloc(N, sizeof(double));
+
+    for (l = 0; l < N; l++) {
+	tmp = .0;
+	for (t = 0; t < T; t++)
+	    tmp += M[l][t] * M[l][t];
+	eta[l] = 1.0 / tmp;
+    }
+
+    for (l = 0; l < N; l++)
+	alpha[l] = .0;
+
+    for (t = 0; t < T; t++)
+	beta[t] = .0;
+
+    while (!convergence) {
+	for (l = 0; l < N; l++)
+	    alpha_old[l] = alpha[l];
+	for (s = 0; s < T; s++)
+	    beta_old[s] = beta[s];
+
+	for (l = 0; l < N; l++) {
+	    detetaalpha = .0;
+	    for (t = 0; t < T; t++)
+		for (i = 0; i < N; i++)
+		    detetaalpha -= alpha[i] * M[l][t] * M[i][t];
+
+	    for (t = 0; t < T; t++)
+		detetaalpha -= beta[t] * M[l][t];
+
+	    detetaalpha += 1.0;
+
+	    alpha[l] += eta[l] * detetaalpha;
+
+	    if (alpha[l] < 0)
+		alpha[l] = .0;
+
+	    if (alpha[l] > 100. / N)
+		alpha[l] = 100. / N;
+
+	}
+
+	for (s = 0; s < T; s++) {
+	    detetabeta = -1.0 * beta[s];
+
+	    for (i = 0; i < N; i++)
+		detetabeta -= alpha[i] * M[i][s];
+
+	    beta[s] += detetabeta;
+
+	    if (beta[s] < 0)
+		beta[s] = .0;
+	}
+
+	/*
+	   for(l=0;l<N;l++){
+	   fprintf(stderr,"%f\t",alpha[l]);
+	   }
+	   fprintf(stderr,"\n");
+	 */
+
+	convergence = TRUE;
+	for (l = 0; l < N; l++) {
+	    if (fabs(alpha[l] - alpha_old[l]) > eps) {
+		fprintf(stderr, "ALPHA %d %f %f\n", l, alpha_old[l],
+			alpha[l]);
+		convergence = FALSE;
+		break;
+	    }
+	}
+	if (convergence)
+	    for (s = 0; s < T; s++) {
+		if (fabs(beta[s] - beta_old[s]) > eps) {
+		    fprintf(stderr, "BETA %d %f %f\n", s, beta_old[s],
+			    beta[s]);
+		    convergence = FALSE;
+		    break;
+		}
+	    }
+    }
+    G_free(alpha_old);
+    G_free(beta_old);
+    G_free(eta);
+
+
+
+}
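
maximize() above performs coordinate-wise gradient ascent on the dual Lagrangian, projecting each alpha back into the box [0, 100/N] after every step. A toy sketch of one such projected update; the numbers are illustrative, not the boosting objective itself:

    #include <stdio.h>

    int main(void)
    {
        double alpha = 0.02;           /* current coordinate value */
        double grad = -1.5;            /* partial derivative at alpha */
        double eta = 0.05;             /* per-coordinate step size */
        double upper = 100.0 / 50.0;   /* box bound 100/N with N = 50 */

        alpha += eta * grad;           /* gradient ascent step */
        if (alpha < 0.0)               /* project onto the box */
            alpha = 0.0;
        if (alpha > upper)
            alpha = upper;
        printf("alpha = %f\n", alpha); /* 0.02 - 0.075, clipped to 0 */
        return 0;
    }
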

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/sort.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/sort.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/sort.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,115 @@
+/*
+   The following routines are borrowed from "Numerical Recipes in C"
+
+   for
+
+   sorting of an array
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <grass/gis.h>
+#include "global.h"
+
+#define ALN2I 1.442695022
+#define TINY 1.0e-5
+
+static void indexx(int n, double arrin[], int indx[]);
+
+void shell(int n, double *arr)
+
+     /*
+        sort and rearrange an array arr of length n
+        into ascending order
+      */
+{
+    int nn, m, j, i, lognb2;
+    double t;
+
+    lognb2 = (log((double)n) * ALN2I + TINY);
+    m = n;
+    for (nn = 1; nn <= lognb2; nn++) {
+	m >>= 1;
+	for (j = m + 1; j <= n; j++) {
+	    i = j - m;
+	    t = arr[j - 1];
+	    while (i >= 1 && arr[i - 1] > t) {
+		arr[i + m - 1] = arr[i - 1];
+		i -= m;
+	    }
+	    arr[i + m - 1] = t;
+	}
+    }
+}
+
+#undef ALN2I
+#undef TINY
+
+
+
+void indexx_1(int n, double arrin[], int indx[])
+
+     /*
+        sort array arrin of length n into ascending order
+        without modifying it. The sorting order is returned
+        in the indx array
+      */
+{
+    int i;
+    double *tmparrin;
+    int *tmpindx;
+
+    tmpindx = (int *)G_calloc(n + 1, sizeof(int));
+    tmparrin = (double *)G_calloc(n + 1, sizeof(double));
+
+    for (i = 0; i < n; i++)
+	tmparrin[i + 1] = arrin[i];
+
+    indexx(n, tmparrin, tmpindx);
+
+    for (i = 0; i < n; i++)
+	indx[i] = tmpindx[i + 1] - 1;
+
+    G_free(tmpindx);
+    G_free(tmparrin);
+
+}
+
+
+static void indexx(int n, double arrin[], int indx[])
+{
+    int l, j, ir, indxt, i;
+    double q;
+
+    for (j = 1; j <= n; j++)
+	indx[j] = j;
+    if (n == 1)
+	return;
+    l = (n >> 1) + 1;
+    ir = n;
+    for (;;) {
+	if (l > 1)
+	    q = arrin[(indxt = indx[--l])];
+	else {
+	    q = arrin[(indxt = indx[ir])];
+	    indx[ir] = indx[1];
+	    if (--ir == 1) {
+		indx[1] = indxt;
+		return;
+	    }
+	}
+	i = l;
+	j = l << 1;
+	while (j <= ir) {
+	    if (j < ir && arrin[indx[j]] < arrin[indx[j + 1]])
+		j++;
+	    if (q < arrin[indx[j]]) {
+		indx[i] = indx[j];
+		j += (i = j);
+	    }
+	    else
+		j = ir + 1;
+	}
+	indx[i] = indxt;
+    }
+}
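
A hypothetical usage of indexx_1() above: it fills indx with the permutation that sorts the input, leaving the data untouched (link against sort.c and the GRASS library it uses):

    #include <stdio.h>

    void indexx_1(int n, double arrin[], int indx[]);  /* from sort.c above */

    int main(void)
    {
        double a[5] = { 3.0, 1.0, 2.5, 0.5, 2.0 };
        int idx[5], i;

        indexx_1(5, a, idx);
        for (i = 0; i < 5; i++)      /* prints 0.5 1 2 2.5 3 */
            printf("%g ", a[idx[i]]);
        printf("\n");
        return 0;
    }
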

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/stats.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/stats.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/stats.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,401 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   statistical description of data
+
+   Supported functions for:
+   - mean computation
+   - standard deviation and variance computation
+   - autocovariance computation
+   - covariance matrix computation
+   - min-max of an array
+ */
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <grass/gis.h>
+
+double mean_of_double_array(double *x, int n)
+
+     /*
+        compute the mean of an array x of length n
+      */
+{
+    int i;
+    double mean = .0;
+
+    for (i = 0; i < n; i++)
+	mean += x[i];
+
+    mean /= n;
+
+    return mean;
+
+}
+
+double var_of_double_array(double *x, int n)
+
+     /* 
+        compute the var of an array x of length n
+      */
+{
+    int i;
+    double deviation;
+    double mean = .0;
+    double var = .0;
+
+    for (i = 0; i < n; i++)
+	mean += x[i];
+
+    mean /= n;
+
+    for (i = 0; i < n; i++) {
+	deviation = x[i] - mean;
+	var += deviation * deviation;
+    }
+
+    var /= (n - 1.0);
+
+    return var;
+
+}
+
+double sd_of_double_array(double *x, int n)
+
+     /* 
+        compute the sd of an array x of length n
+      */
+{
+    int i;
+    double deviation;
+    double mean = .0;
+    double var = .0;
+
+    for (i = 0; i < n; i++)
+	mean += x[i];
+
+    mean /= n;
+
+    for (i = 0; i < n; i++) {
+	deviation = x[i] - mean;
+	var += deviation * deviation;
+    }
+
+    var /= (n - 1.0);
+
+    return sqrt(var);
+
+}
+
+
+double var_of_double_array_given_mean(double *x, int n, double mean)
+
+     /*
+        compute the var of an array x of length n
+        without computing the mean, which is
+        given as input
+      */
+{
+    int i;
+    double deviation;
+    double var = .0;
+
+    for (i = 0; i < n; i++) {
+	deviation = x[i] - mean;
+	var += deviation * deviation;
+    }
+
+    var /= (n - 1.0);
+
+    return var;
+
+}
+
+
+double sd_of_double_array_given_mean(double *x, int n, double mean)
+
+     /*
+        compute the sd of an array x of length n
+        without computing the mean, which is
+        given as input
+      */
+{
+    int i;
+    double deviation;
+    double var = .0;
+
+    for (i = 0; i < n; i++) {
+	deviation = x[i] - mean;
+	var += deviation * deviation;
+    }
+
+    var /= (n - 1.0);
+
+    return sqrt(var);
+
+}
+
+
+void mean_and_var_of_double_matrix_by_row(double **x, int n, int m,
+					  double *mean, double *var)
+
+     /*
+        each row of the input matrix x (dimension n x m)
+        is considered an independent array of data.
+        The function computes mean and var of each row,
+        stored within the arrays mean and var.
+      */
+{
+    int i, j;
+    double deviation;
+
+    for (j = 0; j < n; j++)
+	for (i = 0; i < m; i++) {
+	    mean[j] += x[j][i];
+	}
+    for (i = 0; i < n; i++)
+	mean[i] /= m;
+    for (j = 0; j < n; j++) {
+	for (i = 0; i < m; i++) {
+	    deviation = x[j][i] - mean[j];
+	    var[j] += deviation * deviation;
+	}
+    }
+    for (i = 0; i < n; i++)
+	var[i] = var[i] / (m - 1.);
+}
+
+void mean_and_sd_of_double_matrix_by_row(double **x, int n, int m,
+					 double *mean, double *sd)
+
+     /*
+        each row of the input matrix x (dimension n x m)
+        is considered an independent array of data.
+        The function computes mean and sd of each row,
+        stored within the arrays mean and sd.
+      */
+{
+    int i, j;
+    double deviation;
+
+    for (j = 0; j < n; j++)
+	for (i = 0; i < m; i++) {
+	    mean[j] += x[j][i];
+	}
+    for (i = 0; i < n; i++)
+	mean[i] /= m;
+    for (j = 0; j < n; j++) {
+	for (i = 0; i < m; i++) {
+	    deviation = x[j][i] - mean[j];
+	    sd[j] += deviation * deviation;
+	}
+    }
+    for (i = 0; i < n; i++)
+	sd[i] = sqrt(sd[i] / (m - 1.));
+}
+
+void mean_and_var_of_double_matrix_by_col(double **x, int n, int m,
+					  double *mean, double *var)
+
+     /*
+        each col of the input matrix x (dimension n x m)
+        is considered an independent array of data.
+        The function computes mean and var of each col,
+        stored within the arrays mean and var.
+      */
+{
+    int i, j;
+    double deviation;
+
+    for (i = 0; i < m; i++) {
+	for (j = 0; j < n; j++)
+	    mean[i] += x[j][i];
+    }
+    for (i = 0; i < m; i++)
+	mean[i] /= n;
+
+    for (i = 0; i < m; i++) {
+	for (j = 0; j < n; j++) {
+	    deviation = x[j][i] - mean[i];
+	    var[i] += deviation * deviation;
+	}
+    }
+    for (i = 0; i < m; i++)
+	var[i] = var[i] / (n - 1.);
+}
+
+void mean_and_sd_of_double_matrix_by_col(double **x, int n, int m,
+					 double *mean, double *sd)
+
+     /*
+        each col of the input matrix x (dimension n x m)
+        is considered an independent array of data.
+        The function computes mean and sd of each col,
+        stored within the arrays mean and sd.
+      */
+{
+    int i, j;
+    double deviation;
+
+    for (i = 0; i < m; i++) {
+	for (j = 0; j < n; j++)
+	    mean[i] += x[j][i];
+    }
+    for (i = 0; i < m; i++)
+	mean[i] /= n;
+
+    for (i = 0; i < m; i++) {
+	for (j = 0; j < n; j++) {
+	    deviation = x[j][i] - mean[i];
+	    sd[i] += deviation * deviation;
+	}
+    }
+    for (i = 0; i < m; i++)
+	sd[i] = sqrt(sd[i] / (n - 1.));
+}
+
+double auto_covariance_of_2_double_array(double *x, double *y, int n)
+
+     /*
+        compute the autocovariance of 2 arrays x and y of length n
+      */
+{
+    int i;
+    double mx = .0;
+    double my = .0;
+    double cc = .0;
+
+    for (i = 0; i < n; i++) {
+	mx += x[i];
+	my += y[i];
+    }
+    mx /= n;
+    my /= n;
+
+    for (i = 0; i < n; i++)
+	cc += (x[i] - mx) * (y[i] - my);
+    cc /= n;
+    return (cc);
+}
+
+void covariance_of_double_matrix(double **x, int n, int m, double **cov)
+
+     /*
+        compute covariance matrix of a matrix x of dimension n x m.
+        Output to matrix cov.
+      */
+{
+    int i, j, k;
+    double *mean;
+
+    mean = (double *)G_calloc(m, sizeof(double));
+
+    for (i = 0; i < m; i++) {
+	for (j = 0; j < n; j++)
+	    mean[i] += x[j][i];
+	mean[i] /= n;
+    }
+
+    for (i = 0; i < m; i++)
+	for (j = i; j < m; j++) {
+	    for (k = 0; k < n; k++)
+		cov[i][j] += (x[k][i] - mean[i]) * (x[k][j] - mean[j]);
+	    cov[i][j] /= n;
+	    cov[j][i] = cov[i][j];
+	}
+
+    G_free(mean);
+
+}
+
+
+
+double entropy(double *p, int n)
+
+     /*
+        compute and return the entropy of an array p (its components
+        interpreted as proportions) of length n
+      */
+{
+    int i;
+    double entropy = .0;
+
+    for (i = 0; i < n; i++)
+	if (p[i] > 0)
+	    entropy += p[i] * log(p[i]);
+
+    return -1.0 * entropy;
+
+}
+
+double gaussian_kernel(double *x, double *y, int n, double kp)
+
+     /*
+        compute and return the gaussian kernel exp(-||x-y||/kp),
+        x and y arrays of length n
+      */
+{
+    int j;
+    double out = 0.0;
+    double tmp;
+
+    for (j = 0; j < n; j++) {
+	tmp = x[j] - y[j];
+	out += tmp * tmp;
+    }
+
+    return exp(-1.0 * sqrt(out) / kp);
+}
+
+double squared_gaussian_kernel(double *x, double *y, int n, double kp)
+
+     /*
+        compute and return the gaussian kernel exp(-||x-y||^2/kp),
+        x and y arrays of length n
+      */
+{
+    int j;
+    double out = 0.0;
+    double tmp;
+
+    for (j = 0; j < n; j++) {
+	tmp = x[j] - y[j];
+	out += tmp * tmp;
+    }
+
+    return exp(-1.0 * out / kp);
+}
+
+double min(double *x, int n)
+{
+    int j;
+    double out;
+
+    out = x[0];
+
+    for (j = 1; j < n; j++) {
+	if (out > x[j]) {
+	    out = x[j];
+	}
+    }
+    return out;
+}
+
+
+double max(double *x, int n)
+{
+    int j;
+    double out;
+
+    out = x[0];
+
+    for (j = 1; j < n; j++) {
+	if (out < x[j]) {
+	    out = x[j];
+	}
+    }
+    return out;
+}
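
A worked example of the covariance computation above on a 3 x 2 data matrix, self-contained and using fixed arrays instead of G_calloc; note the 1/n normalization, matching covariance_of_double_matrix():

    #include <stdio.h>

    int main(void)
    {
        /* three observations of two variables */
        double x[3][2] = { {1.0, 2.0}, {2.0, 4.0}, {3.0, 6.0} };
        double mean[2] = { 0.0, 0.0 }, cov[2][2] = { {0, 0}, {0, 0} };
        int i, j, k, n = 3, m = 2;

        for (i = 0; i < m; i++) {           /* column means */
            for (j = 0; j < n; j++)
                mean[i] += x[j][i];
            mean[i] /= n;
        }
        for (i = 0; i < m; i++)             /* upper triangle, mirrored */
            for (j = i; j < m; j++) {
                for (k = 0; k < n; k++)
                    cov[i][j] += (x[k][i] - mean[i]) * (x[k][j] - mean[j]);
                cov[i][j] /= n;             /* 1/n, as in the library */
                cov[j][i] = cov[i][j];
            }
        /* var(x1) = 2/3, var(x2) = 8/3, cov = 4/3 */
        printf("%f %f %f\n", cov[0][0], cov[1][1], cov[0][1]);
        return 0;
    }
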

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/svm.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/svm.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/svm.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,1789 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structures SupportVectorMachine and BSupportVectorMachine management
+ */
+
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+
+static void svm_smo();
+static double learned_func_linear();
+static double learned_func_nonlinear();
+static double rbf_kernel();
+static double direct_kernel();
+static double dot_product_func();
+static int examineExample();
+static int takeStep();
+static int distance_from_span_sv();
+double dot_product();
+
+void compute_svm(SupportVectorMachine * svm, int n, int d, double **x, int *y,
+		 int svm_kernel, double svm_kp, double svm_C, double svm_tol,
+		 double svm_eps, int svm_maxloops, int svm_verbose,
+		 double *svm_W)
+{
+    int i, j;
+
+    svm->N = n;
+    svm->d = d;
+    svm->C = svm_C;
+    svm->tolerance = svm_tol;
+    svm->eps = svm_eps;
+    svm->two_sigma_squared = svm_kp;
+    svm->kernel_type = svm_kernel;
+    svm->maxloops = svm_maxloops;
+    svm->verbose = svm_verbose;
+    svm->b = .0;
+
+    if (svm_kernel != SVM_KERNEL_DIRECT) {
+	svm->dense_points = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    svm->dense_points[i] = (double *)G_calloc(d, sizeof(double));
+
+	for (i = 0; i < n; i++)
+	    for (j = 0; j < d; j++)
+		svm->dense_points[i][j] = x[i][j];
+
+	svm->w = (double *)G_calloc(d, sizeof(double));
+    }
+
+    svm->target = (int *)G_calloc(n, sizeof(int));
+    for (i = 0; i < n; i++)
+	svm->target[i] = y[i];
+
+    svm->Cw = (double *)G_calloc(n, sizeof(double));
+    svm->alph = (double *)G_calloc(n, sizeof(double));
+    svm->error_cache = (double *)G_calloc(n, sizeof(double));
+    for (i = 0; i < n; i++)
+	svm->error_cache[i] = -y[i];
+
+    svm->precomputed_self_dot_product = (double *)G_calloc(n, sizeof(double));
+
+    for (i = 0; i < n; i++)
+	svm->Cw[i] = svm->C * svm_W[i];
+
+    if (svm_kernel == SVM_KERNEL_DIRECT) {
+	int p_class;
+	int n_class = 0;
+	int index;
+	int i1, i2;
+
+	for (i = 0; i < n; i++)
+	    if (y[i] == -1)
+		n_class++;
+
+	p_class = n - n_class;
+	svm->d = p_class * n_class;
+	svm->orig_d = d;
+
+	svm->dense_points = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    svm->dense_points[i] = (double *)G_calloc(svm->d, sizeof(double));
+
+	svm->w = (double *)G_calloc(svm->d, sizeof(double));
+
+	svm->dot_prod = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    svm->dot_prod[i] = (double *)G_calloc(n, sizeof(double));
+
+	for (i = 0; i < n; i++)
+	    svm->dot_prod[i][i] = dot_product(x[i], x[i], d);
+
+	for (i = 0; i < n; i++)
+	    for (j = i + 1; j < n; j++)
+		svm->dot_prod[j][i] = svm->dot_prod[i][j] =
+		    dot_product(x[i], x[j], d);
+
+	svm->models = (SVM_direct_kernel *)
+	    G_calloc(svm->d, sizeof(SVM_direct_kernel));
+
+	index = 0;
+	for (i = 0; i < n; i++)
+	    if (y[i] == -1)
+		for (j = 0; j < n; j++)
+		    if (y[j] == 1) {
+			svm->models[index].i1 = i;
+			svm->models[index].x1 = x[i];
+			svm->models[index].y1 = y[i];
+			svm->models[index].i2 = j;
+			svm->models[index].x2 = x[j];
+			svm->models[index].y2 = y[j];
+			svm->models[index].d = d;
+			svm->models[index].w_coeff = (y[j] - y[i]) /
+			    (y[j] * svm->dot_prod[j][j] -
+			     y[i] * svm->dot_prod[i][i] - (y[j] - y[i])
+			     * svm->dot_prod[i][j]);
+			svm->models[index].b =
+			    y[i] -
+			    svm->models[index].w_coeff * (y[i] *
+							  svm->dot_prod[i][i]
+							  +
+							  y[j] *
+							  svm->dot_prod[i]
+							  [j]);
+			index++;
+		    }
+
+	for (i = 0; i < n; i++)
+	    for (j = 0; j < svm->d; j++) {
+		i1 = svm->models[j].i1;
+		i2 = svm->models[j].i2;
+
+		svm->dense_points[i][j] = svm->models[j].w_coeff *
+		    (y[i1] * svm->dot_prod[i1][i] +
+		     y[i2] * svm->dot_prod[i2][i])
+		    + svm->models[j].b;
+
+		if (svm->dense_points[i][j] > 1.0)
+		    svm->dense_points[i][j] = 1.0;
+		else if (svm->dense_points[i][j] < -1.0)
+		    svm->dense_points[i][j] = -1.0;
+	    }
+
+	svm->H = (double **)G_calloc(n, sizeof(double *));
+	for (j = 0; j < n; j++)
+	    svm->H[j] = (double *)G_calloc(n, sizeof(double));
+
+	for (i = 0; i < n; i++)
+	    svm->H[i][i] = dot_product_func(i, i, svm);
+
+	for (i = 0; i < n; i++)
+	    for (j = i + 1; j < n; j++)
+		svm->H[j][i] = svm->H[i][j] = dot_product_func(i, j, svm);
+
+    }
+
+    svm_smo(svm);
+
+    svm->non_bound_support = svm->bound_support = 0;
+    for (i = 0; i < n; i++) {
+	if (svm->alph[i] > 0) {
+	    if (svm->alph[i] < svm->Cw[i])
+		svm->non_bound_support++;
+	    else
+		svm->bound_support++;
+	}
+    }
+}
+
+
+static void svm_smo(SupportVectorMachine * SVM)
+{
+    int i, k;
+    int numChanged;
+    int examineAll;
+    int nloops = 0;
+
+    SVM->end_support_i = SVM->N;
+
+    if (SVM->kernel_type == SVM_KERNEL_LINEAR) {
+	SVM->kernel_func = dot_product_func;
+	SVM->learned_func = learned_func_linear;
+    }
+
+    if (SVM->kernel_type == SVM_KERNEL_GAUSSIAN) {
+	/*
+	   SVM->precomputed_self_dot_product=(double *)G_calloc (SVM->N,sizeof(double));
+	 */
+	for (i = 0; i < SVM->N; i++)
+	    SVM->precomputed_self_dot_product[i] =
+		dot_product_func(i, i, SVM);
+	SVM->kernel_func = rbf_kernel;
+	SVM->learned_func = learned_func_nonlinear;
+    }
+
+    if (SVM->kernel_type == SVM_KERNEL_DIRECT) {
+	SVM->kernel_func = dot_product_func;
+	SVM->learned_func = learned_func_linear;
+    }
+
+    numChanged = 0;
+    examineAll = 1;
+
+    SVM->convergence = 1;
+    while (SVM->convergence == 1 && (numChanged > 0 || examineAll)) {
+	numChanged = 0;
+	if (examineAll) {
+	    for (k = 0; k < SVM->N; k++)
+		numChanged += examineExample(k, SVM);
+	}
+	else {
+	    for (k = 0; k < SVM->N; k++)
+		if (SVM->alph[k] > 0 && SVM->alph[k] < SVM->Cw[k])
+		    numChanged += examineExample(k, SVM);
+	}
+	if (examineAll == 1)
+	    examineAll = 0;
+	else if (numChanged == 0)
+	    examineAll = 1;
+
+	nloops += 1;
+	if (nloops == SVM->maxloops)
+	    SVM->convergence = 0;
+	if (SVM->verbose == 1)
+	    fprintf(stderr, "%6d\b\b\b\b\b\b\b", nloops);
+    }
+}
+
+
+static double learned_func_linear(int k, SupportVectorMachine * SVM)
+{
+    double s = 0.0;
+    int i;
+
+    for (i = 0; i < SVM->d; i++)
+	s += SVM->w[i] * SVM->dense_points[k][i];
+
+    s -= SVM->b;
+
+    return s;
+}
+
+static double learned_func_nonlinear(int k, SupportVectorMachine * SVM)
+{
+    double s = 0.0;
+    int i;
+
+    for (i = 0; i < SVM->end_support_i; i++)
+	if (SVM->alph[i] > 0)
+	    s += SVM->alph[i] * SVM->target[i] * SVM->kernel_func(i, k, SVM);
+
+    s -= SVM->b;
+
+    return s;
+}
+
+static double rbf_kernel(int i1, int i2, SupportVectorMachine * SVM)
+{
+    double s;
+
+    s = dot_product_func(i1, i2, SVM);
+
+    s *= -2;
+
+    s += SVM->precomputed_self_dot_product[i1] +
+	SVM->precomputed_self_dot_product[i2];
+
+    return exp(-s / SVM->two_sigma_squared);
+}
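
rbf_kernel() above avoids recomputing squared distances by expanding ||xi - xj||^2 = <xi,xi> - 2<xi,xj> + <xj,xj> from the cached self dot products. A standalone numeric check of that identity (toy vectors; two_sigma_squared plays the role of the svm_kp parameter):

    #include <stdio.h>
    #include <math.h>

    static double dot(const double *a, const double *b, int d)
    {
        double s = 0.0;
        int i;

        for (i = 0; i < d; i++)
            s += a[i] * b[i];
        return s;
    }

    int main(void)
    {
        double a[2] = { 1.0, 2.0 }, b[2] = { 3.0, 1.0 };
        double two_sigma_squared = 4.0;
        /* <a,a> - 2<a,b> + <b,b> = 5 - 10 + 10 = 5 = ||a-b||^2 */
        double sq = dot(a, a, 2) - 2.0 * dot(a, b, 2) + dot(b, b, 2);

        printf("k(a,b) = %f\n", exp(-sq / two_sigma_squared));  /* exp(-1.25) */
        return 0;
    }
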
+
+
+static double dot_product_func(int i1, int i2, SupportVectorMachine * SVM)
+{
+    double dot = 0.0;
+    int i;
+
+    for (i = 0; i < SVM->d; i++)
+	dot += SVM->dense_points[i1][i] * SVM->dense_points[i2][i];
+
+    return dot;
+}
+
+static int examineExample(int i1, SupportVectorMachine * SVM)
+{
+    double y1, alph1, E1, r1;
+
+    y1 = SVM->target[i1];
+    alph1 = SVM->alph[i1];
+
+    if (alph1 > 0 && alph1 < SVM->Cw[i1])
+	E1 = SVM->error_cache[i1];
+    else
+	E1 = SVM->learned_func(i1, SVM) - y1;
+
+    r1 = y1 * E1;
+
+    if ((r1 < -SVM->tolerance && alph1 < SVM->Cw[i1]) ||
+	(r1 > SVM->tolerance && alph1 > 0)) {
+	{
+	    int k, i2;
+	    double tmax;
+
+	    for (i2 = (-1), tmax = 0, k = 0; k < SVM->end_support_i; k++)
+		if (SVM->alph[k] > 0 && SVM->alph[k] < SVM->Cw[k]) {
+		    double E2, temp;
+
+		    E2 = SVM->error_cache[k];
+
+		    temp = fabs(E1 - E2);
+
+		    if (temp > tmax) {
+			tmax = temp;
+			i2 = k;
+		    }
+		}
+
+	    if (i2 >= 0) {
+		if (takeStep(i1, i2, SVM))
+		    return 1;
+	    }
+	}
+	{
+	    int k0, k, i2;
+
+	    for (k0 = (int)(drand48() * SVM->end_support_i), k = k0;
+		 k < SVM->end_support_i + k0; k++) {
+		i2 = k % SVM->end_support_i;
+		if (SVM->alph[i2] > 0 && SVM->alph[i2] < SVM->Cw[i2]) {
+		    if (takeStep(i1, i2, SVM))
+			return 1;
+		}
+	    }
+	}
+	{
+	    int k0, k, i2;
+
+	    for (k0 = (int)(drand48() * SVM->end_support_i), k = k0;
+		 k < SVM->end_support_i + k0; k++) {
+		i2 = k % SVM->end_support_i;
+		if (takeStep(i1, i2, SVM))
+		    return 1;
+	    }
+	}
+    }
+    return 0;
+}
+
+
+static int takeStep(int i1, int i2, SupportVectorMachine * SVM)
+{
+    int y1, y2, s;
+    double alph1, alph2;
+    double a1, a2;
+    double E1, E2, L, H, k11, k12, k22, eta, Lobj, Hobj;
+
+    if (i1 == i2)
+	return 0;
+
+    alph1 = SVM->alph[i1];
+    y1 = SVM->target[i1];
+    if (alph1 > 0 && alph1 < SVM->Cw[i1])
+	E1 = SVM->error_cache[i1];
+    else
+	E1 = SVM->learned_func(i1, SVM) - y1;
+
+
+    alph2 = SVM->alph[i2];
+    y2 = SVM->target[i2];
+    if (alph2 > 0 && alph2 < SVM->Cw[i2])
+	E2 = SVM->error_cache[i2];
+    else
+	E2 = SVM->learned_func(i2, SVM) - y2;
+
+    s = y1 * y2;
+
+    if (y1 == y2) {
+	double gamma;
+
+	gamma = alph1 + alph2;
+	if (gamma - SVM->Cw[i1] > 0)
+	    L = gamma - SVM->Cw[i1];
+	else
+	    L = 0.0;
+
+	if (gamma < SVM->Cw[i2])
+	    H = gamma;
+	else
+	    H = SVM->Cw[i2];
+
+
+    }
+    else {
+	double gamma;
+
+	gamma = alph2 - alph1;
+
+	if (gamma > 0)
+	    L = gamma;
+	else
+	    L = 0.0;
+
+	if (SVM->Cw[i1] + gamma < SVM->Cw[i2])
+	    H = SVM->Cw[i1] + gamma;
+	else
+	    H = SVM->Cw[i2];
+    }
+
+    if (L == H)
+	return 0;
+
+    if (SVM->kernel_type != SVM_KERNEL_DIRECT) {
+	k11 = SVM->kernel_func(i1, i1, SVM);
+	k12 = SVM->kernel_func(i1, i2, SVM);
+	k22 = SVM->kernel_func(i2, i2, SVM);
+    }
+    else {
+	k11 = SVM->H[i1][i1];
+	k12 = SVM->H[i1][i2];
+	k22 = SVM->H[i2][i2];
+    }
+
+
+    eta = 2 * k12 - k11 - k22;
+
+    if (eta < 0) {
+	a2 = alph2 + y2 * (E2 - E1) / eta;
+	if (a2 < L)
+	    a2 = L;
+	else if (a2 > H)
+	    a2 = H;
+    }
+    else {
+	{
+	    double c1, c2;
+
+	    c1 = eta / 2;
+	    c2 = y2 * (E1 - E2) - eta * alph2;
+	    Lobj = c1 * L * L + c2 * L;
+	    Hobj = c1 * H * H + c2 * H;
+	}
+	if (Lobj > Hobj + SVM->eps)
+	    a2 = L;
+	else if (Lobj < Hobj - SVM->eps)
+	    a2 = H;
+	else
+	    a2 = alph2;
+    }
+
+    if (fabs(a2 - alph2) < SVM->eps * (a2 + alph2 + SVM->eps))
+	return 0;
+
+    a1 = alph1 - s * (a2 - alph2);
+
+    if (a1 < 0) {
+	a2 += s * a1;
+	a1 = 0;
+    }
+    else if (a1 > SVM->Cw[i1]) {
+	double t;
+
+	t = a1 - SVM->Cw[i1];
+	a2 += s * t;
+	a1 = SVM->Cw[i1];
+    }
+
+    {
+	double b1, b2, bnew;
+
+	if (a1 > 0 && a1 < SVM->Cw[i1])
+	    bnew =
+		SVM->b + E1 + y1 * (a1 - alph1) * k11 + y2 * (a2 -
+							      alph2) * k12;
+	else {
+	    if (a2 > 0 && a2 < SVM->Cw[i2])
+		bnew =
+		    SVM->b + E2 + y1 * (a1 - alph1) * k12 + y2 * (a2 -
+								  alph2) *
+		    k22;
+	    else {
+		b1 = SVM->b + E1 + y1 * (a1 - alph1) * k11 + y2 * (a2 -
+								   alph2) *
+		    k12;
+		b2 = SVM->b + E2 + y1 * (a1 - alph1) * k12 + y2 * (a2 -
+								   alph2) *
+		    k22;
+		bnew = (b1 + b2) / 2;
+	    }
+	}
+
+	SVM->delta_b = bnew - SVM->b;
+	SVM->b = bnew;
+    }
+
+    if (SVM->kernel_type == SVM_KERNEL_LINEAR ||
+	SVM->kernel_type == SVM_KERNEL_DIRECT) {
+	double t1, t2;
+	int i;
+
+	t1 = y1 * (a1 - alph1);
+	t2 = y2 * (a2 - alph2);
+
+	for (i = 0; i < SVM->d; i++)
+	    SVM->w[i] +=
+		SVM->dense_points[i1][i] * t1 + SVM->dense_points[i2][i] * t2;
+    }
+
+    {
+	double t1, t2;
+	int i;
+
+	t1 = y1 * (a1 - alph1);
+	t2 = y2 * (a2 - alph2);
+
+	if (SVM->kernel_type != SVM_KERNEL_DIRECT) {
+	    for (i = 0; i < SVM->end_support_i; i++)
+		SVM->error_cache[i] +=
+		    t1 * SVM->kernel_func(i1, i,
+					  SVM) + t2 * SVM->kernel_func(i2, i,
+								       SVM) -
+		    SVM->delta_b;
+	}
+	else {
+	    for (i = 0; i < SVM->end_support_i; i++)
+		SVM->error_cache[i] +=
+		    t1 * SVM->H[i1][i] + t2 * SVM->H[i2][i] - SVM->delta_b;
+	}
+
+    }
+
+    SVM->alph[i1] = a1;
+    SVM->alph[i2] = a2;
+
+    return 1;
+
+
+}
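
takeStep() above clips the new alpha2 to the box [L, H] before updating; for unequal targets the bounds reduce to L = max(0, alpha2 - alpha1) and H = min(Cw[i2], Cw[i1] + alpha2 - alpha1). A toy evaluation of those bounds:

    #include <stdio.h>

    int main(void)
    {
        double alph1 = 0.3, alph2 = 0.8;   /* current multipliers */
        double C1 = 1.0, C2 = 1.0;         /* Cw[i1], Cw[i2] */
        double gamma = alph2 - alph1;      /* y1 != y2 branch */
        double L = gamma > 0 ? gamma : 0.0;
        double H = (C1 + gamma < C2) ? C1 + gamma : C2;

        printf("L = %f, H = %f\n", L, H);  /* L = 0.5, H = 1.0 */
        return 0;
    }
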
+
+static int distance_from_span_sv(double **M, double *m, int n, double Const,
+				 double **H, double *h, int mH,
+				 double **K, double *k, int mK,
+				 double eps, double threshold)
+{
+    int i, j, l;
+
+    double **invM = NULL;
+    double **HM = NULL, **HMH = NULL, *tnH = NULL, **HMK = NULL, **KM =
+	NULL, **KMK = NULL, *tnK = NULL, **tH = NULL, **tK = NULL;
+    double mMm;
+    double gap;
+    double *alpha = NULL, *beta = NULL;
+    double L, f;
+    double tmpalpha, tmpbeta, tmpL, tmpf;
+
+    /*alloc memory */
+    invM = (double **)G_calloc(n, sizeof(double *));
+    for (i = 0; i < n; i++)
+	invM[i] = (double *)G_calloc(n, sizeof(double));
+
+    if (mH > 0) {
+	HM = (double **)G_calloc(mH, sizeof(double *));
+	for (i = 0; i < mH; i++)
+	    HM[i] = (double *)G_calloc(n, sizeof(double));
+
+	HMH = (double **)G_calloc(mH, sizeof(double *));
+	for (i = 0; i < mH; i++)
+	    HMH[i] = (double *)G_calloc(mH, sizeof(double));
+
+	tnH = (double *)G_calloc(mH, sizeof(double));
+
+	tH = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    tH[i] = (double *)G_calloc(mH, sizeof(double));
+
+	for (i = 0; i < mH; i++)
+	    for (j = 0; j < n; j++)
+		tH[j][i] = H[i][j];
+    }
+
+    if (mH > 0 && mK > 0) {
+	HMK = (double **)G_calloc(mH, sizeof(double *));
+	for (i = 0; i < mH; i++)
+	    HMK[i] = (double *)G_calloc(mK, sizeof(double));
+    }
+
+    if (mK > 0) {
+	KM = (double **)G_calloc(mK, sizeof(double *));
+	for (i = 0; i < mK; i++)
+	    KM[i] = (double *)G_calloc(n, sizeof(double));
+
+	KMK = (double **)G_calloc(mK, sizeof(double *));
+	for (i = 0; i < mK; i++)
+	    KMK[i] = (double *)G_calloc(mK, sizeof(double));
+
+	tnK = (double *)G_calloc(mK, sizeof(double));
+
+	tK = (double **)G_calloc(n, sizeof(double *));
+	for (i = 0; i < n; i++)
+	    tK[i] = (double *)G_calloc(mK, sizeof(double));
+
+	for (i = 0; i < mK; i++)
+	    for (j = 0; j < n; j++)
+		tK[j][i] = K[i][j];
+    }
+
+    /*compute inverse of M */
+    inverse_of_double_matrix(M, invM, n);
+
+    /*compute matrices products */
+    if (mH > 0) {
+	product_double_matrix_double_matrix(H, invM, mH, n, n, HM);
+	product_double_matrix_double_matrix(HM, tH, mH, n, mH, HMH);
+	product_double_matrix_double_vector(HM, m, mH, n, tnH);
+	for (i = 0; i < mH; i++)
+	    tnH[i] += 2. * h[i];
+    }
+
+    if (mH > 0 && mK > 0)
+	product_double_matrix_double_matrix(HM, tK, mH, n, mK, HMK);
+
+    if (mK > 0) {
+	product_double_matrix_double_matrix(K, invM, mK, n, n, KM);
+	product_double_matrix_double_matrix(KM, tK, mK, n, mK, KMK);
+	product_double_matrix_double_vector(KM, m, mK, n, tnK);
+	for (i = 0; i < mK; i++)
+	    tnK[i] += 2. * k[i];
+    }
+
+
+    mMm = 0.0;
+    for (i = 0; i < n; i++)
+	for (j = 0; j < n; j++)
+	    mMm += m[i] * m[j] * invM[i][j];
+    mMm *= -.5;
+
+    if (mH > 0)
+	alpha = (double *)G_calloc(mH, sizeof(double));
+    if (mK > 0)
+	beta = (double *)G_calloc(mK, sizeof(double));
+
+    gap = eps + 1;
+    /*gradient ascendent on the dual Lagrangian */
+    while (gap > eps) {
+	if (mH > 0 && mK > 0) {
+	    for (l = 0; l < mH; l++) {
+
+		tmpalpha = .0;
+		for (i = 0; i < mH; i++)
+		    if (alpha[i] > 0)
+			tmpalpha += HMH[i][l] * alpha[i];
+
+		tmpalpha += tnH[l];
+
+
+		for (i = 0; i < mK; i++)
+		    tmpalpha += HMK[l][i] * beta[i];
+
+		alpha[l] -= tmpalpha / HMH[l][l];
+
+		if (alpha[l] < .0)
+		    alpha[l] = .0;
+	    }
+
+	    for (l = 0; l < mK; l++) {
+		tmpbeta = .0;
+		for (i = 0; i < mK; i++)
+		    tmpbeta += KMK[i][l] * beta[i];
+
+		tmpbeta += tnK[l];
+
+
+		for (i = 0; i < mH; i++)
+		    if (alpha[i] > 0)
+			tmpbeta += HMK[i][l] * alpha[i];
+
+		beta[l] -= tmpbeta / KMK[l][l];
+
+	    }
+	}
+	else if (mH > 0 && mK == 0) {
+	    for (l = 0; l < mH; l++) {
+
+		tmpalpha = .0;
+		for (i = 0; i < mH; i++)
+		    if (alpha[i] > 0)
+			tmpalpha += HMH[i][l] * alpha[i];
+
+		tmpalpha += tnH[l];
+
+		alpha[l] -= tmpalpha / HMH[l][l];
+		if (alpha[l] < .0)
+		    alpha[l] = .0;
+	    }
+	}
+	else if (mH == 0 && mK > 0) {
+	    for (l = 0; l < mK; l++) {
+		tmpbeta = .0;
+		for (i = 0; i < mK; i++)
+		    tmpbeta += KMK[i][l] * beta[i];
+
+		tmpbeta += tnK[l];
+
+		beta[l] -= tmpbeta / KMK[l][l];
+
+	    }
+	}
+
+	/*value of the dual Lagrangian */
+	L = mMm;
+
+	tmpL = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		for (j = 0; j < mH; j++)
+		    if (alpha[j] > 0)
+			tmpL += alpha[i] * alpha[j] * HMH[i][j];
+	L -= .5 * tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		tmpL += alpha[i] * tnH[i];
+	L -= tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mK; i++)
+	    for (j = 0; j < mK; j++)
+		tmpL += beta[i] * beta[j] * KMK[i][j];
+	L -= .5 * tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mK; i++)
+	    tmpL += beta[i] * tnK[i];
+	L -= tmpL;
+
+	tmpL = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		for (j = 0; j < mK; j++)
+		    tmpL += alpha[i] * beta[j] * HMK[i][j];
+	L -= tmpL;
+
+	L *= .5;
+
+	/*value of the objective function */
+	f = mMm - L;
+
+	tmpf = .0;
+	for (i = 0; i < mH; i++)
+	    if (alpha[i] > 0)
+		tmpf += alpha[i] * tnH[i];
+	f -= .5 * tmpf;
+
+	tmpf = .0;
+	for (i = 0; i < mK; i++)
+	    tmpf += beta[i] * tnK[i];
+	f -= .5 * tmpf;
+
+	/* gap between dual Lagrangian and objective function (stopping criterion) */
+	gap = fabs((f - L) / (f + 1.));
+	/* printf("%f\n", gap); */
+
+	f += Const;
+	if (f < threshold)
+	    break;
+
+    }
+
+
+    /*free memory */
+    for (i = 0; i < n; i++)
+	G_free(invM[i]);
+    G_free(invM);
+
+    if (mH > 0) {
+	G_free(alpha);
+	G_free(tnH);
+	for (i = 0; i < mH; i++) {
+	    G_free(HM[i]);
+	    G_free(HMH[i]);
+	}
+	G_free(HM);
+	G_free(HMH);
+	for (i = 0; i < n; i++)
+	    G_free(tH[i]);
+	G_free(tH);
+    }
+
+    if (mK > 0) {
+	G_free(beta);
+	G_free(tnK);
+	for (i = 0; i < mK; i++) {
+	    G_free(KM[i]);
+	    G_free(KMK[i]);
+	}
+	G_free(KM);
+	G_free(KMK);
+	for (i = 0; i < n; i++)
+	    G_free(tK[i]);
+	G_free(tK);
+    }
+
+    if (mK > 0 && mH > 0) {
+	for (i = 0; i < mH; i++)
+	    G_free(HMK[i]);
+	G_free(HMK);
+    }
+
+    if (f < threshold)
+	return 0;
+    else
+	return 1;
+
+}
+
+void estimate_cv_error(SupportVectorMachine * SVM)
+{
+    double **M, *m, **H, *h, **K, *k;
+    int indx1, indx2, p, n_span;
+    double threshold;
+    double Const;
+    int i, j;
+    int neg_samples;
+    double en, ep, et;
+
+    M = (double **)G_calloc(SVM->non_bound_support, sizeof(double *));
+    for (i = 0; i < SVM->non_bound_support; i++)
+	M[i] = (double *)G_calloc(SVM->non_bound_support, sizeof(double));
+    m = (double *)G_calloc(SVM->non_bound_support, sizeof(double));
+    H = (double **)G_calloc(2 * SVM->non_bound_support, sizeof(double *));
+    for (i = 0; i < 2 * SVM->non_bound_support; i++)
+	H[i] = (double *)G_calloc(SVM->non_bound_support, sizeof(double));
+    h = (double *)G_calloc(2 * SVM->non_bound_support, sizeof(double));
+    K = (double **)G_calloc(1, sizeof(double *));
+    K[0] = (double *)G_calloc(SVM->non_bound_support, sizeof(double));
+    k = (double *)G_calloc(1, sizeof(double));
+    for (i = 0; i < SVM->non_bound_support; i++)
+	K[0][i] = 1.;
+    k[0] = 1.;
+
+    et = en = ep = .0;
+    neg_samples = 0;
+    for (p = 0; p < SVM->N; p++) {
+	if (SVM->target[p] < 0)
+	    neg_samples += 1;
+	if (SVM->alph[p] > 0) {
+	    if (SVM->learned_func(p, SVM) * SVM->target[p] < 0) {
+		fprintf(stderr, "Data %d: training error\n", p);
+		et += 1.;
+		if (SVM->target[p] < 0)
+		    en += 1.;
+		else
+		    ep += 1.;
+	    }
+	    else {
+		if (SVM->alph[p] < SVM->Cw[p])
+		    n_span = SVM->non_bound_support - 1;
+		else
+		    n_span = SVM->non_bound_support;
+		indx1 = 0;
+		indx2 = 0;
+		for (i = 0; i < SVM->N; i++)
+		    if (i != p && SVM->alph[i] > 0 &&
+			SVM->alph[i] < SVM->Cw[i]) {
+			for (j = i; j < SVM->N; j++)
+			    if (j != p && SVM->alph[j] > 0 &&
+				SVM->alph[j] < SVM->Cw[j]) {
+				M[indx1][indx2] = M[indx2][indx1] =
+				    SVM->kernel_func(i, j, SVM);
+				indx2++;
+			    }
+			indx1++;
+			indx2 = indx1;
+		    }
+
+		if (n_span > SVM->d)
+		    for (i = 0; i < n_span; i++)
+			M[i][i] += drand48() * M[i][i] / 100.;
+
+		indx1 = 0;
+		for (i = 0; i < SVM->N; i++)
+		    if (i != p && SVM->alph[i] > 0 &&
+			SVM->alph[i] < SVM->Cw[i]) {
+			m[indx1] = -2. * SVM->kernel_func(i, p, SVM);
+			indx1++;
+		    }
+
+		indx1 = 0;
+		for (i = 0; i < 2 * n_span; i++)
+		    for (j = 0; j < n_span; j++)
+			H[i][j] = .0;
+		for (i = 0; i < SVM->N; i++)
+		    if (i != p && SVM->alph[i] > 0 &&
+			SVM->alph[i] < SVM->Cw[i]) {
+			H[indx1][indx1] = 1.;
+			H[indx1 + n_span][indx1] = -1.;
+			if (SVM->target[i] == SVM->target[p]) {
+			    h[indx1] =
+				(SVM->Cw[i] - SVM->alph[i]) / SVM->alph[p];
+			    h[indx1 + n_span] = SVM->alph[i] / SVM->alph[p];
+			    indx1++;
+			}
+			else {
+			    h[indx1] = SVM->alph[i] / SVM->alph[p];
+			    h[indx1 + n_span] =
+				(SVM->Cw[i] - SVM->alph[i]) / SVM->alph[p];
+			    indx1++;
+			}
+		    }
+
+		threshold =
+		    SVM->learned_func(p, SVM) * SVM->target[p] / SVM->alph[p];
+		Const = SVM->kernel_func(p, p, SVM);
+		if (distance_from_span_sv
+		    (M, m, n_span, Const, H, h, 2 * n_span, K, k, 1, SVM->eps,
+		     threshold) == 1) {
+		    fprintf(stderr, "Data %d: cv error\n", p);
+		    et += 1.;
+		    if (SVM->target[p] < 0)
+			en += 1.;
+		    else
+			ep += 1.;
+		}
+		else
+		    fprintf(stderr, "Data %d: correctly classified\n", p);
+	    }
+	}
+    }
+    et /= SVM->N;
+    en /= neg_samples;
+    ep /= (SVM->N - neg_samples);
+
+    fprintf(stdout, "Accuracy: %f\n", 1 - et);
+    fprintf(stdout, "Perrors\tclass +1: %f\tclass -1: %f\n", ep, en);
+
+}
+
+void write_svm(char *file, SupportVectorMachine * svm, Features * features)
+
+     /*
+        write svm structure to a file 
+      */
+{
+    FILE *fpout;
+    int i, j;
+    char tempbuf[500];
+    int np_weights = 0;
+
+    for (i = 0; i < svm->N; i++) {
+	if (svm->alph[i] > 0.0) {
+	    np_weights += 1;
+	}
+    }
+
+    if ((fpout = fopen(file, "w")) == NULL) {
+	sprintf(tempbuf, "write_svm-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    write_header_features(fpout, features);
+    fprintf(fpout, "#####################\n");
+    fprintf(fpout, "MODEL:\n");
+    fprintf(fpout, "#####################\n");
+
+    fprintf(fpout, "Model:\n");
+    fprintf(fpout, "SupportVectorMachine\n");
+    fprintf(fpout, "Convergence:\n");
+    fprintf(fpout, "%d\n", svm->convergence);
+    fprintf(fpout, "Kernel Type:\n");
+    if (svm->kernel_type == SVM_KERNEL_LINEAR) {
+	fprintf(fpout, "linear_kernel\n");
+    }
+    if (svm->kernel_type == SVM_KERNEL_GAUSSIAN) {
+	fprintf(fpout, "gaussian_kernel\n");
+    }
+    if (svm->kernel_type == SVM_KERNEL_DIRECT) {
+	fprintf(fpout, "2pbk_kernel\n");
+    }
+    fprintf(fpout, "Kernel parameter:\n");
+    fprintf(fpout, "%f\n", svm->two_sigma_squared);
+
+    fprintf(fpout, "Optimization parameter:\n");
+    fprintf(fpout, "%f\n", svm->C);
+
+    fprintf(fpout, "Cost parameter:\n");
+    fprintf(fpout, "%f\n", svm->cost);
+
+    fprintf(fpout, "Convergence parameters:\n");
+    fprintf(fpout, "tol\teps\tmaxloops\n");
+    fprintf(fpout, "%e\t%e\t%d\n", svm->tolerance, svm->eps, svm->maxloops);
+
+    fprintf(fpout, "Number of kernel:\n");
+    fprintf(fpout, "%d\n", np_weights);
+    fprintf(fpout, "Dimension:\n");
+    fprintf(fpout, "%d\n", svm->d);
+
+    fprintf(fpout, "Offset:\n");
+    fprintf(fpout, "%f\n", svm->b);
+
+    if (svm->kernel_type == SVM_KERNEL_GAUSSIAN) {
+	fprintf(fpout, "Kernel - Label - Weight:\n");
+	for (i = 0; i < svm->N; i++) {
+	    if (svm->alph[i] > 0.0) {
+		for (j = 0; j < svm->d; j++) {
+		    fprintf(fpout, "%f\t", svm->dense_points[i][j]);
+		}
+		fprintf(fpout, "%d\t%e\n", svm->target[i], svm->alph[i]);
+	    }
+	}
+    }
+    if (svm->kernel_type == SVM_KERNEL_LINEAR ||
+	svm->kernel_type == SVM_KERNEL_DIRECT) {
+	fprintf(fpout, "Weight:\n");
+	fprintf(fpout, "%f", svm->w[0]);
+	for (i = 1; i < svm->d; i++) {
+	    fprintf(fpout, "\t%f", svm->w[i]);
+	}
+	fprintf(fpout, "\n");
+    }
+
+    if (svm->kernel_type == SVM_KERNEL_DIRECT) {
+	fprintf(fpout, "Support Vector:\n");
+	for (i = 0; i < svm->N; i++)
+	    fprintf(fpout, "%f\n", svm->alph[i]);
+    }
+
+    if (features->f_pca[0]) {
+	fprintf(fpout, "#####################\n");
+	fprintf(fpout, "PRINC. COMP.:\n");
+	fprintf(fpout, "#####################\n");
+
+	fprintf(fpout, "Number of pc:\n");
+	fprintf(fpout, "%d\n", features->npc);
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    fprintf(fpout, "PCA: Layer %d\n", i + 1);
+	    write_pca(fpout, &(features->pca[i]));
+	}
+    }
+    fclose(fpout);
+}
+
+void test_svm(SupportVectorMachine * svm, Features * features, char *file)
+
+     /*
+        test an svm model on a set of data (features) and write the
+        predictions to a file. Accuracy and per-class errors are
+        printed to standard output
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    double pred;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_svm-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if ((pred =
+		     predict_svm(svm,
+				 features->value[i])) * features->class[i] <=
+		    0.0) {
+		    error[j] += 1.0;
+		    accuracy += 1.0;
+		}
+		fprintf(fp, "%d\t%f\n", features->class[i], pred);
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+double predict_svm(SupportVectorMachine * svm, double *x)
+
+     /* 
+        given an svm model, return the predicted margin of a test point x
+      */
+{
+    int i, j;
+    double y = 0.0;
+    double K;
+
+    if (svm->kernel_type == SVM_KERNEL_GAUSSIAN) {
+	for (i = 0; i < svm->N; i++) {
+	    if (svm->alph[i] > 0) {
+		K = 0.0;
+		for (j = 0; j < svm->d; j++)
+		    K += (svm->dense_points[i][j] -
+			  x[j]) * (svm->dense_points[i][j] - x[j]);
+		y += svm->alph[i] * svm->target[i] * exp(-K /
+							 svm->
+							 two_sigma_squared);
+	    }
+	}
+	y -= svm->b;
+    }
+
+    if (svm->kernel_type == SVM_KERNEL_LINEAR) {
+	K = 0.0;
+	for (j = 0; j < svm->d; j++)
+	    K += svm->w[j] * x[j];
+	y = K - svm->b;
+    }
+
+    if (svm->kernel_type == SVM_KERNEL_DIRECT) {
+	double *models;
+	double x1, x2;
+	int t;
+
+	models = (double *)G_calloc(svm->d, sizeof(double));
+
+
+	for (t = 0; t < svm->d; t++) {
+	    models[t] = 0.0;
+	    x1 = dot_product(x, svm->models[t].x1, svm->orig_d);
+	    x2 = dot_product(x, svm->models[t].x2, svm->orig_d);
+	    models[t] = svm->models[t].w_coeff *
+		(svm->models[t].y1 * x1 + svm->models[t].y2 * x2) +
+		svm->models[t].b;
+	    if (models[t] > 1)
+		models[t] = 1.0;
+	    else if (models[t] < -1)
+		models[t] = -1.0;
+	}
+
+	y = 0.0;
+	for (i = 0; i < svm->N; i++)
+	    if (svm->alph[i] > 0)
+		for (t = 0; t < svm->d; t++)
+		    y += svm->alph[i] * svm->target[i] *
+			svm->dense_points[i][t] * models[t];
+
+	y -= svm->b;
+	G_free(models);
+    }
+
+    return y;
+}
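
A minimal usage sketch of predict_svm() (not part of the commit; the names
below are illustrative, and binary class labels are assumed to be coded
+1/-1 as elsewhere in this file):

    /* classify a single test point with a trained machine */
    int classify_point(SupportVectorMachine *svm, double *x)
    {
        double margin = predict_svm(svm, x);    /* x has svm->d entries */

        return (margin >= 0.0) ? 1 : -1;
    }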
+
+
+
+void compute_svm_bagging(BSupportVectorMachine * bsvm, int bagging,
+			 int nsamples, int nvar, double **data,
+			 int *data_class, int svm_kernel, double kp, double C,
+			 double tol, double eps, int maxloops,
+			 int svm_verbose, double *svm_W)
+{
+    int i, b;
+    int *bsamples;
+    double **xdata_training;
+    int *xclasses_training;
+    double *prob;
+    int nk;
+    int *extracted;
+    int index;
+
+
+    bsvm->svm = (SupportVectorMachine *) G_calloc(bagging,
+						  sizeof
+						  (SupportVectorMachine));
+    bsvm->nsvm = bagging;
+    bsvm->weights = (double *)G_calloc(bsvm->nsvm, sizeof(double));
+
+    for (b = 0; b < bsvm->nsvm; b++) {
+	bsvm->weights[b] = 1.0 / bsvm->nsvm;
+    }
+
+
+    extracted = (int *)G_calloc(nsamples, sizeof(int));
+    prob = (double *)G_calloc(nsamples, sizeof(double));
+    bsamples = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+
+    for (i = 0; i < nsamples; i++) {
+	prob[i] = 1.0 / nsamples;
+    }
+
+    for (b = 0; b < bsvm->nsvm; b++) {
+	for (i = 0; i < nsamples; i++) {
+	    extracted[i] = 0;
+	}
+	Bootsamples(nsamples, prob, bsamples);
+	for (i = 0; i < nsamples; i++) {
+	    extracted[bsamples[i]] = 1;
+	}
+	nk = 0;
+	for (i = 0; i < nsamples; i++) {
+	    if (extracted[i]) {
+		nk += 1;
+	    }
+	}
+
+	index = 0;
+	for (i = 0; i < nsamples; i++) {
+	    if (extracted[i]) {
+		xdata_training[index] = data[i];
+		xclasses_training[index++] = data_class[i];
+	    }
+	}
+
+	compute_svm(&(bsvm->svm[b]), nk, nvar, xdata_training,
+		    xclasses_training, svm_kernel, kp, C, tol,
+		    eps, maxloops, svm_verbose, svm_W);
+
+    }
+
+    G_free(bsamples);
+    G_free(xclasses_training);
+    G_free(prob);
+    G_free(extracted);
+    G_free(xdata_training);
+}
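
Each of the bagging machines above is trained on a bootstrap replicate drawn
by Bootsamples(), and all machines get equal voting weight 1/nsvm. A hedged
usage sketch (the parameter values are placeholders, not recommendations;
svm_W is passed through unchanged to compute_svm()):

    BSupportVectorMachine bsvm;

    /* bag 50 Gaussian-kernel machines on nsamples points with nvar
       features; kp is the kernel parameter, C the soft-margin constant,
       then tolerance, eps and the maximum number of optimization loops */
    compute_svm_bagging(&bsvm, 50, nsamples, nvar, data, data_class,
                        SVM_KERNEL_GAUSSIAN, 2.0, 10.0, 0.001, 0.001,
                        1000, 0, svm_W);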
+
+
+void write_bagging_boosting_svm(char *file, BSupportVectorMachine * bsvm,
+				Features * features)
+
+     /*
+        write a bagging or boosting svm to a file
+      */
+{
+    int i, j;
+    FILE *fp;
+    char tempbuf[500];
+    int b;
+    int np_weights;
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf,
+		"write_bagging_boosting_svm-> Can't open file %s for writing",
+		file);
+	G_fatal_error(tempbuf);
+    }
+
+    write_header_features(fp, features);
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "MODEL:\n");
+    fprintf(fp, "#####################\n");
+
+    fprintf(fp, "Model:\n");
+    fprintf(fp, "B-SupportVectorMachine\n");
+    fprintf(fp, "Cost parameter:\n");
+    fprintf(fp, "%f\n", bsvm->w);
+    fprintf(fp, "Number of models:\n");
+    fprintf(fp, "%d\n", bsvm->nsvm);
+    fprintf(fp, "Weights:\n");
+    fprintf(fp, "%f", bsvm->weights[0]);
+    for (b = 1; b < bsvm->nsvm; b++) {
+	fprintf(fp, "\t%f", bsvm->weights[b]);
+    }
+    fprintf(fp, "\n");
+    for (b = 0; b < bsvm->nsvm; b++) {
+
+	np_weights = 0;
+	for (i = 0; i < bsvm->svm[b].N; i++) {
+	    if (bsvm->svm[b].alph[i] > 0.0) {
+		np_weights += 1;
+	    }
+	}
+	fprintf(fp, "Convergence:\n");
+	fprintf(fp, "%d\n", bsvm->svm[b].convergence);
+
+	fprintf(fp, "Kernel Type:\n");
+	if (bsvm->svm[b].kernel_type == SVM_KERNEL_GAUSSIAN) {
+	    fprintf(fp, "gaussian_kernel\n");
+	}
+	if (bsvm->svm[b].kernel_type == SVM_KERNEL_LINEAR) {
+	    fprintf(fp, "linear_kernel\n");
+	}
+	fprintf(fp, "Kernel parameter:\n");
+	fprintf(fp, "%f\n", bsvm->svm[b].two_sigma_squared);
+
+	fprintf(fp, "Optimization parameter:\n");
+	fprintf(fp, "%f\n", bsvm->svm[b].C);
+
+	fprintf(fp, "Cost parameter:\n");
+	fprintf(fp, "%f\n", bsvm->svm[b].cost);
+
+	fprintf(fp, "Convergence parameters:\n");
+	fprintf(fp, "tol\teps\tmaxloops\n");
+	fprintf(fp, "%e\t%e\t%d\n", bsvm->svm[b].tolerance,
+		bsvm->svm[b].eps, bsvm->svm[b].maxloops);
+
+	fprintf(fp, "Number of kernel:\n");
+	fprintf(fp, "%d\n", np_weights);
+	fprintf(fp, "Dimension:\n");
+	fprintf(fp, "%d\n", bsvm->svm[b].d);
+
+	fprintf(fp, "Offset:\n");
+	fprintf(fp, "%f\n", bsvm->svm[b].b);
+
+
+
+	if (bsvm->svm[b].kernel_type == SVM_KERNEL_GAUSSIAN) {
+	    fprintf(fp, "Kernel - Label - Weight:\n");
+	    for (i = 0; i < bsvm->svm[b].N; i++) {
+		if (bsvm->svm[b].alph[i] > 0.0) {
+		    for (j = 0; j < bsvm->svm[b].d; j++) {
+			fprintf(fp, "%f\t", bsvm->svm[b].dense_points[i][j]);
+		    }
+		    fprintf(fp, "%d\t%f\n", bsvm->svm[b].target[i],
+			    bsvm->svm[b].alph[i]);
+		}
+	    }
+	}
+	if (bsvm->svm[b].kernel_type == SVM_KERNEL_LINEAR) {
+	    fprintf(fp, "Weight:\n");
+	    fprintf(fp, "%f", bsvm->svm[b].w[0]);
+	    for (i = 1; i < bsvm->svm[b].d; i++) {
+		fprintf(fp, "\t%f", bsvm->svm[b].w[i]);
+	    }
+	    fprintf(fp, "\n");
+	}
+
+    }
+
+    if (features->f_pca[0]) {
+	fprintf(fp, "#####################\n");
+	fprintf(fp, "PRINC. COMP.:\n");
+	fprintf(fp, "#####################\n");
+
+	fprintf(fp, "Number of pc:\n");
+	fprintf(fp, "%d\n", features->npc);
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    fprintf(fp, "PCA: Layer %d\n", i + 1);
+	    write_pca(fp, &(features->pca[i]));
+	}
+    }
+
+    fclose(fp);
+}
+
+
+void compute_svm_boosting(BSupportVectorMachine * bsvm, int boosting,
+			  double w, int nsamples, int nvar, double **data,
+			  int *data_class, int nclasses, int *classes,
+			  int svm_kernel, double kp, double C, double tol,
+			  double svm_eps, int maxloops, int svm_verbose,
+			  double *svm_W, int weights_boosting)
+{
+    int i, b;
+    int *bsamples;
+    double **xdata_training;
+    int *xclasses_training;
+    double *prob;
+    double e00, e01, e10, e11, prior0, prior1;
+    int *error;
+    double eps, totprob;
+    double totbeta;
+    int nk;
+    int *extracted;
+    int index;
+
+    if (weights_boosting == 1) {
+	bsvm->w_evolution = (double **)G_calloc(nsamples, sizeof(double *));
+	for (i = 0; i < nsamples; i++)
+	    bsvm->w_evolution[i] =
+		(double *)G_calloc(boosting + 3, sizeof(double));
+    }
+
+    bsvm->svm = (SupportVectorMachine *) G_calloc(boosting,
+						  sizeof
+						  (SupportVectorMachine));
+    bsvm->nsvm = boosting;
+    bsvm->weights = (double *)G_calloc(bsvm->nsvm, sizeof(double));
+    bsvm->w = w;
+
+    extracted = (int *)G_calloc(nsamples, sizeof(int));
+    prob = (double *)G_calloc(nsamples, sizeof(double));
+    bsamples = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+    error = (int *)G_calloc(nsamples, sizeof(int));
+
+    for (i = 0; i < nsamples; i++) {
+	prob[i] = 1.0 / nsamples;
+    }
+
+    for (b = 0; b < bsvm->nsvm; b++) {
+	if (weights_boosting == 1)
+	    for (i = 0; i < nsamples; i++)
+		bsvm->w_evolution[i][b] = prob[i];
+
+
+	for (i = 0; i < nsamples; i++) {
+	    extracted[i] = 0;
+	}
+	Bootsamples(nsamples, prob, bsamples);
+	for (i = 0; i < nsamples; i++) {
+	    extracted[bsamples[i]] = 1;
+	}
+	nk = 0;
+	for (i = 0; i < nsamples; i++) {
+	    if (extracted[i]) {
+		nk += 1;
+	    }
+	}
+
+	index = 0;
+	for (i = 0; i < nsamples; i++) {
+	    if (extracted[i]) {
+		xdata_training[index] = data[i];
+		xclasses_training[index++] = data_class[i];
+	    }
+	}
+
+	compute_svm(&(bsvm->svm[b]), nk, nvar, xdata_training,
+		    xclasses_training, svm_kernel, kp, C, tol,
+		    svm_eps, maxloops, svm_verbose, svm_W);
+
+	e00 = e01 = e10 = e11 = prior0 = prior1 = 0.0;
+	for (i = 0; i < nsamples; i++) {
+	    if (data_class[i] == classes[0]) {
+		if (predict_svm(&(bsvm->svm[b]), data[i]) * data_class[i] <=
+		    0.0) {
+		    error[i] = TRUE;
+		    e01 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e00 += prob[i];
+		}
+		prior0 += prob[i];
+	    }
+	    else {
+		if (predict_svm(&(bsvm->svm[b]), data[i]) * data_class[i] <=
+		    0.0) {
+		    error[i] = TRUE;
+		    e10 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e11 += prob[i];
+		}
+		prior1 += prob[i];
+	    }
+	}
+	eps = (1.0 - e00 / (e00 + e01)) * prior0 * bsvm->w +
+	    (1.0 - e11 / (e10 + e11)) * prior1 * (2.0 - bsvm->w);
+	if (eps > 0.0 && eps < 0.5) {
+	    bsvm->weights[b] = 0.5 * log((1.0 - eps) / eps);
+	    totprob = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		if (error[i]) {
+		    if (data_class[i] == classes[0]) {
+			prob[i] = prob[i] * exp(bsvm->weights[b] * bsvm->w);
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(bsvm->weights[b] * (2.0 - bsvm->w));
+		    }
+		}
+		else {
+		    if (data_class[i] == classes[0]) {
+			prob[i] =
+			    prob[i] * exp(-bsvm->weights[b] *
+					  (2.0 - bsvm->w));
+		    }
+		    else {
+			prob[i] = prob[i] * exp(-bsvm->weights[b] * bsvm->w);
+		    }
+		}
+		totprob += prob[i];
+	    }
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] /= totprob;
+	    }
+	}
+	else {
+	    bsvm->weights[b] = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] = 1.0 / nsamples;
+	    }
+	}
+
+    }
+
+    totbeta = 0.0;
+    for (b = 0; b < bsvm->nsvm; b++) {
+	totbeta += bsvm->weights[b];
+    }
+    for (b = 0; b < bsvm->nsvm; b++) {
+	bsvm->weights[b] /= totbeta;
+    }
+
+
+    G_free(bsamples);
+    G_free(xclasses_training);
+    G_free(prob);
+    G_free(extracted);
+    G_free(xdata_training);
+    G_free(error);
+
+}
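
For reference, the update above is cost-sensitive AdaBoost: eps is the
cost-weighted training error of model b, the model weight is
beta_b = 0.5 * ln((1 - eps) / eps), and each sample weight is multiplied by
exp(+/-beta_b * w) or exp(+/-beta_b * (2 - w)) according to its class and to
whether it was misclassified, then renormalized; with w = 1 this reduces to
the standard AdaBoost update exp(+/-beta_b).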
+
+double predict_bsvm(BSupportVectorMachine * bsvm, double *x)
+
+     /* 
+        given a bsvm model, return the predicted margin of a test point x
+      */
+{
+    int b;
+    int predict;
+    double out;
+    double pred;
+
+    out = 0.0;
+    for (b = 0; b < bsvm->nsvm; b++) {
+	pred = predict_svm(&(bsvm->svm[b]), x);
+	if (pred < 0.0) {
+	    predict = -1;
+	}
+	else if (pred > 0.0) {
+	    predict = 1;
+	}
+	else {
+	    predict = 0;
+	}
+	out += predict * bsvm->weights[b];
+    }
+    return out;
+}
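
Since the model weights are normalized to sum to 1, the value returned by
predict_bsvm() is a weighted vote in [-1, 1],
out = sum_b weights[b] * sign(f_b(x)), so its magnitude can be read as the
confidence of the ensemble.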
+
+void test_bsvm(BSupportVectorMachine * bsvm, Features * features, char *file)
+
+     /*
+        test a bagging or boosting svm model on a set of data (features)
+        and write the predictions to a file. Accuracy and per-class
+        errors are printed to standard output
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    double pred;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_bsvm-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if ((pred =
+		     predict_bsvm(bsvm,
+				  features->value[i])) * features->class[i] <=
+		    0.0) {
+		    error[j] += 1.0;
+		    accuracy += 1.0;
+		}
+		fprintf(fp, "%d\t%f\n", features->class[i], pred);
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+
+void test_bsvm_progressive(BSupportVectorMachine * bsvm, Features * features,
+			   char *file)
+
+     /*
+        test a bagging or boosting svm model on a set of data (features)
+        and write the predictions to a file. Accuracy and per-class
+        errors are printed to standard output progressively, as the
+        number of models grows
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    double pred;
+    double *error;
+    double accuracy;
+    int b;
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_bsvm-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    for (b = 1; b <= bsvm->nsvm; b++) {
+	if ((b < 100 && b % 2 == 1) || (b >= 100) || (b == bsvm->nsvm)) {
+	    accuracy = 0.0;
+	    for (j = 0; j < features->nclasses; j++) {
+		error[j] = .0;
+		data_in_each_class[j] = 0;
+	    }
+	    for (i = 0; i < features->nexamples; i++) {
+		for (j = 0; j < features->nclasses; j++) {
+		    if (features->class[i] == features->p_classes[j]) {
+			data_in_each_class[j] += 1;
+			if ((pred =
+			     predict_bsvm_progressive(bsvm,
+						      features->value[i], b))
+			    * features->class[i] <= 0.0) {
+			    error[j] += 1.0;
+			    accuracy += 1.0;
+			}
+			if (b == bsvm->nsvm)
+			    fprintf(fp, "%d\t%f\n", features->class[i], pred);
+			break;
+		    }
+		}
+	    }
+
+	    accuracy /= features->nexamples;
+	    accuracy = 1.0 - accuracy;
+
+	    if (b == bsvm->nsvm)
+		fclose(fp);
+
+	    fprintf(stdout, "nmodels = %d\n", b);
+	    fprintf(stdout, "Accuracy: %f\n", accuracy);
+	    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%d", features->p_classes[j]);
+	    }
+	    fprintf(stdout, "\n");
+	    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%d", data_in_each_class[j]);
+	    }
+	    fprintf(stdout, "\n");
+	    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%d", (int)error[j]);
+	    }
+	    fprintf(stdout, "\n");
+
+	    for (j = 0; j < features->nclasses; j++) {
+		error[j] /= data_in_each_class[j];
+	    }
+
+	    fprintf(stdout, "Perrors\t%f", error[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%f", error[j]);
+	    }
+	    fprintf(stdout, "\n");
+	}
+    }
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+double predict_bsvm_progressive(BSupportVectorMachine * bsvm, double *x,
+				int bmax)
+
+     /* 
+        given a bsvm model, return the predicted margin of a test point x
+      */
+{
+    int b;
+    int predict;
+    double out;
+    double pred;
+
+    out = 0.0;
+    for (b = 0; b < bmax; b++) {
+	pred = predict_svm(&(bsvm->svm[b]), x);
+	if (pred < 0.0) {
+	    predict = -1;
+	}
+	else if (pred > 0.0) {
+	    predict = 1;
+	}
+	else {
+	    predict = 0;
+	}
+	out += predict * bsvm->weights[b];
+    }
+    return out;
+}
+
+double dot_product(double *x, double *y, int n)
+{
+    double out = .0;
+    int i;
+
+    /* x[n] * y[n--] reads and modifies n without an intervening
+       sequence point (undefined behavior in C), so index explicitly */
+    for (i = 0; i < n; i++)
+	out += x[i] * y[i];
+
+    return out;
+}

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/test.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/test.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/test.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,272 @@
+/*
+   The following routines are borrowed from "Numerical Recipes in C"
+   and rearranged by Stefano Merler
+
+   for 
+
+   statistical test computation
+
+   Supported functions:
+   - KS test for the normal distribution of data
+   - KS test for the equality of 2 distributions
+   - t-test for means
+ */
+
+#include <math.h>
+#include <grass/gis.h>
+#include "global.h"
+
+#define EPS1 0.001
+#define EPS2 1.0e-8
+#define MAX(a,b) (maxarg1=(a),maxarg2=(b),(maxarg1)>(maxarg2)?(maxarg1):(maxarg2))
+
+double maxarg1, maxarg2;
+
+double probks();
+double probks2();
+double betai();
+double gammln();
+double betacf();
+
+void ksone_normal(double *data, int n, double p1, double p2, double *d,
+		  double *prob)
+
+     /*
+        KS test for normal distribution. data is the array of data
+        of length n. p1 and p2 are the mean and sd of the normal
+        distribution tested. On output, d is the value of the test
+        statistic and prob the p-value
+      */
+{
+    int j;
+    double fo = 0.0, fn, ff, en, dt;
+
+    shell(n, data);
+    en = (double)n;
+
+    *d = 0.0;
+
+    for (j = 1; j <= n; j++) {
+	fn = j / en;
+	ff = cumulative_normal_distribution(p1, p2, data[j - 1]);
+	dt = MAX(fabs(fo - ff), fabs(fn - ff));
+	if (dt > *d)
+	    *d = dt;
+	fo = fn;
+    }
+    *prob = probks2(*d, n);
+
+}
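
A short usage sketch (illustrative names; note that shell() sorts the data
array in place):

    double d, prob;

    /* test whether the n values in v are compatible with a standard
       normal distribution (mean 0, sd 1) */
    ksone_normal(v, n, 0.0, 1.0, &d, &prob);
    if (prob < 0.05)
        fprintf(stdout, "normality rejected at the 5%% level\n");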
+
+void kstwo(double *data1, int n1, double *data2, int n2, double *d,
+	   double *prob)
+
+     /*
+        KS test for the equality of 2 distributions. data1 is the
+        first array of data of length n1, data2 the second array of
+        data of length n2. On output, d is the value of the test
+        statistic and prob the p-value
+      */
+{
+    int j1 = 1, j2 = 1;
+    double en1, en2, fn1 = 0.0, fn2 = 0.0, dt, d1, d2;
+
+    en1 = n1;
+    en2 = n2;
+    *d = 0.0;
+    shell(n1, data1);
+    shell(n2, data2);
+    while (j1 <= n1 && j2 <= n2) {
+	if ((d1 = data1[j1 - 1]) <= (d2 = data2[j2 - 1])) {
+	    fn1 = (j1++) / en1;
+	}
+	if (d2 <= d1) {
+	    fn2 = (j2++) / en2;
+	}
+	if ((dt = fabs(fn2 - fn1)) > *d)
+	    *d = dt;
+    }
+    *prob = probks(sqrt(en1 * en2 / (en1 + en2)) * (*d));
+}
+
+double probks(double alam)
+
+     /*
+        evaluate the Q(KS) function, i.e. the significance level
+      */
+{
+    int j;
+    double a2, fac = 2.0, sum = 0.0, term, termbf = 0.0;
+
+    a2 = -2.0 * alam * alam;
+    for (j = 1; j <= 100; j++) {
+	term = fac * exp(a2 * j * j);
+	sum += term;
+	if (fabs(term) <= EPS1 * termbf || fabs(term) <= EPS2 * sum)
+	    return sum;
+	fac = -fac;
+	termbf = fabs(term);
+    }
+    return 1.0;
+}
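
The loop above sums the asymptotic Kolmogorov-Smirnov significance series
Q_KS(lambda) = 2 * sum_{j>=1} (-1)^(j-1) * exp(-2 * j^2 * lambda^2),
stopping once a term becomes negligible relative to the running sum; if 100
terms do not suffice, the function conservatively returns 1.0.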
+
+double probks2(double alam, int ndata)
+
+     /*
+        evaluate the Q(KS) function (Dallal-Wilkinson approx.),
+        i.e. the significance level depending on the number of
+        data points ndata
+      */
+{
+    double ks, p, n;
+
+    n = (double)ndata;
+    ks = alam;
+
+    if (ndata > 100) {
+	ks = ks * pow(n / 100., 0.49);
+	n = 100.;
+    }
+    p = exp(-7.01256 * ks * ks * (n + 2.78019) +
+	    2.99587 * ks * sqrt(n + 2.78019) - 0.122119 + 0.974598 / sqrt(n) +
+	    1.67997 / n);
+    return (p);
+}
+
+double normal_distribution(double x, double mu, double sigma)
+
+     /*
+        normal distribution with mean mu and
+        standard deviation sigma computed at point x:
+
+        1/(sigma*sqrt(2*PIG)) * exp(-(x-mu)^2/(2*sigma^2))
+      */
+{
+    return exp(-1 * (x - mu) * (x - mu) / (2.0 * sigma * sigma)) /
+	(sigma * sqrt(2 * PIG));
+}
+
+double cumulative_normal_distribution(double mu, double sigma, double x)
+
+     /*
+        cumulative probability of the normal
+        distribution with mean mu and
+        standard deviation sigma, i.e.
+        integral from -Inf to x
+
+      */
+{
+    return trapzd2(normal_distribution, mu, sigma, -10., x, 1000);
+}
+
+#undef EPS1
+#undef EPS2
+
+double gammln(double xx)
+{
+    double x, tmp, ser;
+
+    static double cof[6] = { 76.18009173, -86.50532033, 24.01409822,
+	-1.231739516, 0.120858003e-2, -0.536382e-5
+    };
+    int j;
+
+    x = xx - 1.0;
+    tmp = x + 5.5;
+    tmp -= (x + 0.5) * log(tmp);
+    ser = 1.0;
+    for (j = 0; j <= 5; j++) {
+	x += 1.0;
+	ser += cof[j] / x;
+    }
+    return -tmp + log(2.50662827465 * ser);
+}
+
+#define ITMAX 1000000
+#define EPS 3.0e-7
+
+double betacf(double a, double b, double x)
+{
+    double qap, qam, qab, em, tem, d;
+    double bz, bm = 1.0, bp, bpp;
+    double az = 1.0, am = 1.0, ap, app, aold;
+    int m;
+
+    qab = a + b;
+    qap = a + 1.0;
+    qam = a - 1.0;
+
+    bz = 1.0 - qab * x / qap;
+
+    for (m = 1; m <= ITMAX; m++) {
+	em = (double)m;
+	tem = em + em;
+	d = em * (b - em) * x / ((qam + tem) * (a + tem));
+	ap = az + d * am;
+	bp = bz + d * bm;
+	d = -(a + em) * (qab + em) * x / ((qap + tem) * (a + tem));
+	app = ap + d * az;
+	bpp = bp + d * bz;
+	aold = az;
+	am = ap / bpp;
+	bm = bp / bpp;
+	az = app / bpp;
+	bz = 1.0;
+	if (fabs(az - aold) < (EPS * fabs(az)))
+	    return az;
+    }
+    G_warning("a or b tto big, or ITMAX too small in BETACF\n");
+}
+
+#undef ITMAX
+#undef EPS
+
+double betai(double a, double b, double x)
+{
+    double bt;
+
+    if (x < 0.0 || x > 1.0) {
+	G_warning("bad x in BETAI\n");
+    }
+    if (x == 0.0 || x == 1.0) {
+	bt = 0.0;
+    }
+    else {
+	bt = exp(gammln(a + b) - gammln(a) - gammln(b) + a * log(x) +
+		 b * log(1.0 - x));
+    }
+    if (x < (a + 1.0) / (a + b + 2.0)) {
+	return bt * betacf(a, b, x) / a;
+    }
+    else {
+	return 1.0 - bt * betacf(b, a, 1.0 - x) / b;
+    }
+}
+
+double sqrarg;
+
+#define SQR(a) (sqrarg=(a),sqrarg*sqrarg)
+
+void tutest(double *data1, int n1, double *data2, int n2, double *t,
+	    double *prob)
+
+     /*
+        Welch's t-test for the difference between the means of two
+        data arrays with possibly unequal variances. On output, t is
+        the test statistic and prob the p-value
+      */
+{
+    double var1, var2, df, ave1, ave2;
+
+
+    ave1 = mean_of_double_array(data1, n1);
+    var1 = var_of_double_array_given_mean(data1, n1, ave1);
+
+    ave2 = mean_of_double_array(data2, n2);
+    var2 = var_of_double_array_given_mean(data2, n2, ave2);
+
+    *t = (ave1 - ave2) / sqrt(var1 / n1 + var2 / n2);
+    df = SQR(var1 / n1 + var2 / n2) / (SQR(var1 / n1) / (n1 - 1) +
+				       SQR(var2 / n2) / (n2 - 1));
+    *prob = betai(0.5 * df, 0.5, df / (df + SQR(*t)));
+}

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/training.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/training.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/training.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,256 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structure Training management
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include "global.h"
+
+void inizialize_training(Training * training)
+
+     /* 
+        alloc memory for training (see global.h for details)
+      */
+{
+    int i;
+
+    training->nexamples = 0;
+    training->east =
+	(double *)G_calloc(TRAINING_MAX_EXAMPLES, sizeof(double));
+    training->north =
+	(double *)G_calloc(TRAINING_MAX_EXAMPLES, sizeof(double));
+    training->class = (int *)G_calloc(TRAINING_MAX_EXAMPLES, sizeof(int));
+    training->mapnames =
+	(char ***)G_calloc(TRAINING_MAX_EXAMPLES, sizeof(char **));
+    for (i = 0; i < TRAINING_MAX_EXAMPLES; i++) {
+	training->mapnames[i] =
+	    (char **)G_calloc(TRAINING_MAX_LAYERS, sizeof(char *));
+    }
+    training->data =
+	(double **)G_calloc(TRAINING_MAX_EXAMPLES, sizeof(double *));
+}
+
+void read_training(char *file, Training * training)
+
+     /*
+        read training structure from a file. Supported formats:
+        GRASS_data: list of labelled raster maps
+        TABLE_data: list of labelled vectors
+      */
+{
+    FILE *fp;
+    char tempbuf[500];
+    char *line = NULL;
+    int i, j;
+    int index;
+    int tmprow, tmpcol;
+    double tmpew, tmpns;
+    int nlayers;
+    int training_type = 0;
+    int tmpc;
+
+    fp = fopen(file, "r");
+    if (fp == NULL) {
+	sprintf(tempbuf, "read_training-> Can't open file %s for reading",
+		file);
+	G_fatal_error(tempbuf);
+    }
+    if (G_getl2(tempbuf, sizeof(tempbuf) - 1, fp) == 0) {
+	fclose(fp);
+	G_fatal_error("read_training-> File %s is empty", file);
+    }
+
+    training->file = file;
+
+    line = GetLine(fp);
+
+    /*line=GetLine(fp); */
+
+
+    if (strcmp(line, "GrassTraining") == 0) {
+	training_type = GRASS_data;
+    }
+    if (strcmp(line, "TableTraining") == 0) {
+	training_type = TABLE_data;
+    }
+
+    switch (training_type) {
+    case GRASS_data:
+	training->data_type = training_type;
+	if (training->nexamples == 0) {
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    sscanf(line, "%d", &(training->nlayers));
+	    if (training->nlayers > TRAINING_MAX_LAYERS) {
+		sprintf(tempbuf,
+			"read_training-> Maximum number of layers is %d",
+			TRAINING_MAX_LAYERS);
+		G_fatal_error(tempbuf);
+	    }
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	}
+	else {
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    sscanf(line, "%d", &nlayers);
+	    if (nlayers != training->nlayers) {
+		sprintf(tempbuf,
+			"read_training-> Training files must contain same number of layers");
+		G_fatal_error(tempbuf);
+	    }
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	}
+	while ((line = GetLine(fp)) != NULL) {
+	    for (i = 0; i < training->nlayers; i++) {
+		j = 0;
+		training->mapnames[training->nexamples][i] =
+		    (char *)G_calloc(strlen(line) - 1, sizeof(char));
+		index = 0;
+		while (line[j] > 44 && line[j] < 123)
+		    training->mapnames[training->nexamples][i][index++] =
+			line[j++];
+		training->mapnames[training->nexamples][i]
+		    [strlen(training->mapnames[training->nexamples][i])] =
+		    '\0';
+		line = (char *)strchr(line, '\t');
+		line++;
+	    }
+	    sscanf(line, "%d", &(training->class[training->nexamples]));
+
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%lf", &(training->east[training->nexamples]));
+
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    sscanf(line, "%lf", &(training->north[training->nexamples]));
+
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    if (training->nexamples == 0) {
+		sscanf(line, "%d", &(training->rows));
+	    }
+	    else {
+		sscanf(line, "%d", &tmprow);
+	    }
+
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    if (training->nexamples == 0) {
+		sscanf(line, "%d", &(training->cols));
+	    }
+	    else {
+		sscanf(line, "%d", &tmpcol);
+	    }
+
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    if (training->nexamples == 0) {
+		sscanf(line, "%lf", &(training->ew_res));
+	    }
+	    else {
+		sscanf(line, "%lf", &tmpew);
+	    }
+
+	    line = (char *)strchr(line, '\t');
+	    line++;
+	    if (training->nexamples == 0) {
+		sscanf(line, "%lf", &(training->ns_res));
+	    }
+	    else {
+		sscanf(line, "%lf", &tmpns);
+	    }
+
+	    if (training->nexamples > 0) {
+		if ((tmprow != training->rows) || (tmpcol != training->cols)) {
+		    sprintf(tempbuf,
+			    "read_training-> Example %d: different number of rows or cols",
+			    training->nexamples + 1);
+		    G_fatal_error(tempbuf);
+		}
+		if (fabs((tmpew - training->ew_res) / training->ew_res) > 0.1) {
+		    sprintf(tempbuf,
+			    "read_training-> Example %d: EW-resolution differs more than 10%%",
+			    training->nexamples + 1);
+		    G_warning(tempbuf);
+		}
+		if (fabs((tmpns - training->ns_res) / training->ns_res) > 0.1) {
+		    sprintf(tempbuf,
+			    "read_training-> Example %d: NS-resolution differs more than 10%%",
+			    training->nexamples + 1);
+		    G_warning(tempbuf);
+		}
+
+	    }
+	    training->nexamples += 1;
+	    if (training->nexamples == TRAINING_MAX_EXAMPLES) {
+		sprintf(tempbuf,
+			"read_training-> Maximum number of training data is %d",
+			TRAINING_MAX_EXAMPLES);
+		G_fatal_error(tempbuf);
+	    }
+
+	}
+	break;
+    case TABLE_data:
+	training->data_type = training_type;
+	training->rows = 1;
+	training->ew_res = 0.0;
+	training->ns_res = 0.0;
+	training->nlayers = 1;
+	if (training->nexamples == 0) {
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    sscanf(line, "%d", &(training->cols));
+	}
+	else {
+	    line = GetLine(fp);
+	    line = GetLine(fp);
+	    sscanf(line, "%d", &(tmpc));
+	    if (tmpc != training->cols) {
+		sprintf(tempbuf,
+			"read_training-> training data must have same number of columns");
+		G_fatal_error(tempbuf);
+	    }
+	}
+	line = GetLine(fp);
+	while ((line = GetLine(fp)) != NULL) {
+	    training->data[training->nexamples] =
+		(double *)G_calloc(training->cols, sizeof(double));
+	    for (i = 0; i < training->cols; i++) {
+		sscanf(line, "%lf",
+		       &(training->data[training->nexamples][i]));
+		line = (char *)strchr(line, '\t');
+		line++;
+	    }
+	    sscanf(line, "%d", &(training->class[training->nexamples]));
+	    training->nexamples += 1;
+	    if (training->nexamples == TRAINING_MAX_EXAMPLES) {
+		sprintf(tempbuf,
+			"read_training-> Maximum number of training data is %d",
+			TRAINING_MAX_EXAMPLES);
+		G_fatal_error(tempbuf);
+	    }
+	}
+	break;
+
+    default:
+	sprintf(tempbuf, "read_training-> Format not recognized");
+	G_fatal_error(tempbuf);
+	break;
+    }
+    fclose(fp);
+}
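
One detail of the GRASS_data branch above worth noting: the map-name scan is
keyed on raw ASCII codes, so line[j] > 44 && line[j] < 123 accepts exactly
the characters '-' (45) through 'z' (122), which covers digits, letters and
common punctuation in map names, and stops at the tab separator (9).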

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/tree.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/tree.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/tree.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,2113 @@
+/*
+   The following routines are written and tested by Stefano Merler
+
+   for
+
+   structure Tree and BTree management
+ */
+
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+static void split_node();
+
+void compute_tree(Tree * tree, int nsamples, int nvar, double **data,
+		  int *data_class, int nclasses, int *classes, int stamps,
+		  int minsize, double *costs)
+
+     /*
+        receives in input training data of dimensions nsamples x nvar,
+        with class labels data_class and the possible classes (array
+        classes of length nclasses), and grows a classification tree.
+        Optionally, the user can build stamps and set the minimal
+        number of cases within a node as stopping criterion.
+      */
+{
+
+    int i, j;
+    int node_class_index;
+    int max_node_points;
+    int xx;
+    double sumpriors;
+
+    tree->stamps = stamps;
+    tree->minsize = minsize;
+
+
+    tree->node = (Node *) G_malloc(sizeof(Node));
+    tree->node[0].nclasses = nclasses;
+
+
+    tree->node[0].npoints = nsamples;
+    tree->node[0].nvar = nvar;
+    tree->node[0].data = data;
+    tree->node[0].classes = data_class;
+
+
+    tree->node[0].npoints_for_class = (int *)G_calloc(nclasses, sizeof(int));
+    tree->node[0].priors = (double *)G_calloc(nclasses, sizeof(double));
+
+
+    for (i = 0; i < tree->node[0].npoints; i++) {
+	for (j = 0; j < nclasses; j++)
+	    if (classes[j] == tree->node[0].classes[i]) {
+		tree->node[0].npoints_for_class[j] += 1;
+		break;
+	    }
+    }
+
+    node_class_index = 0;
+    max_node_points = 0;
+    for (j = 0; j < nclasses; j++)
+	if (tree->node[0].npoints_for_class[j] > max_node_points) {
+	    max_node_points = tree->node[0].npoints_for_class[j];
+	    node_class_index = j;
+	}
+    tree->node[0].class = classes[node_class_index];
+
+    sumpriors = .0;
+    for (j = 0; j < nclasses; j++)
+	sumpriors += costs[j] * tree->node[0].npoints_for_class[j];
+    for (j = 0; j < nclasses; j++)
+	tree->node[0].priors[j] =
+	    costs[j] * tree->node[0].npoints_for_class[j] / sumpriors;
+
+    tree->node[0].terminal = TRUE;
+    if (entropy(tree->node[0].priors, nclasses) > 0)
+	tree->node[0].terminal = FALSE;
+
+    tree->nnodes = 1;
+    for (xx = 0; xx < tree->nnodes; xx++)
+	if (!tree->node[xx].terminal) {
+	    tree->node[xx].left = tree->nnodes;
+	    tree->node[xx].right = tree->nnodes + 1;
+	    tree->node =
+		(Node *) G_realloc(tree->node,
+				   (tree->nnodes + 2) * sizeof(Node));
+	    split_node(&(tree->node[xx]), &(tree->node[tree->nnodes]),
+		       &(tree->node[tree->nnodes + 1]), classes, nclasses,
+		       costs);
+	    if (tree->minsize > 0) {
+		if (tree->node[tree->nnodes].npoints < tree->minsize)
+		    tree->node[tree->nnodes].terminal = TRUE;
+		if (tree->node[tree->nnodes + 1].npoints < tree->minsize)
+		    tree->node[tree->nnodes + 1].terminal = TRUE;
+	    }
+	    if (tree->stamps) {
+		tree->node[tree->nnodes].terminal = TRUE;
+		tree->node[tree->nnodes + 1].terminal = TRUE;
+	    }
+	    tree->nnodes += 2;
+	}
+
+}
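
A usage sketch of compute_tree() (illustrative: data, data_class, nsamples
and nvar stand for the caller's training set):

    Tree tree;
    int classes[2] = { 1, -1 };
    double costs[2] = { 1.0, 1.0 };

    /* grow an unpruned tree with unit misclassification costs;
       stamps = 1 would stop after the first split, minsize > 0
       stops splitting small nodes */
    compute_tree(&tree, nsamples, nvar, data, data_class, 2, classes,
                 0, 0, costs);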
+
+
+static void split_node(Node * node, Node * nodeL, Node * nodeR, int *classes,
+		       int nclasses, double *costs)
+{
+    int **indx;
+    double *tmpvar;
+    int i, j, k;
+    int **npL, **npR;
+    double **prL, **prR;
+    int totL, totR;
+    double a, b;
+    double *decrease_in_inpurity;
+    double max_decrease;
+    int splitvar = 0;
+    int splitvalue = 0;
+    int morenumerous;
+    double sumpriors;
+
+    nodeL->priors = (double *)G_calloc(nclasses, sizeof(double));
+    nodeR->priors = (double *)G_calloc(nclasses, sizeof(double));
+    nodeL->npoints_for_class = (int *)G_calloc(nclasses, sizeof(int));
+    nodeR->npoints_for_class = (int *)G_calloc(nclasses, sizeof(int));
+
+    indx = (int **)G_calloc(node->nvar, sizeof(int *));
+    for (i = 0; i < node->nvar; i++)
+	indx[i] = (int *)G_calloc(node->npoints, sizeof(int));
+
+    tmpvar = (double *)G_calloc(node->npoints, sizeof(double));
+    decrease_in_inpurity =
+	(double *)G_calloc(node->npoints - 1, sizeof(double));
+
+    npL = (int **)G_calloc(node->npoints, sizeof(int *));
+    for (i = 0; i < node->npoints; i++)
+	npL[i] = (int *)G_calloc(nclasses, sizeof(int));
+    npR = (int **)G_calloc(node->npoints, sizeof(int *));
+    for (i = 0; i < node->npoints; i++)
+	npR[i] = (int *)G_calloc(nclasses, sizeof(int));
+
+    prL = (double **)G_calloc(node->npoints, sizeof(double *));
+    for (i = 0; i < node->npoints; i++)
+	prL[i] = (double *)G_calloc(nclasses, sizeof(double));
+    prR = (double **)G_calloc(node->npoints, sizeof(double *));
+    for (i = 0; i < node->npoints; i++)
+	prR[i] = (double *)G_calloc(nclasses, sizeof(double));
+
+    for (i = 0; i < node->nvar; i++) {
+	for (j = 0; j < node->npoints; j++)
+	    tmpvar[j] = node->data[j][i];
+
+	indexx_1(node->npoints, tmpvar, indx[i]);
+
+	for (k = 0; k < nclasses; k++)
+	    if (node->classes[indx[i][0]] == classes[k]) {
+		npL[0][k] = 1;
+		npR[0][k] = node->npoints_for_class[k] - npL[0][k];
+	    }
+	    else {
+		npL[0][k] = 0;
+		npR[0][k] = node->npoints_for_class[k];
+	    }
+
+
+	for (j = 1; j < node->npoints - 1; j++)
+	    for (k = 0; k < nclasses; k++)
+		if (node->classes[indx[i][j]] == classes[k]) {
+		    npL[j][k] = npL[j - 1][k] + 1;
+		    npR[j][k] = node->npoints_for_class[k] - npL[j][k];
+		}
+		else {
+		    npL[j][k] = npL[j - 1][k];
+		    npR[j][k] = node->npoints_for_class[k] - npL[j][k];
+		}
+
+
+	for (j = 0; j < node->npoints - 1; j++) {
+	    totL = totR = 0;
+	    for (k = 0; k < nclasses; k++)
+		totL += (double)npL[j][k];
+
+	    sumpriors = 0.;
+	    for (k = 0; k < nclasses; k++)
+		sumpriors += costs[k] * npL[j][k];
+	    for (k = 0; k < nclasses; k++)
+		prL[j][k] = costs[k] * npL[j][k] / sumpriors;
+
+	    for (k = 0; k < nclasses; k++)
+		totR += (double)npR[j][k];
+
+	    sumpriors = 0.;
+	    for (k = 0; k < nclasses; k++)
+		sumpriors += costs[k] * npR[j][k];
+	    for (k = 0; k < nclasses; k++)
+		prR[j][k] = costs[k] * npR[j][k] / sumpriors;
+
+	    a = (double)totL / (double)node->npoints;
+	    b = (double)totR / (double)node->npoints;
+
+	    decrease_in_inpurity[j] = entropy(node->priors, nclasses) -
+		a * entropy(prL[j], nclasses) - b * entropy(prR[j], nclasses);
+	}
+
+	if (i == 0) {
+	    splitvar = 0;
+	    splitvalue = 0;
+	    max_decrease = decrease_in_inpurity[0];
+
+	    for (k = 0; k < nclasses; k++)
+		nodeL->priors[k] = prL[splitvalue][k];
+	    for (k = 0; k < nclasses; k++)
+		nodeR->priors[k] = prR[splitvalue][k];
+
+	    for (k = 0; k < nclasses; k++)
+		nodeL->npoints_for_class[k] = npL[splitvalue][k];
+	    for (k = 0; k < nclasses; k++)
+		nodeR->npoints_for_class[k] = npR[splitvalue][k];
+	}
+
+	for (j = 0; j < node->npoints - 1; j++)
+	    if (decrease_in_inpurity[j] > max_decrease) {
+		max_decrease = decrease_in_inpurity[j];
+
+		splitvar = i;
+		splitvalue = j;
+
+		for (k = 0; k < nclasses; k++)
+		    nodeL->priors[k] = prL[splitvalue][k];
+		for (k = 0; k < nclasses; k++)
+		    nodeR->priors[k] = prR[splitvalue][k];
+
+		for (k = 0; k < nclasses; k++)
+		    nodeL->npoints_for_class[k] = npL[splitvalue][k];
+		for (k = 0; k < nclasses; k++)
+		    nodeR->npoints_for_class[k] = npR[splitvalue][k];
+
+
+	    }
+
+    }
+
+    if (splitvar < 0 && splitvalue < 0) {
+	node->value = 0;
+	node->terminal = TRUE;
+	nodeL->nclasses = node->nclasses;
+	nodeL->npoints = 0;
+	nodeL->terminal = TRUE;
+	nodeL->class = -9999;
+	nodeR->nclasses = node->nclasses;
+	nodeR->npoints = 0;
+	nodeR->terminal = TRUE;
+	nodeR->class = -9999;
+	return;
+    }
+
+    node->var = splitvar;
+    node->value = (node->data[indx[splitvar][splitvalue]][node->var] +
+		   node->data[indx[splitvar][splitvalue + 1]][node->var]) /
+	2.;
+
+
+    nodeL->nvar = node->nvar;
+    nodeL->nclasses = node->nclasses;
+    nodeL->npoints = splitvalue + 1;
+
+    nodeL->terminal = TRUE;
+    if (entropy(nodeL->priors, nclasses) > 0)
+	nodeL->terminal = FALSE;
+
+    nodeL->data = (double **)G_calloc(nodeL->npoints, sizeof(double *));
+    nodeL->classes = (int *)G_calloc(nodeL->npoints, sizeof(int));
+
+    for (i = 0; i < nodeL->npoints; i++) {
+	nodeL->data[i] = node->data[indx[splitvar][i]];
+	nodeL->classes[i] = node->classes[indx[splitvar][i]];
+    }
+
+
+    morenumerous = 0;
+    for (k = 0; k < nclasses; k++)
+	if (nodeL->npoints_for_class[k] > morenumerous) {
+	    morenumerous = nodeL->npoints_for_class[k];
+	    nodeL->class = classes[k];
+	}
+
+
+
+    nodeR->nvar = node->nvar;
+    nodeR->nclasses = node->nclasses;
+    nodeR->npoints = node->npoints - nodeL->npoints;
+
+    nodeR->terminal = TRUE;
+    if (entropy(nodeR->priors, nclasses) > 0)
+	nodeR->terminal = FALSE;
+
+
+    nodeR->data = (double **)G_calloc(nodeR->npoints, sizeof(double *));
+    nodeR->classes = (int *)G_calloc(nodeR->npoints, sizeof(int));
+
+    for (i = 0; i < nodeR->npoints; i++) {
+	nodeR->data[i] = node->data[indx[splitvar][nodeL->npoints + i]];
+	nodeR->classes[i] = node->classes[indx[splitvar][nodeL->npoints + i]];
+    }
+
+    morenumerous = 0;
+    for (k = 0; k < nclasses; k++)
+	if (nodeR->npoints_for_class[k] > morenumerous) {
+	    morenumerous = nodeR->npoints_for_class[k];
+	    nodeR->class = classes[k];
+	}
+
+
+    for (i = 0; i < node->nvar; i++)
+	G_free(indx[i]);
+    G_free(indx);
+
+    for (i = 0; i < node->npoints; i++)
+	G_free(npL[i]);
+    G_free(npL);
+    for (i = 0; i < node->npoints; i++)
+	G_free(npR[i]);
+    G_free(npR);
+
+    for (i = 0; i < node->npoints; i++)
+	G_free(prL[i]);
+    G_free(prL);
+    for (i = 0; i < node->npoints; i++)
+	G_free(prR[i]);
+    G_free(prR);
+
+    G_free(tmpvar);
+    G_free(decrease_in_inpurity);
+}
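
The quantity maximized above is the decrease in impurity
delta(i, j) = H(node) - a * H(left_j) - b * H(right_j), where H is the
entropy of the (cost-weighted) class priors and a, b are the fractions of
cases sent to each side; the stored split value is the midpoint between the
two adjacent sorted values of the chosen variable.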
+
+void write_tree(char *file, Tree * tree, Features * features)
+
+     /*
+        write a tree model to a file
+      */
+{
+    int i, j;
+    FILE *fp;
+    char tempbuf[500];
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "write_tree-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+
+    write_header_features(fp, features);
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "MODEL:\n");
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "Model:\n");
+    fprintf(fp, "ClassificationTree\n");
+    fprintf(fp, "Number of nodes:\n");
+    fprintf(fp, "%d\n", tree->nnodes);
+    fprintf(fp, "Number of classes:\n");
+    fprintf(fp, "%d\n", tree->node[0].nclasses);
+    fprintf(fp, "Number of features:\n");
+    fprintf(fp, "%d\n\n", tree->node[0].nvar);
+    fprintf(fp, "Tree structure:\n");
+    fprintf(fp, "terminal\tndata\t");
+    for (j = 0; j < tree->node[0].nclasses; j++)
+	fprintf(fp, "data_cl%d\t", j + 1);
+    for (j = 0; j < tree->node[0].nclasses; j++)
+	fprintf(fp, "prior_cl%d\t", j + 1);
+    fprintf(fp,
+	    "class\tchild_left\tchild_right\tsplit_variable\tsplit_value\n");
+    for (i = 0; i < tree->nnodes; i++) {
+	fprintf(fp, "%d\t%d\t", tree->node[i].terminal,
+		tree->node[i].npoints);
+	for (j = 0; j < tree->node[i].nclasses; j++)
+	    fprintf(fp, "%d\t", tree->node[i].npoints_for_class[j]);
+	for (j = 0; j < tree->node[i].nclasses; j++)
+	    fprintf(fp, "%f\t", tree->node[i].priors[j]);
+	if (tree->node[i].terminal)
+	    fprintf(fp, "%d\n", tree->node[i].class);
+	else
+	    fprintf(fp, "%d\t%d\t%d\t%d\t%f\n", tree->node[i].class,
+		    tree->node[i].left, tree->node[i].right,
+		    tree->node[i].var, tree->node[i].value);
+    }
+    if (features->f_pca[0]) {
+	fprintf(fp, "#####################\n");
+	fprintf(fp, "PRINC. COMP.:\n");
+	fprintf(fp, "#####################\n");
+	fprintf(fp, "Number of pc:\n");
+	fprintf(fp, "%d\n", features->npc);
+
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    fprintf(fp, "PCA: Layer %d\n", i + 1);
+	    write_pca(fp, &(features->pca[i]));
+	}
+    }
+
+    fclose(fp);
+}
+
+
+void compute_tree_boosting(BTree * btree, int boosting, double w,
+			   int nsamples, int nvar, double **data,
+			   int *data_class, int *classes, int *classes,
+			   int stamps, int minsize, int weights_boosting,
+			   double *costs)
+
+     /*
+        receives in input training data of dimensions nsamples x nvar, 
+        with class labels data_class, the possible classes (of length nclasses)
+        and computes a boosting tree model (number of models = boosting) using w
+
+        optionally, the user can build stamps and decide the 
+        minimal number of cases within a node as stopping criteria. 
+      */
+{
+    int i, b;
+    int *bsamples;
+    double **xdata_training;
+    int *xclasses_training;
+    double *prob;
+    double e00, e01, e10, e11, prior0, prior1;
+    int *error;
+    double eps, totprob;
+    double totbeta;
+
+    if (weights_boosting == 1) {
+	btree->w_evolution = (double **)G_calloc(nsamples, sizeof(double *));
+	for (i = 0; i < nsamples; i++)
+	    btree->w_evolution[i] =
+		(double *)G_calloc(boosting + 3, sizeof(double));
+    }
+
+    btree->tree = (Tree *) G_calloc(boosting, sizeof(Tree));
+    btree->ntrees = boosting;
+    btree->weights = (double *)G_calloc(btree->ntrees, sizeof(double));
+    btree->w = w;
+
+    prob = (double *)G_calloc(nsamples, sizeof(double));
+    bsamples = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+    error = (int *)G_calloc(nsamples, sizeof(int));
+
+    for (i = 0; i < nsamples; i++) {
+	prob[i] = 1.0 / nsamples;
+    }
+
+    for (b = 0; b < btree->ntrees; b++) {
+	if (weights_boosting == 1)
+	    for (i = 0; i < nsamples; i++)
+		btree->w_evolution[i][b] = prob[i];
+
+	Bootsamples(nsamples, prob, bsamples);
+	/* THIS PART PRINTS WHICH SAMPLES WERE EXTRACTED
+	   AT EACH REPETITION: DISABLED FOR NOW; ADD AN
+	   OPTION IN THE MAIN OF i.pr_model TO ENABLE IT
+	   {
+	   int out,j;
+
+	   out=0;
+	   for(j=0;j<nsamples;j++)
+	   if(bsamples[j]==0){
+	   out=1;
+	   break;
+	   }
+	   fprintf(stderr,"%d",out);
+
+	   for(i=1;i<nsamples;i++){
+	   out=0;
+	   for(j=0;j<nsamples;j++)
+	   if(bsamples[j]==i){
+	   out=1;
+	   break;
+	   }
+	   fprintf(stderr,"\t%d",out);
+	   }
+	   fprintf(stderr,"\n");
+	   }
+	 */
+	for (i = 0; i < nsamples; i++) {
+	    xdata_training[i] = data[bsamples[i]];
+	    xclasses_training[i] = data_class[bsamples[i]];
+	}
+	compute_tree(&(btree->tree[b]), nsamples, nvar, xdata_training,
+		     xclasses_training, nclasses, classes, stamps, minsize,
+		     costs);
+
+	e00 = e01 = e10 = e11 = prior0 = prior1 = 0.0;
+	for (i = 0; i < nsamples; i++) {
+	    if (data_class[i] == classes[0]) {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+		    error[i] = TRUE;
+		    e01 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e00 += prob[i];
+		}
+		prior0 += prob[i];
+	    }
+	    else {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+		    error[i] = TRUE;
+		    e10 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e11 += prob[i];
+		}
+		prior1 += prob[i];
+	    }
+	}
+	eps = (1.0 - e00 / (e00 + e01)) * prior0 * btree->w +
+	    (1.0 - e11 / (e10 + e11)) * prior1 * (2.0 - btree->w);
+	if (eps > 0.0 && eps < 0.5) {
+	    btree->weights[b] = 0.5 * log((1.0 - eps) / eps);
+	    totprob = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		if (error[i]) {
+		    if (data_class[i] == classes[0]) {
+			prob[i] = prob[i] * exp(btree->weights[b] * btree->w);
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		}
+		else {
+		    if (data_class[i] == classes[0]) {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] * btree->w);
+		    }
+		}
+		totprob += prob[i];
+	    }
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] /= totprob;
+	    }
+	}
+	else {
+	    btree->weights[b] = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] = 1.0 / nsamples;
+	    }
+	}
+    }
+
+    totbeta = 0.0;
+    for (b = 0; b < btree->ntrees; b++) {
+	totbeta += btree->weights[b];
+    }
+    if (totbeta > 0) {
+	for (b = 0; b < btree->ntrees; b++) {
+	    btree->weights[b] /= totbeta;
+	}
+    }
+    else {
+	fprintf(stderr, "WARNING: weights all null, set to 1/nmodels\n");
+	for (b = 0; b < btree->ntrees; b++) {
+	    btree->weights[b] = 1. / btree->ntrees;
+	}
+    }
+
+    G_free(bsamples);
+    G_free(xclasses_training);
+    G_free(prob);
+    G_free(error);
+    G_free(xdata_training);
+}
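
A usage sketch of compute_tree_boosting() (illustrative names; w = 1.0 makes
the weight update symmetric in the two classes, i.e. standard AdaBoost):

    BTree btree;
    int classes[2] = { 1, -1 };
    double costs[2] = { 1.0, 1.0 };

    /* 100 boosted stamps on the caller's training set */
    compute_tree_boosting(&btree, 100, 1.0, nsamples, nvar, data,
                          data_class, 2, classes, 1, 0, 0, costs);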
+
+/*########################Regularized adaboost################################   */
+/*        The following routines are written and tested by Mauro Martinelli      */
+void compute_tree_boosting_reg(BTree * btree, int boosting, double w,
+			       int nsamples, int nvar, double **data,
+			       int *data_class, int nclasses, int *classes,
+			       int stamps, int minsize, int weights_boosting,
+			       double *costs, double *misratio)
+
+     /*
+        receives in input training data of dimensions nsamples x nvar,
+        with class labels data_class, the possible classes (of length nclasses)
+        and computes a boosting tree model (number of models = boosting) using w
+
+        optionally, the user can build stamps and decide the
+        minimal number of cases within a node as stopping criteria.
+        It also calculates the misclassification ratio for every training point.
+
+      */
+{
+    int i, b, j;
+    int *bsamples;
+    double **xdata_training;
+    int *xclasses_training;
+    double *prob;
+    double e00, e01, e10, e11, prior0, prior1;
+    int *error;
+    double eps, totprob;
+    double totbeta;
+    double *mis;
+    double *notextracted;
+    int *extracted;
+
+
+    if (weights_boosting == 1) {
+	btree->w_evolution = (double **)G_calloc(nsamples, sizeof(double *));
+	for (i = 0; i < nsamples; i++)
+	    btree->w_evolution[i] =
+		(double *)G_calloc(boosting + 3, sizeof(double));
+    }
+
+    btree->tree = (Tree *) G_calloc(boosting, sizeof(Tree));
+    btree->ntrees = boosting;
+    btree->weights = (double *)G_calloc(btree->ntrees, sizeof(double));
+    btree->w = w;
+
+    notextracted = (double *)G_calloc(nsamples, sizeof(double));
+    extracted = (int *)G_calloc(nsamples, sizeof(int));
+    mis = (double *)G_calloc(nsamples, sizeof(double));
+    prob = (double *)G_calloc(nsamples, sizeof(double));
+    bsamples = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+    error = (int *)G_calloc(nsamples, sizeof(int));
+
+    for (i = 0; i < nsamples; i++) {
+	prob[i] = 1.0 / nsamples;
+    }
+
+    for (b = 0; b < btree->ntrees; b++) {
+	if (weights_boosting == 1)
+	    for (i = 0; i < nsamples; i++)
+		btree->w_evolution[i][b] = prob[i];
+
+	Bootsamples(nsamples, prob, bsamples);
+	for (i = 0; i < nsamples; i++) {
+	    xdata_training[i] = data[bsamples[i]];
+	    xclasses_training[i] = data_class[bsamples[i]];
+	}
+	compute_tree(&(btree->tree[b]), nsamples, nvar, xdata_training,
+		     xclasses_training, nclasses, classes, stamps, minsize,
+		     costs);
+
+	e00 = e01 = e10 = e11 = prior0 = prior1 = 0.0;
+	for (i = 0; i < nsamples; i++) {
+	    if (data_class[i] == classes[0]) {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+		    error[i] = TRUE;
+		    e01 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e00 += prob[i];
+		}
+		prior0 += prob[i];
+	    }
+	    else {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+		    error[i] = TRUE;
+		    e10 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e11 += prob[i];
+		}
+		prior1 += prob[i];
+	    }
+	}
+	eps = (1.0 - e00 / (e00 + e01)) * prior0 * btree->w +
+	    (1.0 - e11 / (e10 + e11)) * prior1 * (2.0 - btree->w);
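+	/*
+	   Cost-sensitive AdaBoost step: eps is the weighted training error,
+	   with errors on classes[0] weighted by w and errors on the other
+	   class by (2 - w), so w = 1 recovers standard AdaBoost. When
+	   0 < eps < 0.5 the model weight is 0.5 * ln((1 - eps) / eps) and
+	   the sampling probabilities below are increased for misclassified
+	   points and decreased for correct ones, then renormalized.
+	 */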
+	if (eps > 0.0 && eps < 0.5) {
+	    btree->weights[b] = 0.5 * log((1.0 - eps) / eps);
+	    totprob = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		if (error[i]) {
+		    if (data_class[i] == classes[0]) {
+			prob[i] = prob[i] * exp(btree->weights[b] * btree->w);
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		}
+		else {
+		    if (data_class[i] == classes[0]) {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] * btree->w);
+		    }
+		}
+		totprob += prob[i];
+	    }
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] /= totprob;
+	    }
+	}
+	else {
+	    btree->weights[b] = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] = 1.0 / nsamples;
+	    }
+	}
+	/*Misclassification ratio */
+	for (i = 0; i < nsamples; i++) {
+	    extracted[i] = 0;
+	    for (j = 0; j < nsamples; j++) {
+		if (bsamples[j] == i) {
+		    extracted[i] = 1;
+		    break;
+		}
+	    }
+	    if (extracted[i] == 0) {
+		notextracted[i] += 1;
+		if (error[i] == TRUE)
+		    mis[i] += 1;
+	    }
+	}
+    }
+
+    for (i = 0; i < nsamples; i++) {
+
+	if (notextracted[i] == 0) {
+	    misratio[i] = 0;
+	    fprintf(stdout, "WARNING: the point %d is always extracted\n", i);
+	}
+	else {
+	    misratio[i] = mis[i] / notextracted[i];
+	}
+    }
+
+
+    totbeta = 0.0;
+    for (b = 0; b < btree->ntrees; b++) {
+	totbeta += btree->weights[b];
+    }
+    if (totbeta > 0) {
+	for (b = 0; b < btree->ntrees; b++) {
+	    btree->weights[b] /= totbeta;
+	}
+    }
+    else {
+	fprintf(stderr, "WARNING: weights all null, set to 1/nmodels\n");
+	for (b = 0; b < btree->ntrees; b++) {
+	    btree->weights[b] = 1. / btree->ntrees;
+	}
+    }
+
+    G_free(bsamples);
+    G_free(xclasses_training);
+    G_free(prob);
+    G_free(error);
+    G_free(xdata_training);
+    G_free(mis);
+    G_free(notextracted);
+    G_free(extracted);
+}
+
+
+void regularized_boosting(int boosting, double w, int nsamples, int nvar,
+			  double **data, int *data_class, int nclasses,
+			  int *classes, int stamps, int minsize,
+			  int weights_boosting, double *costs,
+			  double *misratio, int reg, Features test_features,
+			  char *file, Features validation_features,
+			  int reg_verbose, char nametest[150],
+			  char nameval[150], char modelout[150],
+			  Features train_features, int testset)
+
+     /*
+        compute a btree model on each reduced training set, test it on the
+        validation set and select the best model, which is then written to
+        the output file.
+      */
+{
+    BTree *btree;
+    int i, j, k, run, t, z;
+    double **xdata_training;
+    int *xclasses_training;
+    int *ncampioni;
+    double r, regd, bestr;
+    double *accuracy;
+    double *toterror;
+    double bestaccuracy;
+    int vet;
+    char testtrain[150];
+
+    fprintf(stdout, "-----------------------------------\n");
+    fprintf(stdout, "Training and prediction on validation data: %s\n",
+	    nameval);
+    if (reg_verbose > 1) {
+	fprintf(stdout,
+		"Class of validation features (regularized boosting):%d\n",
+		validation_features.nclasses);
+	fprintf(stdout,
+		"Interval number of misclassification ratio (regularized boosting):%d\n",
+		reg);
+	fprintf(stdout, "-----------------------------------\n");
+    }
+
+    btree = (BTree *) G_calloc(reg, sizeof(BTree));
+    ncampioni = (int *)G_calloc(reg, sizeof(int));
+    accuracy = (double *)G_calloc(reg, sizeof(double));
+    toterror = (double *)G_calloc(reg, sizeof(double));
+
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    for (k = 0; k < nsamples; k++) {
+	xdata_training[k] = (double *)G_calloc(nvar, sizeof(double));
+    }
+
+    bestaccuracy = 0.0;
+    r = 0;
+    run = 0;
+    bestr = 0;
+
+    /* for each threshold 'r', build the reduced training set */
+    for (i = 0; i < reg; i++) {
+	regd = reg;
+	r = (1 - (i * (1 / regd)));
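+	/* e.g. with reg = 4 the thresholds are r = 1.0, 0.75, 0.5, 0.25 */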
+
+	ncampioni[i] = 0;
+	for (j = 0; j < nsamples; j++) {
+	    if (misratio[j] <= r)
+		ncampioni[i] = ncampioni[i] + 1;
+	}
+	if ((i == 0) || ((ncampioni[i]) != (ncampioni[i - 1]))) {
+
+	    if (ncampioni[i] < 11) {
+		if (reg_verbose > 1) {
+		    fprintf(stdout,
+			    "WARNING: at run %d the training set is too small\n",
+			    i);
+		}
+		break;
+	    }
+	    else {
+		if (reg_verbose > 1) {
+		    fprintf(stdout,
+			    "%d samples extracted at run %d and 'r' is: %e\n",
+			    ncampioni[i], i, r);
+		}
+		vet = 0;
+		for (j = 0; j < nsamples; j++) {
+		    if (misratio[j] <= r) {
+			xdata_training[vet] = data[j];
+			xclasses_training[vet] = data_class[j];
+			vet = vet + 1;
+		    }
+		}
+
+		compute_tree_boosting(&btree[i], boosting, w, ncampioni[i],
+				      nvar, xdata_training, xclasses_training,
+				      nclasses, classes, stamps, minsize,
+				      weights_boosting, costs);
+		accuracy[i] =
+		    test_regularized_boosting(&btree[i],
+					      &validation_features);
+
+		if (reg_verbose == 1) {
+		    toterror[i] = 1 - accuracy[i];
+		    fprintf(stdout, "%e\t%e\n", r, toterror[i]);
+		}
+
+		if (reg_verbose > 1) {
+		    fprintf(stdout, "Accuracy at run %d is %e\n", i,
+			    accuracy[i]);
+		    fprintf(stdout, "-----------------------------------\n");
+		}
+
+		if (accuracy[i] > bestaccuracy) {
+		    bestaccuracy = accuracy[i];
+		    run = i;
+		    bestr = r;
+		}
+		else {
+		    for (t = 0; t < btree[i].ntrees; t++) {
+			for (z = 0; z < btree[i].tree[t].nnodes; z++) {
+			    G_free(btree[i].tree[t].node[z].priors);
+			    G_free(btree[i].tree[t].
+				   node[z].npoints_for_class);
+			}
+
+			G_free(btree[i].tree[t].node);
+		    }
+		    G_free(btree[i].tree);
+		    G_free(btree[i].weights);
+		}
+	    }
+	}
+    }
+    fprintf(stdout,
+	    "Best accuracy on the validation features = %e at run %d (r = %e)\n",
+	    bestaccuracy, run, bestr);
+    fprintf(stdout, "-----------------------------------\n");
+
+
+    if (reg_verbose > 0) {
+	sprintf(testtrain, "%s_tr_predshaved", modelout);
+
+	fprintf(stdout, "Prediction on training data after shave:\n");
+	test_btree(&btree[run], &train_features, testtrain);
+	fprintf(stdout, "-----------------------------------\n");
+    }
+
+
+
+    if (testset == 1) {
+
+	fprintf(stdout,
+		"Test the standard tree model ('r'=1) on test data: %s\n",
+		nametest);
+	test_btree(&btree[0], &test_features, file);
+	fprintf(stdout, "Test the best tree model on test data: %s\n",
+		nametest);
+	test_btree(&btree[run], &test_features, file);
+    }
+
+
+    fprintf(stdout, "Output file: %s\n", modelout);
+    write_bagging_boosting_tree(modelout, &btree[run], &train_features);
+
+
+    G_free(ncampioni);
+    G_free(accuracy);
+    G_free(toterror);
+    G_free(xdata_training);
+    G_free(xclasses_training);
+
+}
+
+
+
+double test_regularized_boosting(BTree * btree, Features * features)
+
+     /*
+        test a btree model on a set of data (features)
+        and return the accuracy of the btree model
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    int predI;
+    double predD;
+    double accuracy;
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+
+
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if (features->nclasses == 2) {
+		    if ((predD =
+			 predict_btree_2class(btree,
+					      features->value[i])) *
+			features->class[i] <= 0) {
+			accuracy += 1.0;
+		    }
+		}
+		else {
+		    if ((predI =
+			 predict_btree_multiclass(btree, features->value[i],
+						  features->nclasses,
+						  features->p_classes)) !=
+			features->class[i]) {
+			accuracy += 1.0;
+		    }
+		}
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    G_free(data_in_each_class);
+    return accuracy;
+}
+
+void test_btree_reg(BTree * btree, Features * features, char *file,
+		    double *misratio)
+
+     /*
+        test a btree model on a set of data (features) and write the
+        results into a file. Accuracy and per-class errors are printed
+        to standard output.
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    int predI;
+    double predD;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	fprintf(stderr, "test_btree_reg-> Can't open file %s for writing\n",
+		file);
+	exit(-1);
+    }
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	if (features.examples_dim < 3) {	/* if examples_dim is small, also print the feature values to the output file */
+	    for (j = 0; j < features->examples_dim; j++) {
+		fprintf(fp, "%e\t", features->value[i][j]);
+	    }
+	}
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if (features->nclasses == 2) {
+		    if ((predD =
+			 predict_btree_2class(btree,
+					      features->value[i])) *
+			features->class[i] <= 0) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%f", features->class[i], predD);
+		}
+		else {
+		    if ((predI =
+			 predict_btree_multiclass(btree, features->value[i],
+						  features->nclasses,
+						  features->p_classes)) !=
+			features->class[i]) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%d", features->class[i], predI);
+		}
+		break;
+	    }
+	}
+	fprintf(fp, "\t%e\n", misratio[i]);
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+void shaving_and_compute(int boosting, double w, int nsamples, int nvar,
+			 double **data, int *data_class, int nclasses,
+			 int *classes, int stamps, int minsize,
+			 int weights_boosting, double *costs,
+			 double *misratio, int reg_verbose,
+			 double misclass_ratio, char *outfile,
+			 char modello[150], Features features,
+			 Features test_features, char *outfile1, int testfile)
+
+     /*
+        compute a btree model on the shaved training set, test it and
+        write the tree model to the output file
+      */
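+     /*
+        Shaving criterion: samples with misratio > misclass_ratio are
+        dropped ("shaved"); e.g. misclass_ratio = 0.5 keeps only the
+        samples misclassified in at most half of their out-of-bag
+        evaluations.
+      */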
+{
+    BTree btree;
+    int j, k, n, t;
+    double **xdata_training;
+    int *xclasses_training;
+    int ncampioni;
+    int vet, ind;
+    int *shaved;
+    int nshaved;
+
+    fprintf(stdout, "-----------------------------------\n");
+
+    if (reg_verbose > 0) {
+	fprintf(stdout,
+		"Max misclassification ratio on the new training set (regularized boosting): %f\n",
+		misclass_ratio);
+	fprintf(stdout, "-----------------------------------\n");
+    }
+
+
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    for (k = 0; k < nsamples; k++) {
+	xdata_training[k] = (double *)G_calloc(nvar, sizeof(double));
+    }
+
+    ind = 0;
+    vet = 0;
+    ncampioni = 0;
+    for (j = 0; j < nsamples; j++) {
+	if (misratio[j] <= misclass_ratio)
+	    ncampioni++;
+    }
+
+    nshaved = nsamples - ncampioni;
+    shaved = (int *)G_calloc(nshaved, sizeof(int));
+
+    if (ncampioni < 11) {
+	if (reg_verbose > 0) {
+	    fprintf(stdout, "WARNING: the training set is too small\n");
+	}
+	exit(1);
+    }
+    else {
+	if (reg_verbose > 0) {
+	    fprintf(stdout, "%d samples extracted \n", ncampioni);
+	}
+	for (j = 0; j < nsamples; j++) {
+	    if (misratio[j] <= misclass_ratio) {
+		for (n = 0; n < nvar; n++) {
+		    xdata_training[vet][n] = data[j][n];
+		}
+		xclasses_training[vet] = data_class[j];
+		vet = vet + 1;
+	    }
+	    else {
+		shaved[ind] = j;
+		ind = ind + 1;
+	    }
+	}
+	compute_tree_boosting(&btree, boosting, w, ncampioni, nvar,
+			      xdata_training, xclasses_training,
+			      nclasses, classes, stamps, minsize,
+			      weights_boosting, costs);
+    }
+    if (reg_verbose > 0) {
+	fprintf(stdout, "shaved %d samples :\n", nshaved);
+	for (t = 0; t < nshaved; t++) {
+	    fprintf(stdout, "%d\n", shaved[t]);
+	}
+	fprintf(stdout, "-----------------------------------\n");
+
+    }
+
+    fprintf(stdout, "Prediction on training data \n");
+    test_btree(&btree, &features, outfile);
+
+    fprintf(stdout, "-----------------------------------\n");
+
+    if (testfile == 1) {
+	fprintf(stdout, "Prediction on test data \n");
+	test_btree(&btree, &test_features, outfile1);
+    }
+
+    fprintf(stdout, "Output model: %s\n", modello);
+    write_bagging_boosting_tree(modello, &btree, &features);
+
+    G_free(xdata_training);
+    G_free(xclasses_training);
+    G_free(shaved);
+}
+
+/*############################################################################################ */
+
+
+void compute_tree_bagging(BTree * btree, int bagging, int nsamples, int nvar,
+			  double **data, int *data_class, int nclasses,
+			  int *classes, int stamps, int minsize,
+			  double *costs)
+
+     /*
+        receives in input training data of dimensions nsamples x nvar,
+        with class labels data_class, the possible classes (of length nclasses)
+        and computes a bagging tree model (number of models = bagging).
+
+        Optionally, the user can build stamps and set the minimal number
+        of cases within a node as a stopping criterion.
+      */
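+     /*
+        Unlike boosting, bagging keeps the sampling probabilities uniform
+        (1/nsamples) for every bootstrap replicate and gives all trees
+        the same weight 1/ntrees in the final vote.
+      */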
+{
+    int i, b;
+    int *bsamples;
+    double **xdata_training;
+    int *xclasses_training;
+    double *prob;
+
+    btree->tree = (Tree *) G_calloc(bagging, sizeof(Tree));
+    btree->ntrees = bagging;
+    btree->weights = (double *)G_calloc(btree->ntrees, sizeof(double));
+    btree->w = -1.0;
+
+    for (b = 0; b < btree->ntrees; b++) {
+	btree->weights[b] = 1.0 / btree->ntrees;
+    }
+
+    prob = (double *)G_calloc(nsamples, sizeof(double));
+    bsamples = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+
+    for (i = 0; i < nsamples; i++) {
+	prob[i] = 1.0 / nsamples;
+    }
+
+    for (b = 0; b < btree->ntrees; b++) {
+	Bootsamples(nsamples, prob, bsamples);
+	for (i = 0; i < nsamples; i++) {
+	    xdata_training[i] = data[bsamples[i]];
+	    xclasses_training[i] = data_class[bsamples[i]];
+	}
+	compute_tree(&(btree->tree[b]), nsamples, nvar, xdata_training,
+		     xclasses_training, nclasses, classes, stamps, minsize,
+		     costs);
+    }
+
+
+    G_free(bsamples);
+    G_free(xclasses_training);
+    G_free(prob);
+    G_free(xdata_training);
+}
+
+void write_bagging_boosting_tree(char *file, BTree * btree,
+				 Features * features)
+
+     /*
+        write a bagging or boosting tree model to a file
+      */
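+     /*
+        Illustrative excerpt of the resulting file layout (values are
+        made up):
+
+        Model:
+        B-ClassificationTree
+        Cost parameter:
+        1.000000
+        Number of models:
+        2
+        Weights:
+        0.600000        0.400000
+        Number of nodes:
+        3
+        ...
+      */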
+{
+    int i, j;
+    FILE *fp;
+    char tempbuf[500];
+    int b;
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf,
+		"write_bagging_boosting_tree-> Can't open file %s for writing",
+		file);
+	G_fatal_error(tempbuf);
+    }
+
+    write_header_features(fp, features);
+    fprintf(fp, "#####################\n");
+    fprintf(fp, "MODEL:\n");
+    fprintf(fp, "#####################\n");
+
+    fprintf(fp, "Model:\n");
+    fprintf(fp, "B-ClassificationTree\n");
+    fprintf(fp, "Cost parameter:\n");
+    fprintf(fp, "%f\n", btree->w);
+    fprintf(fp, "Number of models:\n");
+    fprintf(fp, "%d\n", btree->ntrees);
+    fprintf(fp, "Weights:\n");
+    fprintf(fp, "%f", btree->weights[0]);
+    for (b = 1; b < btree->ntrees; b++) {
+	fprintf(fp, "\t%f", btree->weights[b]);
+    }
+    fprintf(fp, "\n");
+
+    for (b = 0; b < btree->ntrees; b++) {
+	fprintf(fp, "Number of nodes:\n");
+	fprintf(fp, "%d\n", btree->tree[b].nnodes);
+	fprintf(fp, "Number of classes:\n");
+	fprintf(fp, "%d\n", btree->tree[b].node[0].nclasses);
+	fprintf(fp, "Number of features:\n");
+	fprintf(fp, "%d\n", btree->tree[b].node[0].nvar);
+	fprintf(fp, "Tree structure:\n");
+	fprintf(fp, "terminal\tndata\t");
+	for (j = 0; j < btree->tree[b].node[0].nclasses; j++)
+	    fprintf(fp, "data_cl%d\t", j + 1);
+	for (j = 0; j < btree->tree[b].node[0].nclasses; j++)
+	    fprintf(fp, "prior_cl%d\t", j + 1);
+	fprintf(fp,
+		"class\tchild_left\tchild_right\tsplit_variable\tsplit_value\n");
+	for (i = 0; i < btree->tree[b].nnodes; i++) {
+	    fprintf(fp, "%d\t%d\t", btree->tree[b].node[i].terminal,
+		    btree->tree[b].node[i].npoints);
+	    for (j = 0; j < btree->tree[b].node[i].nclasses; j++)
+		fprintf(fp, "%d\t",
+			btree->tree[b].node[i].npoints_for_class[j]);
+	    for (j = 0; j < btree->tree[b].node[i].nclasses; j++)
+		fprintf(fp, "%f\t", btree->tree[b].node[i].priors[j]);
+	    if (btree->tree[b].node[i].terminal)
+		fprintf(fp, "%d\n", btree->tree[b].node[i].class);
+	    else
+		fprintf(fp, "%d\t%d\t%d\t%d\t%f\n",
+			btree->tree[b].node[i].class,
+			btree->tree[b].node[i].left,
+			btree->tree[b].node[i].right,
+			btree->tree[b].node[i].var,
+			btree->tree[b].node[i].value);
+	}
+    }
+
+
+    if (features->f_pca[0]) {
+	fprintf(fp, "#####################\n");
+	fprintf(fp, "PRINC. COMP.:\n");
+	fprintf(fp, "#####################\n");
+
+	fprintf(fp, "Number of pc:\n");
+	fprintf(fp, "%d\n", features->npc);
+
+	for (i = 0; i < features->f_pca[1]; i++) {
+	    fprintf(fp, "PCA: Layer %d\n", i + 1);
+	    write_pca(fp, &(features->pca[i]));
+	}
+    }
+    fclose(fp);
+}
+
+int predict_tree_multiclass(Tree * tree, double *x)
+
+     /* 
+        multiclass problems: given a tree model, return the predicted class 
+        of a test point x
+      */
+{
+    int act_node;
+
+    act_node = 0;
+
+    for (;;) {
+	if (tree->node[act_node].terminal) {
+	    return tree->node[act_node].class;
+	}
+	else {
+	    if (x[tree->node[act_node].var] <= tree->node[act_node].value)
+		act_node = tree->node[act_node].left;
+	    else
+		act_node = tree->node[act_node].right;
+	}
+    }
+}
+
+double predict_tree_2class(Tree * tree, double *x)
+
+     /*
+        2-class problems: given a tree model, return the proportion of data
+        in the terminal node (with sign) of a test point x
+      */
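+     /*
+        e.g., assuming -1/+1 class labels, a terminal node with priors
+        {0.8, 0.2} and class -1 yields -0.8: the sign carries the class,
+        the magnitude the node purity.
+      */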
+{
+    int act_node;
+
+    act_node = 0;
+
+    for (;;) {
+	if (tree->node[act_node].terminal) {
+	    if (tree->node[act_node].priors[0] >
+		tree->node[act_node].priors[1]) {
+		return tree->node[act_node].priors[0] *
+		    tree->node[act_node].class;
+	    }
+	    else {
+		return tree->node[act_node].priors[1] *
+		    tree->node[act_node].class;
+	    }
+	}
+	else {
+	    if (x[tree->node[act_node].var] <= tree->node[act_node].value)
+		act_node = tree->node[act_node].left;
+	    else
+		act_node = tree->node[act_node].right;
+	}
+    }
+}
+
+void test_tree(Tree * tree, Features * features, char *file)
+
+     /*
+        test a tree model on a set of data (features) and write the results
+        into a file. Accuracy and per-class errors are printed to standard
+        output.
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    int predI;
+    double predD;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_tree-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if (features->nclasses == 2) {
+		    if ((predD =
+			 predict_tree_2class(tree,
+					     features->value[i])) *
+			features->class[i] <= 0) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%f\n", features->class[i], predD);
+		}
+		else {
+		    if ((predI =
+			 predict_tree_multiclass(tree,
+						 features->value[i])) !=
+			features->class[i]) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%d\n", features->class[i], predI);
+		}
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+double predict_btree_2class(BTree * btree, double *x)
+
+     /*
+        for 2-class problems: given a btree model, return the predicted
+        margin of a test point x
+      */
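+     /*
+        Since the tree weights are normalized to sum to 1, with -1/+1
+        class labels the returned margin lies in [-1, 1]: its sign gives
+        the predicted class and its magnitude the confidence of the
+        ensemble.
+      */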
+{
+    int b;
+    double predict;
+
+    predict = 0.0;
+    for (b = 0; b < btree->ntrees; b++) {
+	predict +=
+	    predict_tree_multiclass(&(btree->tree[b]), x) * btree->weights[b];
+    }
+    return predict;
+}
+
+int predict_btree_multiclass(BTree * btree, double *x, int nclasses,
+			     int *classes)
+
+     /*
+        for multiclass problems: given a btree model, return the predicted
+        class of a test point x
+      */
+{
+    int *predict;
+    int b, j;
+    int pred_class;
+    int max_class;
+    int max;
+
+    predict = (int *)G_calloc(nclasses, sizeof(int));
+
+    for (b = 0; b < btree->ntrees; b++) {
+	pred_class = predict_tree_multiclass(&(btree->tree[b]), x);
+	for (j = 0; j < nclasses; j++) {
+	    if (pred_class == classes[j]) {
+		predict[j] += 1;
+	    }
+	}
+    }
+
+    max = 0;
+    max_class = 0;
+    for (j = 0; j < nclasses; j++) {
+	if (predict[j] > max) {
+	    max = predict[j];
+	    max_class = j;
+	}
+    }
+
+    G_free(predict);
+    return classes[max_class];
+}
+
+
+void test_btree(BTree * btree, Features * features, char *file)
+
+     /*
+        test a btree model on a set of data (features) and write the
+        results into a file. Accuracy and per-class errors are printed
+        to standard output.
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    int predI;
+    double predD;
+    double *error;
+    double accuracy;
+
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_btree-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    accuracy = 0.0;
+    for (i = 0; i < features->nexamples; i++) {
+	for (j = 0; j < features->nclasses; j++) {
+	    if (features->class[i] == features->p_classes[j]) {
+		data_in_each_class[j] += 1;
+		if (features->nclasses == 2) {
+		    if ((predD =
+			 predict_btree_2class(btree,
+					      features->value[i])) *
+			features->class[i] <= 0) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%f\n", features->class[i], predD);
+		}
+		else {
+		    if ((predI =
+			 predict_btree_multiclass(btree, features->value[i],
+						  features->nclasses,
+						  features->p_classes)) !=
+			features->class[i]) {
+			error[j] += 1.0;
+			accuracy += 1.0;
+		    }
+		    fprintf(fp, "%d\t%d\n", features->class[i], predI);
+		}
+		break;
+	    }
+	}
+    }
+
+    accuracy /= features->nexamples;
+    accuracy = 1.0 - accuracy;
+
+    fclose(fp);
+
+    fprintf(stdout, "Accuracy: %f\n", accuracy);
+    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", features->p_classes[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", data_in_each_class[j]);
+    }
+    fprintf(stdout, "\n");
+    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%d", (int)error[j]);
+    }
+    fprintf(stdout, "\n");
+
+    for (j = 0; j < features->nclasses; j++) {
+	error[j] /= data_in_each_class[j];
+    }
+
+    fprintf(stdout, "Perrors\t%f", error[0]);
+    for (j = 1; j < features->nclasses; j++) {
+	fprintf(stdout, "\t%f", error[j]);
+    }
+    fprintf(stdout, "\n");
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+void test_btree_progressive(BTree * btree, Features * features, char *file)
+
+     /*
+        test a btree model on a set of data (features) for an increasing
+        number of models and write the final results into a file. Accuracy
+        and per-class errors are printed to standard output at each step.
+      */
+{
+    int i, j;
+    int *data_in_each_class;
+    FILE *fp;
+    char tempbuf[500];
+    int predI;
+    double predD;
+    double *error;
+    double accuracy;
+    int b;
+
+    fp = fopen(file, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "test_btree_progressive-> Can't open file %s for writing", file);
+	G_fatal_error(tempbuf);
+    }
+
+    data_in_each_class = (int *)G_calloc(features->nclasses, sizeof(int));
+    error = (double *)G_calloc(features->nclasses, sizeof(double));
+
+    for (b = 1; b <= btree->ntrees; b++) {
+	if ((b < 100 && b % 2 == 1) || (b >= 100) || (b == btree->ntrees)) {
+	    /*
+	       if((b<500 && b%10==0) || (b<5000 && b%100==0) || (b%1000==0)||
+	       (b==btree->ntrees)){
+	     */
+	    accuracy = 0.0;
+	    for (j = 0; j < features->nclasses; j++) {
+		error[j] = .0;
+		data_in_each_class[j] = 0;
+	    }
+	    for (i = 0; i < features->nexamples; i++) {
+		for (j = 0; j < features->nclasses; j++) {
+		    if (features->class[i] == features->p_classes[j]) {
+			data_in_each_class[j] += 1;
+			if (features->nclasses == 2) {
+			    if ((predD =
+				 predict_btree_2class_progressive(btree,
+								  features->
+								  value[i],
+								  b))
+				* features->class[i] <= 0) {
+				error[j] += 1.0;
+				accuracy += 1.0;
+			    }
+			    if (b == btree->ntrees)
+				fprintf(fp, "%d\t%f\n", features->class[i],
+					predD);
+			}
+			else {
+			    if ((predI =
+				 predict_btree_multiclass_progressive(btree,
+								      features->
+								      value
+								      [i],
+								      features->
+								      nclasses,
+								      features->
+								      p_classes,
+								      b))
+				!= features->class[i]) {
+				error[j] += 1.0;
+				accuracy += 1.0;
+			    }
+			    if (b == btree->ntrees)
+				fprintf(fp, "%d\t%d\n", features->class[i],
+					predI);
+			}
+			break;
+		    }
+		}
+	    }
+	    accuracy /= features->nexamples;
+	    accuracy = 1.0 - accuracy;
+
+	    if (b == btree->ntrees)
+		fclose(fp);
+
+	    fprintf(stdout, "nmodels = %d\n", b);
+	    fprintf(stdout, "Accuracy: %f\n", accuracy);
+	    fprintf(stdout, "Class\t%d", features->p_classes[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%d", features->p_classes[j]);
+	    }
+	    fprintf(stdout, "\n");
+	    fprintf(stdout, "Ndata\t%d", data_in_each_class[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%d", data_in_each_class[j]);
+	    }
+	    fprintf(stdout, "\n");
+	    fprintf(stdout, "Nerrors\t%d", (int)error[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%d", (int)error[j]);
+	    }
+	    fprintf(stdout, "\n");
+
+	    for (j = 0; j < features->nclasses; j++) {
+		error[j] /= data_in_each_class[j];
+	    }
+
+	    fprintf(stdout, "Perrors\t%f", error[0]);
+	    for (j = 1; j < features->nclasses; j++) {
+		fprintf(stdout, "\t%f", error[j]);
+	    }
+	    fprintf(stdout, "\n");
+	    fflush(stdout);
+	}
+    }
+    G_free(data_in_each_class);
+    G_free(error);
+}
+
+double predict_btree_2class_progressive(BTree * btree, double *x, int bmax)
+
+     /*
+        for 2-class problems: given a btree model, return the predicted
+        margin of a test point x, using only the first bmax models
+      */
+{
+    int b;
+    double predict;
+
+    predict = 0.0;
+    for (b = 0; b < bmax; b++) {
+	predict +=
+	    predict_tree_multiclass(&(btree->tree[b]), x) * btree->weights[b];
+    }
+    return predict;
+}
+
+int predict_btree_multiclass_progressive(BTree * btree, double *x,
+					 int nclasses, int *classes, int bmax)
+
+     /*
+        for multiclass problems: given a btree model, return the predicted
+        class of a test point x, using only the first bmax models
+      */
+{
+    int *predict;
+    int b, j;
+    int pred_class;
+    int max_class;
+    int max;
+
+    predict = (int *)G_calloc(nclasses, sizeof(int));
+
+    for (b = 0; b < bmax; b++) {
+	pred_class = predict_tree_multiclass(&(btree->tree[b]), x);
+	for (j = 0; j < nclasses; j++) {
+	    if (pred_class == classes[j]) {
+		predict[j] += 1;
+	    }
+	}
+    }
+
+    max = 0;
+    max_class = 0;
+    for (j = 0; j < nclasses; j++) {
+	if (predict[j] > max) {
+	    max = predict[j];
+	    max_class = j;
+	}
+    }
+
+    G_free(predict);
+    return classes[max_class];
+}
+
+
+void compute_tree_boosting_parallel(BTree * btree, int boosting,
+				    int parallel_boosting, double w,
+				    int nsamples, int nvar, double **data,
+				    int *data_class, int nclasses,
+				    int *classes, int stamps, int minsize,
+				    int weights_boosting, double *costs)
+
+     /*
+        receives in input training data of dimensions nsamples x nvar,
+        with class labels data_class, the possible classes (of length nclasses)
+        and computes a boosting tree model (number of models = boosting) using w,
+        then extends it to parallel_boosting models by resampling the
+        per-sample weight evolution.
+
+        Optionally, the user can build stamps and set the minimal number
+        of cases within a node as a stopping criterion.
+      */
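+     /*
+        After the first `boosting` standard rounds, each sample's weight
+        evolution is summarized by its mean and variance, and the remaining
+        (parallel_boosting - boosting) rounds draw their sampling weights
+        from a Gamma(alpha, beta) distribution with alpha = mean^2 / variance
+        and beta = variance / mean, i.e. matching the observed first two
+        moments.
+      */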
+{
+    int i, j, b;
+    int *bsamples;
+    double **xdata_training;
+    int *xclasses_training;
+    double *prob;
+    double e00, e01, e10, e11, prior0, prior1;
+    int *error;
+    double eps, totprob;
+    double totbeta;
+    double mean, variance, alpha, beta;
+    int idum = 1;
+
+
+#ifdef LEAVEOUTEASYPOINTS
+#undef LEAVEOUTEASYPOINTS
+#endif
+
+
+#ifdef LEAVEOUTEASYPOINTS
+    int **correct;
+
+    correct = (int **)G_calloc(nsamples, sizeof(int *));
+    for (i = 0; i < nsamples; i++) {
+	correct[i] = (int *)G_calloc(boosting, sizeof(int));
+    }
+#endif
+
+    btree->w_evolution = (double **)G_calloc(nsamples, sizeof(double *));
+    for (i = 0; i < nsamples; i++)
+	btree->w_evolution[i] =
+	    (double *)G_calloc(parallel_boosting + 3, sizeof(double));
+
+    btree->tree = (Tree *) G_calloc(parallel_boosting, sizeof(Tree));
+    btree->ntrees = parallel_boosting;
+    btree->weights = (double *)G_calloc(btree->ntrees, sizeof(double));
+    btree->w = w;
+
+    prob = (double *)G_calloc(nsamples, sizeof(double));
+    bsamples = (int *)G_calloc(nsamples, sizeof(int));
+    xdata_training = (double **)G_calloc(nsamples, sizeof(double *));
+    xclasses_training = (int *)G_calloc(nsamples, sizeof(int));
+    error = (int *)G_calloc(nsamples, sizeof(int));
+
+    for (i = 0; i < nsamples; i++) {
+	prob[i] = 1.0 / nsamples;
+    }
+
+    for (b = 0; b < boosting; b++) {
+	for (i = 0; i < nsamples; i++)
+	    btree->w_evolution[i][b] = prob[i];
+
+	Bootsamples(nsamples, prob, bsamples);
+	for (i = 0; i < nsamples; i++) {
+	    xdata_training[i] = data[bsamples[i]];
+	    xclasses_training[i] = data_class[bsamples[i]];
+	}
+	compute_tree(&(btree->tree[b]), nsamples, nvar, xdata_training,
+		     xclasses_training, nclasses, classes, stamps, minsize,
+		     costs);
+
+	e00 = e01 = e10 = e11 = prior0 = prior1 = 0.0;
+	for (i = 0; i < nsamples; i++) {
+	    if (data_class[i] == classes[0]) {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+#ifdef LEAVEOUTEASYPOINTS
+		    correct[i][b] = FALSE;
+#endif
+		    error[i] = TRUE;
+		    e01 += prob[i];
+		}
+		else {
+#ifdef LEAVEOUTEASYPOINTS
+		    correct[i][b] = TRUE;
+#endif
+		    error[i] = FALSE;
+		    e00 += prob[i];
+		}
+		prior0 += prob[i];
+	    }
+	    else {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+#ifdef LEAVEOUTEASYPOINTS
+		    correct[i][b] = FALSE;
+#endif
+		    error[i] = TRUE;
+		    e10 += prob[i];
+		}
+		else {
+#ifdef LEAVEOUTEASYPOINTS
+		    correct[i][b] = TRUE;
+#endif
+		    error[i] = FALSE;
+		    e11 += prob[i];
+		}
+		prior1 += prob[i];
+	    }
+	}
+	eps = (1.0 - e00 / (e00 + e01)) * prior0 * btree->w +
+	    (1.0 - e11 / (e10 + e11)) * prior1 * (2.0 - btree->w);
+	if (eps > 0.0 && eps < 0.5) {
+	    btree->weights[b] = 0.5 * log((1.0 - eps) / eps);
+	    totprob = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		if (error[i]) {
+		    if (data_class[i] == classes[0]) {
+			prob[i] = prob[i] * exp(btree->weights[b] * btree->w);
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		}
+		else {
+		    if (data_class[i] == classes[0]) {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] * btree->w);
+		    }
+		}
+		totprob += prob[i];
+	    }
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] /= totprob;
+	    }
+	}
+	else {
+	    btree->weights[b] = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] = 1.0 / nsamples;
+	    }
+	}
+    }
+
+    for (i = 0; i < nsamples; i++) {
+#ifdef LEAVEOUTEASYPOINTS
+	double p0, p1, p00, p11;
+
+	p0 = p1 = p00 = p11 = 0.0;
+	for (b = 20; b < boosting - 1; b++) {
+	    if (correct[i][b] == 0) {
+		p0 += 1.0;
+		if (correct[i][b + 1] == 0) {
+		    p00 += 1.0;
+		}
+	    }
+	    else {
+		p1 += 1.0;
+		if (correct[i][b + 1] == 1) {
+		    p11 += 1.0;
+		}
+	    }
+	}
+	fprintf(stdout, "%f\t%f\n", p11 / p1, p00 / p0);
+#endif
+
+
+
+	mean = mean_of_double_array(btree->w_evolution[i], boosting);
+	variance = var_of_double_array(btree->w_evolution[i], boosting);
+	alpha = mean * mean / variance;
+	beta = variance / mean;
+
+
+	for (j = boosting; j < parallel_boosting; j++)
+	    btree->w_evolution[i][j] = gamdev(alpha, beta, &idum);
+    }
+
+    for (b = boosting; b < parallel_boosting; b++) {
+	for (i = 0; i < nsamples; i++)
+	    prob[i] = btree->w_evolution[i][b];
+	Bootsamples(nsamples, prob, bsamples);
+	for (i = 0; i < nsamples; i++) {
+	    xdata_training[i] = data[bsamples[i]];
+	    xclasses_training[i] = data_class[bsamples[i]];
+	}
+	compute_tree(&(btree->tree[b]), nsamples, nvar, xdata_training,
+		     xclasses_training, nclasses, classes, stamps, minsize,
+		     costs);
+
+	e00 = e01 = e10 = e11 = prior0 = prior1 = 0.0;
+	for (i = 0; i < nsamples; i++) {
+	    if (data_class[i] == classes[0]) {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+		    error[i] = TRUE;
+		    e01 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e00 += prob[i];
+		}
+		prior0 += prob[i];
+	    }
+	    else {
+		if (predict_tree_multiclass(&(btree->tree[b]), data[i]) !=
+		    data_class[i]) {
+		    error[i] = TRUE;
+		    e10 += prob[i];
+		}
+		else {
+		    error[i] = FALSE;
+		    e11 += prob[i];
+		}
+		prior1 += prob[i];
+	    }
+	}
+	eps = (1.0 - e00 / (e00 + e01)) * prior0 * btree->w +
+	    (1.0 - e11 / (e10 + e11)) * prior1 * (2.0 - btree->w);
+	if (eps > 0.0 && eps < 0.5) {
+	    btree->weights[b] = 0.5 * log((1.0 - eps) / eps);
+	    totprob = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		if (error[i]) {
+		    if (data_class[i] == classes[0]) {
+			prob[i] = prob[i] * exp(btree->weights[b] * btree->w);
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		}
+		else {
+		    if (data_class[i] == classes[0]) {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] *
+					  (2.0 - btree->w));
+		    }
+		    else {
+			prob[i] =
+			    prob[i] * exp(-btree->weights[b] * btree->w);
+		    }
+		}
+		totprob += prob[i];
+	    }
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] /= totprob;
+	    }
+	}
+	else {
+	    btree->weights[b] = 0.0;
+	    for (i = 0; i < nsamples; i++) {
+		prob[i] = 1.0 / nsamples;
+	    }
+	}
+    }
+
+
+    totbeta = 0.0;
+    for (b = 0; b < btree->ntrees; b++) {
+	totbeta += btree->weights[b];
+    }
+    if (totbeta > 0) {
+	for (b = 0; b < btree->ntrees; b++) {
+	    btree->weights[b] /= totbeta;
+	}
+    }
+    else {
+	fprintf(stderr, "WARNING: weights all null, set to 1/nmodels\n");
+	for (b = 0; b < btree->ntrees; b++) {
+	    btree->weights[b] = 1. / btree->ntrees;
+	}
+    }
+
+    G_free(bsamples);
+    G_free(xclasses_training);
+    G_free(prob);
+    G_free(error);
+    G_free(xdata_training);
+
+}

Modified: grass-addons/grass7/imagery/i.pr/PRLIB/write_matrix.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/PRLIB/write_matrix.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/PRLIB/write_matrix.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,24 @@
+#include <grass/gis.h>
+#include <stdio.h>
+
+void write_matrix(char *outfile, double **matrix, int r, int c)
+{
+    FILE *fp;
+    int i, j;
+    char tempbuf[500];
+
+    fp = fopen(outfile, "w");
+    if (fp == NULL) {
+	sprintf(tempbuf, "write_matrix-> Can't open file <%s> for writing",
+		outfile);
+	G_fatal_error(tempbuf);
+    }
+
+    for (i = 0; i < r; i++) {
+	fprintf(fp, "%e", matrix[i][0]);
+	for (j = 1; j < c; j++)
+	    fprintf(fp, "\t%e", matrix[i][j]);
+	fprintf(fp, "\n");
+    }
+    fclose(fp);
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_blob/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_blob/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_blob/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,148 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+int main(int argc, char *argv[])
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt4;
+    struct Option *opt5;
+    char tempbuf[500];
+    char *mapset;
+    struct Cell_head cellhd;
+    double **matrix;
+    DCELL *rowbuf;
+    DCELL *tf;
+    int fd;
+    double minv, maxv;
+    int minp, maxp;
+    int i, j;
+    Blob *blobs;
+    int nblobs, npoints;
+    BlobSites *sites;
+
+    /* Define the different options */
+
+    opt1 = G_define_option();
+    opt1->key = "input_map";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->gisprompt = "old,cell,raster";
+    opt1->description = "Input raster map to search for blobs.";
+
+    opt2 = G_define_option();
+    opt2->key = "min_pixels";
+    opt2->type = TYPE_INTEGER;
+    opt2->required = YES;
+    opt2->description = "minimum number of pixels defining a blob";
+
+    opt3 = G_define_option();
+    opt3->key = "max_pixels";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = YES;
+    opt3->description = "maximum number of pixels defining a blob";
+
+    opt4 = G_define_option();
+    opt4->key = "min_value";
+    opt4->type = TYPE_DOUBLE;
+    opt4->required = YES;
+    opt4->description = "minimum value of the map for defining a blob";
+
+    opt5 = G_define_option();
+    opt5->key = "max_value";
+    opt5->type = TYPE_DOUBLE;
+    opt5->required = YES;
+    opt5->description =
+	"maximum value of the map for defining a blob\n\n\tThe output is in site file format, but it is printed to standard output.";
+
+  /***** Start of main *****/
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to search for blobs. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    if (G_parser(argc, argv) < 0)
+	exit(EXIT_FAILURE);
+
+    sscanf(opt2->answer, "%d", &minp);
+    sscanf(opt3->answer, "%d", &maxp);
+    sscanf(opt4->answer, "%lf", &minv);
+    sscanf(opt5->answer, "%lf", &maxv);
+
+
+    if ((mapset = G_find_cell2(opt1->answer, "")) == NULL) {
+	sprintf(tempbuf, "can't open raster map <%s> for reading",
+		opt1->answer);
+	G_fatal_error(tempbuf);
+    }
+
+    if ((fd = G_open_cell_old(opt1->answer, mapset)) < 0) {
+	sprintf(tempbuf, "error opening raster map <%s>", opt1->answer);
+	G_fatal_error(tempbuf);
+    }
+
+    G_get_window(&cellhd);
+
+    rowbuf = (DCELL *) G_calloc(cellhd.cols * cellhd.rows, sizeof(DCELL));
+    tf = rowbuf;
+    matrix = (double **)G_calloc(cellhd.rows, sizeof(double *));
+    for (i = 0; i < cellhd.rows; i++)
+	matrix[i] = (double *)G_calloc(cellhd.cols, sizeof(double));
+
+    for (i = 0; i < cellhd.rows; i++) {
+	G_get_d_raster_row(fd, tf, i);
+	for (j = 0; j < cellhd.cols; j++) {
+	    if (G_is_d_null_value(tf))
+		*tf = maxv + 1.0;
+	    matrix[i][j] = *tf;
+	    tf++;
+	}
+    }
+    G_close_cell(fd);
+
+    nblobs = 0;
+    npoints = 0;
+    find_blob(matrix, cellhd.rows, cellhd.cols, &blobs, &npoints, &nblobs,
+	      minv, maxv);
+    sites = (BlobSites *) G_calloc(nblobs, sizeof(BlobSites));
+
+    extract_sites_from_blob(blobs, npoints, nblobs, &cellhd, sites, matrix);
+
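+    /*
+       each output line has the form east|north|#npixels%minvalue,
+       e.g. 634512.50|4920831.25|#42%0.153000 (values are illustrative)
+     */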
+    for (i = 0; i < nblobs; i++)
+	if ((sites[i].n >= minp) && (sites[i].n <= maxp))
+	    fprintf(stdout, "%f|%f|#%d%s%f\n", sites[i].east, sites[i].north,
+		    sites[i].n, "%", sites[i].min);
+
+
+    return 0;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_classify/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_classify/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_classify/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,463 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+
+int extract_array_with_null(int R, int C, int c, int bc, double **mat,
+			    double *wind_vect);
+
+int main(int argc, char *argv[])
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    int model_type;
+    NearestNeighbor nn;
+    GaussianMixture gm;
+    Tree tree;
+    SupportVectorMachine svm;
+    BTree btree;
+    BSupportVectorMachine bsvm;
+    char tmpbuf[500];
+    char *mapset;
+    struct Cell_head cellhd;
+    double ***matrix;
+    DCELL *rowbuf;
+    DCELL *tf;
+    int *fd;
+    int r, c, l;
+    Features features;
+    int i, j;
+    int fdout;
+    int *space_for_each_layer;
+    double *wind_vect;
+    double *X;
+    int borderC, borderR, borderC_upper, dim;
+    double mean, sd;
+    int corrent_feature;
+    double *projected;
+    int *compute_features;
+    DCELL *output_cell;
+    int set_null;
+    int n_input_map;
+    int R, C;
+    int last_row;
+
+    /* Define the different options */
+
+    opt1 = G_define_option();
+    opt1->key = "input_map";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->multiple = YES;
+    opt1->gisprompt = "old,cell,raster";
+    opt1->description =
+	"Input raster maps to be classified.\n\t\tAt least as many maps as were used for the training are required;\n\t\tif more are given, the extra maps will be ignored.\n\t\tWARNING: the maps must be given in the same order as used for the training.";
+
+    opt2 = G_define_option();
+    opt2->key = "model";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description =
+	"Input file containing the model (output of i.pr_model).\n\t\tIf the data used for model development are not GRASS_data the program will abort.";
+
+    opt3 = G_define_option();
+    opt3->key = "output_map";
+    opt3->type = TYPE_STRING;
+    opt3->required = YES;
+    opt3->gisprompt = "new,cell,raster";
+    opt3->description =
+	"Name of the output raster map containing the resulting classification.";
+
+  /***** Start of main *****/
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to classify raster map based on model defined in i.pr.* modules. "
+	 "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	 "Decision Tree and SVM classification techniques. Also includes "
+	 "cross-validation and bagging methods for model validation.");
+
+    if (G_parser(argc, argv) < 0)
+	exit(EXIT_FAILURE);
+
+    /*read the model */
+    model_type = read_model(opt2->answer, &features, &nn, &gm, &tree,
+			    &svm, &btree, &bsvm);
+
+    if (features.training.data_type != GRASS_data) {
+	sprintf(tmpbuf, "Model built using non-GRASS data\n");
+	G_fatal_error(tmpbuf);
+    }
+    if (model_type == 0) {
+	sprintf(tmpbuf, "Model not recognized\n");
+	G_fatal_error(tmpbuf);
+    }
+
+    if (model_type == GM_model) {
+	compute_test_gm(&gm);
+    }
+
+    /* load current region */
+    G_get_window(&cellhd);
+    if (fabs((cellhd.ew_res - features.training.ew_res) /
+	     features.training.ew_res) > 0.1) {
+	sprintf(tmpbuf,
+		"EW resolution of training data and test map differ by more than 10%%\n");
+	G_warning(tmpbuf);
+    }
+    if (fabs((cellhd.ns_res - features.training.ns_res) /
+	     features.training.ns_res) > 0.1) {
+	sprintf(tmpbuf,
+		"NS resolution of training data and test map differ by more than 10%%\n");
+	G_warning(tmpbuf);
+    }
+
+    /*compute features space */
+    dim = features.training.rows * features.training.cols;
+
+    space_for_each_layer = (int *)G_calloc(features.training.nlayers,
+					   sizeof(int));
+    compute_features =
+	(int *)G_calloc(features.training.nlayers, sizeof(int));
+    for (j = 0; j < features.training.nlayers; j++) {
+	if (features.f_mean[0]) {
+	    for (i = 2; i < 2 + features.f_mean[1]; i++) {
+		if (features.f_mean[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features.f_variance[0]) {
+	    for (i = 2; i < 2 + features.f_variance[1]; i++) {
+		if (features.f_variance[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features.f_pca[0]) {
+	    for (i = 2; i < 2 + features.f_pca[1]; i++) {
+		if (features.f_pca[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += dim;
+		}
+	    }
+	}
+	if (space_for_each_layer[j] == 0) {
+	    space_for_each_layer[j] = dim;
+	}
+    }
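+    /*
+       space_for_each_layer[j] now holds the number of feature values that
+       layer j contributes per window: one for each requested mean or
+       variance, dim for a PCA layer, and the full raw window (dim) when
+       no feature was requested for that layer.
+     */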
+
+    /*alloc memory */
+    matrix =
+	(double ***)G_calloc(features.training.nlayers, sizeof(double **));
+    for (l = 0; l < features.training.nlayers; l++) {
+	matrix[l] =
+	    (double **)G_calloc(features.training.rows, sizeof(double *));
+	for (r = 0; r < features.training.rows; r++) {
+	    matrix[l][r] = (double *)G_calloc(cellhd.cols, sizeof(double));
+	}
+    }
+    fd = (int *)G_calloc(features.training.nlayers, sizeof(int));
+    X = (double *)G_calloc(features.examples_dim, sizeof(double));
+
+    wind_vect = (double *)G_calloc(dim, sizeof(double));
+
+    /* this makes the program work, but... */
+    features.npc = features.examples_dim;
+
+    projected = (double *)G_calloc(features.npc, sizeof(double));
+
+    output_cell = G_allocate_d_raster_buf();
+
+    /*open the input maps */
+    n_input_map = 0;
+    for (l = 0; opt1->answers[l]; l++) {
+	if ((mapset = G_find_cell2(opt1->answers[l], "")) == NULL) {
+	    sprintf(tmpbuf, "raster map [%s] not available",
+		    opt1->answers[l]);
+	    G_fatal_error(tmpbuf);
+	}
+
+	if ((fd[l] = G_open_cell_old(opt1->answers[l], mapset)) < 0) {
+	    sprintf(tmpbuf, "error opening raster map [%s]",
+		    opt1->answers[l]);
+	    G_fatal_error(tmpbuf);
+	}
+	n_input_map += 1;
+    }
+
+    if (n_input_map < features.training.nlayers) {
+	sprintf(tmpbuf, "Model requires %d input maps\n",
+		features.training.nlayers);
+	G_fatal_error(tmpbuf);
+    }
+    if (n_input_map > features.training.nlayers) {
+	sprintf(tmpbuf, "Only first %d maps considered\n",
+		features.training.nlayers);
+	G_warning(tmpbuf);
+    }
+
+    /*open the output map */
+    fdout = open_new_DCELL(opt3->answer);
+
+    /*useful vars */
+    borderC = (features.training.cols - 1) / 2;
+    borderC_upper = cellhd.cols - borderC;
+    borderR = (features.training.rows - 1) / 2;
+    last_row = features.training.rows - 1;
+
+
+    /*read first rows */
+    for (r = 0; r < features.training.rows; r++) {
+	rowbuf =
+	    (DCELL *) G_calloc(features.training.nlayers * cellhd.cols,
+			       sizeof(DCELL));
+	tf = rowbuf;
+	for (l = 0; l < features.training.nlayers; l++) {
+	    if (G_get_d_raster_row(fd[l], tf, r) < 0) {
+		sprintf(tmpbuf, "Error reading raster map <%s>\n",
+			opt1->answers[l]);
+		G_fatal_error(tmpbuf);
+	    }
+	    for (c = 0; c < cellhd.cols; c++) {
+		if (G_is_d_null_value(tf))
+		    *tf = 0.0;
+		matrix[l][r][c] = *tf;
+		tf++;
+	    }
+	}
+	G_free(rowbuf);
+    }
+
+    /*write the first rows of the output map */
+    for (c = 1; c <= cellhd.cols; c++)
+	G_set_d_null_value(output_cell, c);
+    for (r = 0; r < borderR; r++)
+	G_put_d_raster_row(fdout, output_cell);
+
+    /*computing... */
+    r = features.training.rows;
+
+    while (r <= cellhd.rows) {
+	for (c = borderC; c < borderC_upper; c++) {
+	    corrent_feature = 0;
+	    for (l = 0; l < features.training.nlayers; l++) {
+		set_null = extract_array_with_null(features.training.rows,
+						   features.training.cols,
+						   c, borderC, matrix[l],
+						   wind_vect);
+
+		if (set_null) {
+		    break;
+		}
+		else {
+		    mean = mean_of_double_array(wind_vect, dim);
+		    sd = sd_of_double_array_given_mean(wind_vect, dim, mean);
+
+		    if (features.f_normalize[0]) {
+			for (j = 2; j < 2 + features.f_normalize[1]; j++) {
+			    if (features.f_normalize[j] == l) {
+				for (i = 0; i < dim; i++) {
+				    wind_vect[i] = (wind_vect[i] - mean) / sd;
+				}
+				break;
+			    }
+			}
+		    }
+
+		    if (!compute_features[l]) {
+			for (i = 0; i < dim; i++) {
+			    X[corrent_feature + i] = wind_vect[i];
+			}
+			corrent_feature += dim;
+		    }
+		    else {
+			if (features.f_mean[0]) {
+			    for (j = 2; j < 2 + features.f_mean[1]; j++) {
+				if (features.f_mean[j] == l) {
+				    X[corrent_feature] = mean;
+				    corrent_feature += 1;
+				    break;
+				}
+			    }
+			}
+			if (features.f_variance[0]) {
+			    for (j = 2; j < 2 + features.f_variance[1]; j++) {
+				if (features.f_variance[j] == l) {
+				    X[corrent_feature] = sd * sd;
+				    corrent_feature += 1;
+				    break;
+				}
+			    }
+			}
+			if (features.f_pca[0]) {
+			    for (j = 2; j < 2 + features.f_pca[1]; j++) {
+				if (features.f_pca[j] == l) {
+				    product_double_vector_double_matrix
+					(features.pca[l].eigmat, wind_vect,
+					 dim, features.npc, projected);
+
+				    for (i = 0; i < features.npc; i++) {
+					X[corrent_feature + i] = projected[i];
+				    }
+				    corrent_feature += features.npc;
+				    break;
+				}
+			    }
+			}
+		    }
+		}
+	    }
+	    if (set_null) {
+		output_cell[c] = 0.0;
+	    }
+	    else {
+		if (features.f_standardize[0]) {
+		    for (i = 2; i < 2 + features.f_standardize[1]; i++) {
+			X[features.f_standardize[i]] =
+			    (X[features.f_standardize[i]] -
+			     features.mean[i - 2]) / features.sd[i - 2];
+		    }
+		}
+		if (features.nclasses == 2) {
+		    switch (model_type) {
+		    case NN_model:
+			output_cell[c] =
+			    predict_nn_2class(&nn, X, nn.k, features.nclasses,
+					      features.p_classes);
+			break;
+		    case GM_model:
+			output_cell[c] = predict_gm_2class(&gm, X);
+			break;
+		    case CT_model:
+			output_cell[c] = predict_tree_2class(&tree, X);
+			break;
+		    case SVM_model:
+			output_cell[c] = predict_svm(&svm, X);
+			break;
+		    case BCT_model:
+			output_cell[c] = predict_btree_2class(&btree, X);
+			break;
+		    case BSVM_model:
+			output_cell[c] = predict_bsvm(&bsvm, X);
+			break;
+		    default:
+			break;
+		    }
+		}
+		else {
+		    switch (model_type) {
+		    case NN_model:
+			output_cell[c] =
+			    predict_nn_multiclass(&nn, X, nn.k,
+						  features.nclasses,
+						  features.p_classes);
+			break;
+		    case GM_model:
+			output_cell[c] = predict_gm_multiclass(&gm, X);
+			break;
+		    case CT_model:
+			output_cell[c] = predict_tree_multiclass(&tree, X);
+			break;
+		    case BCT_model:
+			output_cell[c] =
+			    predict_btree_multiclass(&btree, X,
+						     features.nclasses,
+						     features.p_classes);
+			break;
+		    default:
+			break;
+		    }
+		}
+	    }
+	}
+	G_put_d_raster_row(fdout, output_cell);
+	percent(r, cellhd.rows, 1);
+
+	if (r < cellhd.rows) {
+	    for (l = 0; l < features.training.nlayers; l++) {
+		for (R = 0; R < last_row; R++) {
+		    for (C = 0; C < cellhd.cols; C++) {
+			matrix[l][R][C] = matrix[l][R + 1][C];
+		    }
+		}
+
+		rowbuf = (DCELL *) G_calloc(cellhd.cols, sizeof(DCELL));
+		tf = rowbuf;
+
+		if (G_get_d_raster_row(fd[l], tf, r) < 0) {
+		    sprintf(tmpbuf, "Error reading raster map <%s>\n",
+			    opt1->answers[l]);
+		    G_fatal_error(tmpbuf);
+		}
+		for (c = 0; c < cellhd.cols; c++) {
+		    if (G_is_d_null_value(tf))
+			*tf = 0.0;
+		    matrix[l][last_row][c] = *tf;
+		    tf++;
+		}
+		G_free(rowbuf);
+	    }
+	}
+	r += 1;
+    }
+
+    /*write the last rows of the output map */
+    for (c = 1; c <= cellhd.cols; c++)
+	G_set_d_null_value(output_cell, c);
+    for (r = 0; r < borderR; r++)
+	G_put_d_raster_row(fdout, output_cell);
+
+    G_close_cell(fdout);
+    return 0;
+}
+
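+/*
+   extract_array_with_null: copy the R x C window whose central column is c
+   (horizontal border bc) from mat into wind_vect; return 1 when the window
+   sums to zero (nulls were replaced by 0.0 upstream, so an all-null window
+   is flagged), 0 otherwise.
+ */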
+int extract_array_with_null(int R, int C, int c, int bc, double **mat,
+			    double *wind_vect)
+{
+    int i, j;
+    double sum;
+    int index;
+
+    sum = 0.0;
+    index = 0;
+    for (i = 0; i < R; i++) {
+	for (j = 0; j < C; j++) {
+	    wind_vect[index] = mat[i][c - bc + j];
+	    sum += wind_vect[index++];
+	}
+    }
+    if (sum == 0.0) {
+	return 1;
+    }
+    else {
+	return 0;
+    }
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_features/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_features/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_features/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,329 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt4;
+    struct Option *opt5;
+    struct Option *opt6;
+    struct Option *opt7;
+    struct Option *opt8;
+
+    struct Flag *flag_s;
+    char *training_file[TRAINING_MAX_INPUTFILES];
+    int ntraining_file;
+    int i, j;
+    Features features;
+    char *tmpbuf;
+    int nclasses_for_pca;
+    char tempbuf[500];
+    char opt1desc[500];
+    FILE *fp;
+
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to process training data for feature extration. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    sprintf(opt1desc,
+	    "Input files (max %d) containing training data.\n\t\t2 formats are currently supported:\n\t\t1) GRASS_data (output of i.pr_training)\n\t\t2) TABLE_data.",
+	    TRAINING_MAX_INPUTFILES);
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "training";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description = opt1desc;
+    opt1->multiple = YES;
+
+    opt2 = G_define_option();
+    opt2->key = "features";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description = "Name of the output file containing the features.";
+
+    opt4 = G_define_option();
+    opt4->key = "normalize";
+    opt4->type = TYPE_INTEGER;
+    opt4->required = NO;
+    opt4->description = "Numbers of the layers to be normalized.";
+    opt4->multiple = YES;
+
+    opt6 = G_define_option();
+    opt6->key = "mean";
+    opt6->type = TYPE_INTEGER;
+    opt6->required = NO;
+    opt6->description =
+	"Numbers of the layers on which to compute the mean value.";
+    opt6->multiple = YES;
+
+    opt7 = G_define_option();
+    opt7->key = "variance";
+    opt7->type = TYPE_INTEGER;
+    opt7->required = NO;
+    opt7->description =
+	"Numbers of the layers on which to compute the variance.";
+    opt7->multiple = YES;
+
+    opt8 = G_define_option();
+    opt8->key = "prin_comp";
+    opt8->type = TYPE_INTEGER;
+    opt8->required = NO;
+    opt8->description =
+	"Numbers of the layers on which to compute the principal components.";
+    opt8->multiple = YES;
+
+    opt3 = G_define_option();
+    opt3->key = "class_pc";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = NO;
+    opt3->description =
+	"Classes of the data to be used for computing the principal components.\n\t\tIf not set, all the examples will be used.";
+    opt3->multiple = YES;
+
+    opt5 = G_define_option();
+    opt5->key = "standardize";
+    opt5->type = TYPE_INTEGER;
+    opt5->required = NO;
+    opt5->description =
+	"Numbers of features to be standardize.\n\t\tWARNING: not related to the number of layers.";
+    opt5->multiple = YES;
+
+
+    flag_s = G_define_flag();
+    flag_s->key = 's';
+    flag_s->description =
+	"Print site file containing coordinates of examples and class labels\n\t(from training data) into features option and exit.";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+    /*read input training files */
+    ntraining_file = 0;
+    for (i = 0; opt1->answers[i]; i++) {
+	if (ntraining_file >= TRAINING_MAX_INPUTFILES) {
+	    sprintf(tempbuf, "Maximum number of allowed training files is %d",
+		    TRAINING_MAX_INPUTFILES);
+	    G_fatal_error(tempbuf);
+	}
+	training_file[ntraining_file] = opt1->answers[i];
+	ntraining_file += 1;
+    }
+
+
+    /*fill training structure */
+    inizialize_training(&(features.training));
+    for (i = 0; i < ntraining_file; i++) {
+	read_training(training_file[i], &(features.training));
+    }
+
+    if (flag_s->answer) {
+	if ((fp = fopen(opt2->answer, "w")) == NULL) {
+	    sprintf(tempbuf, "Can't open file %s for writing", opt2->answer);
+	    G_fatal_error(tempbuf);
+	}
+	for (i = 0; i < features.training.nexamples; i++) {
+	    fprintf(fp, "%f|%f|#%d\n", features.training.east[i],
+		    features.training.north[i], features.training.class[i]);
+	}
+	fclose(fp);
+	return 0;
+    }
+
+    /* which actions to perform on the data */
+    features.f_normalize =
+	(int *)G_calloc(features.training.nlayers + 1, sizeof(int));
+    features.f_standardize =
+	(int *)G_calloc(features.training.nlayers + 1, sizeof(int));
+    features.f_mean =
+	(int *)G_calloc(features.training.nlayers + 1, sizeof(int));
+    features.f_variance =
+	(int *)G_calloc(features.training.nlayers + 1, sizeof(int));
+    features.f_pca =
+	(int *)G_calloc(features.training.nlayers + 1, sizeof(int));
+
+
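+    /*
+     * Each f_* vector below uses the same encoding: [0] is a flag set to 1
+     * when the option was given, [1] holds the number of requested layers,
+     * and [2..] hold the layer numbers converted to 0-based indices.
+     */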
+    if (opt4->answers) {
+	j = 0;
+	for (i = 0; (tmpbuf = opt4->answers[i]); i++) {
+	    j += 1;
+	}
+	features.f_normalize = (int *)G_calloc(2 + j, sizeof(int));
+	features.f_normalize[0] = 1;
+	features.f_normalize[1] = j;
+	for (i = 2; i < 2 + j; i++) {
+	    sscanf(opt4->answers[i - 2], "%d", &(features.f_normalize[i]));
+	    if ((features.f_normalize[i] <= 0) ||
+		(features.f_normalize[i] > features.training.nlayers)) {
+		sprintf(tempbuf, "nlayers = %d\n", features.training.nlayers);
+		G_fatal_error(tempbuf);
+	    }
+	    features.f_normalize[i] -= 1;
+	}
+    }
+    else {
+	features.f_normalize = (int *)G_calloc(2, sizeof(int));
+    }
+
+
+
+    if (opt6->answers) {
+	j = 0;
+	for (i = 0; (tmpbuf = opt6->answers[i]); i++) {
+	    j += 1;
+	}
+	features.f_mean = (int *)G_calloc(2 + j, sizeof(int));
+	features.f_mean[0] = 1;
+	features.f_mean[1] = j;
+	for (i = 2; i < 2 + j; i++) {
+	    sscanf(opt6->answers[i - 2], "%d", &(features.f_mean[i]));
+	    if ((features.f_mean[i] <= 0) ||
+		(features.f_mean[i] > features.training.nlayers)) {
+		sprintf(tempbuf, "nlayers = %d\n", features.training.nlayers);
+		G_fatal_error(tempbuf);
+	    }
+	    features.f_mean[i] -= 1;
+	}
+    }
+    else {
+	features.f_mean = (int *)G_calloc(2, sizeof(int));
+    }
+
+
+
+    if (opt7->answers) {
+	j = 0;
+	for (i = 0; (tmpbuf = opt7->answers[i]); i++) {
+	    j += 1;
+	}
+	features.f_variance = (int *)G_calloc(2 + j, sizeof(int));
+	features.f_variance[0] = 1;
+	features.f_variance[1] = j;
+	for (i = 2; i < 2 + j; i++) {
+	    sscanf(opt7->answers[i - 2], "%d", &(features.f_variance[i]));
+	    if ((features.f_variance[i] <= 0) ||
+		(features.f_variance[i] > features.training.nlayers)) {
+		sprintf(tempbuf, "nlayers = %d\n", features.training.nlayers);
+		G_fatal_error(tempbuf);
+	    }
+	    features.f_variance[i] -= 1;
+	}
+    }
+    else {
+	features.f_variance = (int *)G_calloc(2, sizeof(int));
+    }
+
+
+
+    if (opt8->answers) {
+	j = 0;
+	for (i = 0; (tmpbuf = opt8->answers[i]); i++) {
+	    j += 1;
+	}
+	features.f_pca = (int *)G_calloc(2 + j, sizeof(int));
+	features.f_pca[0] = 1;
+	features.f_pca[1] = j;
+	for (i = 2; i < 2 + j; i++) {
+	    sscanf(opt8->answers[i - 2], "%d", &(features.f_pca[i]));
+	    if ((features.f_pca[i] <= 0) ||
+		(features.f_pca[i] > features.training.nlayers)) {
+		sprintf(tempbuf, "nlayers = %d\n", features.training.nlayers);
+		G_fatal_error(tempbuf);
+	    }
+	    features.f_pca[i] -= 1;
+	}
+    }
+    else {
+	features.f_pca = (int *)G_calloc(2, sizeof(int));
+    }
+
+
+
+    if (features.f_pca[0]) {
+	if (opt3->answers) {
+	    nclasses_for_pca = 0;
+	    for (i = 0; (tmpbuf = opt3->answers[i]); i++) {
+		nclasses_for_pca += 1;
+	    }
+	    features.pca_class =
+		(int *)G_calloc(2 + nclasses_for_pca, sizeof(int));
+	    features.pca_class[0] = 1;
+	    features.pca_class[1] = nclasses_for_pca;
+	    for (i = 2; i < 2 + nclasses_for_pca; i++) {
+		sscanf(opt3->answers[i - 2], "%d", &(features.pca_class[i]));
+	    }
+	}
+	else {
+	    features.pca_class = (int *)G_calloc(2, sizeof(int));
+	}
+    }
+
+    if (opt5->answers) {
+	j = 0;
+	for (i = 0; (tmpbuf = opt5->answers[i]); i++) {
+	    j += 1;
+	}
+	features.f_standardize = (int *)G_calloc(2 + j, sizeof(int));
+	features.f_standardize[0] = 1;
+	features.f_standardize[1] = j;
+	for (i = 2; i < 2 + j; i++) {
+	    sscanf(opt5->answers[i - 2], "%d", &(features.f_standardize[i]));
+	    features.f_standardize[i] -= 1;
+	}
+    }
+    else {
+	features.f_standardize = (int *)G_calloc(2, sizeof(int));
+    }
+
+
+
+    /*fill features structure */
+    compute_features(&features);
+
+
+    /*standardize features */
+    if (features.f_standardize[0]) {
+	standardize_features(&features);
+    }
+
+    /*write features */
+    write_features(opt2->answer, &features);
+
+    return 0;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_features_additional/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_features_additional/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_features_additional/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,378 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+void generate_features();
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+
+    char *training_file[TRAINING_MAX_INPUTFILES];
+    int ntraining_file;
+    int i;
+    Features features, features_out;
+    char tempbuf[500];
+    char opt1desc[500];
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to add new features to existing feature in i.pr.* modules. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    sprintf(opt1desc,
+	    "Input files (max %d) containing training data.\n\t\t2 formats are currently supported:\n\t\t1) GRASS_data (output of i.pr_training)\n\t\t2) TABLE_data.",
+	    TRAINING_MAX_INPUTFILES);
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "training";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description = opt1desc;
+    opt1->multiple = YES;
+
+    opt2 = G_define_option();
+    opt2->key = "features";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description = "Name of the input file containing the features.";
+
+
+    opt3 = G_define_option();
+    opt3->key = "features_out";
+    opt3->type = TYPE_STRING;
+    opt3->required = YES;
+    opt3->description =
+	"Name of the output file containing the resulting features.";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+    /*read input training files */
+    ntraining_file = 0;
+    for (i = 0; opt1->answers[i]; i++) {
+	if (ntraining_file >= TRAINING_MAX_INPUTFILES) {
+	    sprintf(tempbuf, "Maximum number of allowed training files is %d",
+		    TRAINING_MAX_INPUTFILES);
+	    G_fatal_error(tempbuf);
+	}
+	training_file[ntraining_file] = opt1->answers[i];
+	ntraining_file += 1;
+    }
+
+
+    /*fill training structure */
+    inizialize_training(&(features_out.training));
+    for (i = 0; i < ntraining_file; i++) {
+	read_training(training_file[i], &(features_out.training));
+    }
+
+    /*read features */
+    read_features(opt2->answer, &features, -1);
+
+    /* which actions to perform on the data */
+    features_out.f_normalize = features.f_normalize;
+    features_out.f_standardize = features.f_standardize;
+    features_out.f_mean = features.f_mean;
+    features_out.f_variance = features.f_variance;
+    features_out.f_pca = features.f_pca;
+
+    if (features_out.f_pca[0]) {
+	features_out.pca = features.pca;
+	features_out.pca_class = (int *)G_calloc(1, sizeof(int));
+    }
+
+
+    /*fill features structure */
+    generate_features(&features, &features_out);
+
+    /*standardize features */
+    if (features_out.f_standardize[0]) {
+	features_out.mean = features.mean;
+	features_out.sd = features.sd;
+	standardize_features(&features_out);
+    }
+
+    /*write features */
+    write_features(opt3->answer, &features_out);
+
+    return 0;
+}
+
+void generate_features(Features * features, Features * features_out)
+{
+    int i, j, k, l;
+    char *mapset;
+    int fp;
+    struct Cell_head cellhd;
+    DCELL *rowbuf;
+    DCELL *tf;
+    DCELL **matrix;
+    int r, c;
+    char tempbuf[500];
+    int *compute_features;
+    int dim;
+    DCELL *mean = NULL, *sd = NULL;
+    int corrent_feature;
+    int *space_for_each_layer;
+    DCELL *projected = NULL;
+    int addclass;
+
+    compute_features =
+	(int *)G_calloc(features_out->training.nlayers, sizeof(int));
+
+    features_out->nexamples = features_out->training.nexamples;
+    dim = features_out->training.rows * features_out->training.cols;
+
+    /*compute space */
+    space_for_each_layer = (int *)G_calloc(features_out->training.nlayers,
+					   sizeof(int));
+    features_out->examples_dim = 0;
+    for (j = 0; j < features_out->training.nlayers; j++) {
+	if (features_out->f_mean[0]) {
+	    for (i = 2; i < 2 + features_out->f_mean[1]; i++) {
+		if (features_out->f_mean[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features_out->f_variance[0]) {
+	    for (i = 2; i < 2 + features_out->f_variance[1]; i++) {
+		if (features_out->f_variance[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features_out->f_pca[0]) {
+	    for (i = 2; i < 2 + features_out->f_pca[1]; i++) {
+		if (features_out->f_pca[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += dim;
+		}
+	    }
+	}
+	if (space_for_each_layer[j] == 0) {
+	    space_for_each_layer[j] = dim;
+	}
+	features_out->examples_dim += space_for_each_layer[j];
+    }
+
+    /*alloc memory */
+    features_out->value =
+	(double **)G_calloc(features_out->nexamples, sizeof(double *));
+    for (i = 0; i < features_out->nexamples; i++) {
+	features_out->value[i] =
+	    (double *)G_calloc(features_out->examples_dim, sizeof(double));
+    }
+    features_out->class =
+	(int *)G_calloc(features_out->nexamples, sizeof(int));
+
+    matrix = (double **)G_calloc(features_out->nexamples, sizeof(double *));
+    for (i = 0; i < features_out->nexamples; i++) {
+	matrix[i] = (double *)G_calloc(dim, sizeof(double));
+    }
+
+    mean = (double *)G_calloc(features_out->nexamples, sizeof(double));
+    sd = (double *)G_calloc(features_out->nexamples, sizeof(double));
+
+
+    /*copy classes */
+    for (i = 0; i < features_out->nexamples; i++) {
+	features_out->class[i] = features_out->training.class[i];
+    }
+
+    /* compute p_classes: the distinct class labels, in order of first
+       appearance */
+    features_out->p_classes = (int *)G_calloc(1, sizeof(int));
+    features_out->nclasses = 1;
+    features_out->p_classes[0] = features_out->class[0];
+    for (i = 1; i < features_out->nexamples; i++) {
+	addclass = TRUE;
+	for (j = 0; j < features_out->nclasses; j++) {
+	    if (features_out->class[i] == features_out->p_classes[j]) {
+		addclass = FALSE;
+	    }
+	}
+	if (addclass) {
+	    features_out->nclasses += 1;
+	    features_out->p_classes =
+		(int *)G_realloc(features_out->p_classes,
+				 features_out->nclasses * sizeof(int));
+	    features_out->p_classes[features_out->nclasses - 1] =
+		features_out->class[i];
+	}
+    }
+
+    if (features_out->f_pca[0]) {
+	projected = (double *)G_calloc(dim, sizeof(double));
+    }
+
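+    /* build the output features layer by layer: raw (possibly normalized)
+       pixel values for layers with no requested statistic, otherwise the
+       mean, the variance and/or the PCA projection, in that order */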
+    corrent_feature = 0;
+    for (j = 0; j < features_out->training.nlayers; j++) {
+	for (i = 0; i < features_out->nexamples; i++) {
+	    switch (features_out->training.data_type) {
+	    case GRASS_data:
+		if ((mapset =
+		     G_find_cell(features_out->training.mapnames[i][j],
+				 "")) == NULL) {
+		    sprintf(tempbuf,
+			    "generate_features-> Can't find raster map <%s>",
+			    features_out->training.mapnames[i][j]);
+		    G_fatal_error(tempbuf);
+		}
+		if ((fp =
+		     G_open_cell_old(features_out->training.mapnames[i][j],
+				     mapset)) < 0) {
+		    sprintf(tempbuf,
+			    "generate_features-> Can't open raster map <%s> for reading",
+			    features_out->training.mapnames[i][j]);
+		    G_fatal_error(tempbuf);
+		}
+
+		G_get_cellhd(features_out->training.mapnames[i][j], mapset,
+			     &cellhd);
+		G_set_window(&cellhd);
+		if ((cellhd.rows != features_out->training.rows) ||
+		    (cellhd.cols != features_out->training.cols)) {
+		    /*      fprintf(stderr,"map number = %d\n",i); */
+		    sprintf(tempbuf, "generate_features-> Dimension Error");
+		    G_fatal_error(tempbuf);
+		}
+		rowbuf = (DCELL *) G_calloc(dim, sizeof(DCELL));
+		tf = rowbuf;
+
+
+		for (r = 0; r < features_out->training.rows; r++) {
+		    G_get_d_raster_row(fp, tf, r);
+		    for (c = 0; c < features_out->training.cols; c++) {
+			if (G_is_d_null_value(tf))
+			    *tf = 0.0;
+			matrix[i][c + (r * features_out->training.cols)] =
+			    *tf;
+			tf++;
+		    }
+		}
+		G_free(rowbuf);
+
+		G_close_cell(fp);
+		break;
+	    case TABLE_data:
+		matrix[i] = features_out->training.data[i];
+		break;
+	    default:
+		sprintf(tempbuf, "generate_features-> Format not recognized");
+		G_fatal_error(tempbuf);
+		break;
+	    }
+	}
+
+	for (k = 0; k < features_out->nexamples; k++) {
+	    mean[k] = sd[k] = 0.0;
+	}
+	mean_and_sd_of_double_matrix_by_row(matrix, features_out->nexamples,
+					    dim, mean, sd);
+
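+	/* per-example normalization: centre and scale each example of this
+	   layer by its own mean and standard deviation */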
+	if (features_out->f_normalize[0]) {
+	    for (i = 2; i < 2 + features_out->f_normalize[1]; i++) {
+		if (features_out->f_normalize[i] == j) {
+		    for (k = 0; k < features_out->nexamples; k++) {
+			for (r = 0; r < dim; r++) {
+			    matrix[k][r] = (matrix[k][r] - mean[k]) / sd[k];
+			}
+		    }
+		}
+	    }
+	}
+
+	if (!compute_features[j]) {
+	    for (i = 0; i < features_out->nexamples; i++) {
+		for (r = 0; r < dim; r++) {
+		    features_out->value[i][corrent_feature + r] =
+			matrix[i][r];
+		}
+	    }
+	    corrent_feature += dim;
+	}
+	else {
+	    if (features_out->f_mean[0]) {
+		for (i = 2; i < 2 + features_out->f_mean[1]; i++) {
+		    if (features_out->f_mean[i] == j) {
+			for (k = 0; k < features_out->nexamples; k++) {
+			    features_out->value[k][corrent_feature] = mean[k];
+			}
+			corrent_feature += 1;
+		    }
+		}
+	    }
+
+	    if (features_out->f_variance[0]) {
+		for (i = 2; i < 2 + features_out->f_variance[1]; i++) {
+		    if (features_out->f_variance[i] == j) {
+			for (k = 0; k < features_out->nexamples; k++) {
+			    features_out->value[k][corrent_feature] =
+				sd[k] * sd[k];
+			}
+			corrent_feature += 1;
+		    }
+		}
+	    }
+
+
+	    if (features_out->f_pca[0]) {
+		for (i = 2; i < 2 + features_out->f_pca[1]; i++) {
+		    if (features_out->f_pca[i] == j) {
+			for (l = 0; l < features_out->nexamples; l++) {
+			    product_double_vector_double_matrix
+				(features_out->pca[j].eigmat, matrix[l], dim,
+				 dim, projected);
+			    for (r = 0; r < dim; r++) {
+				features_out->value[l][corrent_feature + r] =
+				    projected[r];
+			    }
+			}
+			corrent_feature += dim;
+		    }
+		}
+	    }
+	}
+    }
+
+
+    G_free(mean);
+    G_free(sd);
+    G_free(compute_features);
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_features_extract/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_features_extract/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_features_extract/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,180 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+#define MAXLIMITS 20
+
+int read_selection();
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt4;
+
+    int i, j, k;
+    Features features;
+
+    char features_out_name[500];
+    int limits[MAXLIMITS];
+    int *selection;
+    char *tmpbuf;
+    int nselection;
+    int nlimits;
+    double **copydata;
+    int col;
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module for feature extraction. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt2 = G_define_option();
+    opt2->key = "selected";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description =
+	"File containing the results of the features selection procedure\n\t\t(output of i.pr_features_selection).";
+
+
+    opt3 = G_define_option();
+    opt3->key = "nvar";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = YES;
+    opt3->multiple = YES;
+    opt3->description = "Number of reordered variables to be extracted.";
+
+
+    opt4 = G_define_option();
+    opt4->key = "output";
+    opt4->type = TYPE_STRING;
+    opt4->required = NO;
+    opt4->description =
+	"Optionally, creates output features files with output option as root\n\t\t(insteed of the features option).";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+    nlimits = 0;
+    /*get limits */
+    if (opt3->answers) {
+	j = 0;
+	for (i = 0; (tmpbuf = opt3->answers[i]); i++, nlimits++) {
+	    if (i == MAXLIMITS)
+		break;
+	    sscanf(tmpbuf, "%d", &(limits[i]));
+	    j += 1;
+	}
+    }
+
+    /*read features */
+    read_features(opt1->answer, &features, -1);
+
+    /*copy data */
+    copydata = (double **)G_calloc(features.nexamples, sizeof(double *));
+    for (i = 0; i < features.nexamples; i++)
+	copydata[i] =
+	    (double *)G_calloc(features.examples_dim, sizeof(double));
+
+    for (i = 0; i < features.nexamples; i++)
+	for (j = 0; j < features.examples_dim; j++)
+	    copydata[i][j] = features.value[i][j];
+
+    /*read relative importance file */
+    nselection = read_selection(opt2->answer, &selection);
+
+    if (nselection != features.examples_dim) {
+	fprintf(stderr,
+		"WARNING: number of features (=%d) different from length of relative importance file (=%d)\n",
+		features.examples_dim, nselection);
+    }
+
+    /*build files */
+    for (i = 0; i < nlimits; i++) {
+	features.training.file = "generated by i.pr_features_extract";
+	features.training.cols = limits[i];
+	features.examples_dim = limits[i];
+
+	col = 0;
+	for (j = 0; j < limits[i]; j++) {
+	    for (k = 0; k < features.nexamples; k++)
+		features.value[k][col] = copydata[k][selection[j] - 1];
+	    col++;
+	}
+
+	/*write features */
+	if (opt4->answer == NULL)
+	    sprintf(features_out_name, "%s_fsExtr_%d", opt1->answer,
+		    limits[i]);
+	else
+	    sprintf(features_out_name, "%s_fsExtr_%d", opt4->answer,
+		    limits[i]);
+
+	write_features(features_out_name, &features);
+    }
+
+    return 0;
+}
+
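+/*
+ * Read the output of i.pr_features_selection: one row per feature, with
+ * the selected feature number after the first tab. Allocates *selection
+ * and returns the number of entries read.
+ */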
+int read_selection(char *file, int **selection)
+{
+    FILE *fp;
+    char tmpbuf[500];
+    char *line = NULL;
+    int index = 0;
+
+    if ((fp = fopen(file, "r")) == NULL) {
+	sprintf(tmpbuf, "Error opening file %s for reading", file);
+	G_fatal_error(tmpbuf);
+    }
+
+    *selection = (int *)G_calloc(1, sizeof(int));
+    while ((line = GetLine(fp)) != NULL) {
+	line = strchr(line, '\t');
+	if (line == NULL)
+	    continue;
+	sscanf(line, "%d", &((*selection)[index]));
+	index++;
+	*selection = G_realloc(*selection, (index + 1) * sizeof(int));
+    }
+
+    return index;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_features_selection/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_features_selection/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_features_selection/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,618 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt6;
+    struct Option *opt7;
+    struct Option *opt8;
+    struct Option *opt9;
+    struct Option *opt10;
+    struct Option *opt12;
+    struct Option *opt18;
+    struct Option *opt23;
+    struct Option *opt24;
+    struct Option *opt22;
+    struct Flag *flag_w;
+
+    Features features;
+    SupportVectorMachine *svm_models;
+    SupportVectorMachine svm;
+
+    char tmpbuf[500];
+    char svm_kernel_type[100];
+    char fs_type_string[100];
+    double svm_kp, svm_C, svm_tol, svm_eps;
+    int svm_maxloops;
+    int svm_kernel;
+    double svm_cost;
+    double *svm_W;
+    int i, j, k, t;
+    int fs_type, fs_rfe, neliminati;
+    FILE *fp_fs_out;
+    FILE *fp_fs_w, *fp_fs_stats;
+    char file_w[200], file_stats[200];
+    int *selected, *names, *posizwsquare, *vareliminate;
+    int rimanenti;
+    int ncicli;
+    double **H_tot, **H_tmp, *valoriDJ, *wsquarenuovo, *vareliminatedouble;
+    int svm_verbose;
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module for feature selection. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt2 = G_define_option();
+    opt2->key = "output";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description =
+	"Name of the output file containing the selected features.";
+
+    opt3 = G_define_option();
+    opt3->key = "npc";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = NO;
+    opt3->description =
+	"Number of the principal components to be used for the model development.\n\t\t\tIf not set all the principal components will be used.\n\t\t\tIgnored if features does not contain principal components model.";
+
+    opt6 = G_define_option();
+    opt6->key = "svm_kernel";
+    opt6->type = TYPE_STRING;
+    opt6->required = NO;
+    opt6->description = "For svm: type of employed kernel.";
+    opt6->answer = "linear";
+    opt6->options = "gaussian,linear";
+
+    opt7 = G_define_option();
+    opt7->key = "svm_kp";
+    opt7->type = TYPE_DOUBLE;
+    opt7->required = NO;
+    opt7->description =
+	"For svm: kernel parameter  (Required parameter if you are using gaussian kernel).";
+
+    opt8 = G_define_option();
+    opt8->key = "svm_C";
+    opt8->type = TYPE_DOUBLE;
+    opt8->required = NO;
+    opt8->description =
+	"For svm: optimization parameter (Required parameter).";
+
+    opt18 = G_define_option();
+    opt18->key = "svm_cost";
+    opt18->type = TYPE_DOUBLE;
+    opt18->required = NO;
+    opt18->description =
+	"Cost parameter for the implementation of cost-sensitive procedure(w in [-1,1]).\n\t\t\tw>0 results higher weight on examples of class 1.\n\t\t\tw<0 results higher weight on examples of class -1.\n\t\t\tw=0 corresponds to standard SVM.";
+    opt18->answer = "0.0";
+
+
+    opt23 = G_define_option();
+    opt23->key = "fs_type";
+    opt23->type = TYPE_STRING;
+    opt23->required = YES;
+    opt23->description = "Feature selection method.";
+    opt23->options = "rfe,e_rfe,1_rfe,sqrt_rfe";
+
+    opt24 = G_define_option();
+    opt24->key = "fs_rfe";
+    opt24->type = TYPE_INTEGER;
+    opt24->required = NO;
+    opt24->description =
+	"If you are using the e_rfe method, you have to choose the number of feartures\n\t\t\tfor the classical rfe method to start (fs_rfe>1).";
+
+    opt9 = G_define_option();
+    opt9->key = "svm_tol";
+    opt9->type = TYPE_DOUBLE;
+    opt9->required = NO;
+    opt9->description = "For svm: tollerance parameter.";
+    opt9->answer = "0.001";
+
+    opt10 = G_define_option();
+    opt10->key = "svm_eps";
+    opt10->type = TYPE_DOUBLE;
+    opt10->required = NO;
+    opt10->description = "For svm: epsilon.";
+    opt10->answer = "0.001";
+
+    opt12 = G_define_option();
+    opt12->key = "svm_maxloops";
+    opt12->type = TYPE_INTEGER;
+    opt12->required = NO;
+    opt12->description = "For svm: maximum number of optimization steps.";
+    opt12->answer = "1000";
+
+    opt22 = G_define_option();
+    opt22->key = "svm_verbose";
+    opt22->type = TYPE_INTEGER;
+    opt22->required = NO;
+    opt22->description =
+	"For svm: if it is set to 1 the number of loops will be printed.";
+    opt22->options = "0,1";
+    opt22->answer = "0";
+
+
+    flag_w = G_define_flag();
+    flag_w->key = 'w';
+    flag_w->description = "Produce file containing weights at each step.";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+    /*
+       read number of principal components
+       (maybe we will not use this parameter in the future)
+     */
+
+    if (opt3->answer) {
+	sscanf(opt3->answer, "%d", &(features.npc));
+    }
+    else {
+	features.npc = -1;
+    }
+
+    /*read SVM parameters */
+
+    sscanf(opt6->answer, "%s", svm_kernel_type);
+    sscanf(opt18->answer, "%lf", &svm_cost);
+
+    if (strcmp(svm_kernel_type, "linear") == 0) {
+	svm_kernel = SVM_KERNEL_LINEAR;
+    }
+    else if (strcmp(svm_kernel_type, "gaussian") == 0) {
+	svm_kernel = SVM_KERNEL_GAUSSIAN;
+    }
+    else {
+	sprintf(tmpbuf, "kernel type not implemended!\n");
+	G_fatal_error(tmpbuf);
+    }
+    if (svm_kernel == SVM_KERNEL_GAUSSIAN) {
+	if (!opt7->answer) {
+	    sprintf(tmpbuf, "Please set kernel parameter\n");
+	    G_fatal_error(tmpbuf);
+	}
+	else {
+	    sscanf(opt7->answer, "%lf", &svm_kp);
+	    if (svm_kp <= 0) {
+		sprintf(tmpbuf, "kernel parameter must be > 0\n");
+		G_fatal_error(tmpbuf);
+	    }
+	}
+    }
+    else
+	svm_kp = 0.0;
+
+    if (!opt8->answer) {
+	sprintf(tmpbuf, "Please set optimization parameter\n");
+	G_fatal_error(tmpbuf);
+    }
+    else {
+	sscanf(opt8->answer, "%lf", &svm_C);
+	if (svm_C <= 0) {
+	    sprintf(tmpbuf, "optimization parameter must be > 0\n");
+	    G_fatal_error(tmpbuf);
+	}
+    }
+    sscanf(opt9->answer, "%lf", &svm_tol);
+    if (svm_tol <= 0) {
+	sprintf(tmpbuf, "tol must be > 0\n");
+	G_fatal_error(tmpbuf);
+    }
+    sscanf(opt10->answer, "%lf", &svm_eps);
+    if (svm_eps <= 0) {
+	sprintf(tmpbuf, "eps must be > 0\n");
+	G_fatal_error(tmpbuf);
+    }
+    sscanf(opt12->answer, "%d", &svm_maxloops);
+    if (svm_maxloops <= 0) {
+	sprintf(tmpbuf, "maximum number of loops must be > 0\n");
+	G_fatal_error(tmpbuf);
+    }
+    sscanf(opt22->answer, "%d", &svm_verbose);
+    /* read feature selection parameters (please check consistency!) */
+
+    sscanf(opt23->answer, "%s", fs_type_string);
+    if (strcmp(fs_type_string, "rfe") == 0)
+	fs_type = FS_RFE;
+    else if (strcmp(fs_type_string, "e_rfe") == 0)
+	fs_type = FS_E_RFE;
+    else if (strcmp(fs_type_string, "1_rfe") == 0)
+	fs_type = FS_ONE_RFE;
+    else if (strcmp(fs_type_string, "sqrt_rfe") == 0)
+	fs_type = FS_SQRT_RFE;
+    else {
+	sprintf(tmpbuf, "features selection method not recognized!\n");
+	G_fatal_error(tmpbuf);
+    }
+
+    if (fs_type == FS_E_RFE) {
+	if (!opt24->answer) {
+	    sprintf(tmpbuf,
+		    "You selected e_rfe: please set fs_rfe parameter!\n");
+	    G_fatal_error(tmpbuf);
+	}
+	else
+	    sscanf(opt24->answer, "%d", &fs_rfe);
+
+	if (fs_rfe <= 1) {
+	    sprintf(tmpbuf, "fs_rfe must be > 1\n");
+	    G_fatal_error(tmpbuf);
+	}
+    }
+
+    /*output files */
+
+    fp_fs_out = fopen(opt2->answer, "w");
+
+    if (fp_fs_out == NULL) {
+	fprintf(stderr, "Error opening file %s for writing\n", opt2->answer);
+	exit(1);
+    }
+
+    if (flag_w->answer) {
+	sprintf(file_w, "%s_w", opt2->answer);
+	fp_fs_w = fopen(file_w, "w");
+
+	if (fp_fs_w == NULL) {
+	    fprintf(stderr, "Error opening file %s for writing\n", file_w);
+	    exit(1);
+	}
+    }
+    else
+	fp_fs_w = NULL;
+
+    if (fs_type == FS_E_RFE) {
+	sprintf(file_stats, "%s_stats", opt2->answer);
+	fp_fs_stats = fopen(file_stats, "w");
+
+	if (fp_fs_stats == NULL) {
+	    fprintf(stderr, "Error opening file %s for writing\n",
+		    file_stats);
+	    exit(1);
+	}
+    }
+    else
+	fp_fs_stats = NULL;
+
+
+    /*read features */
+
+    read_features(opt1->answer, &features, features.npc);
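+    /* svm requires labels in {-1, +1}: remap the two observed class
+       labels unless they already multiply to -1 */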
+    if (features.nclasses == 2) {
+	if ((features.p_classes[0] * features.p_classes[1]) != -1) {
+	    fprintf(stderr, "class %d interpreted as class -1\n",
+		    features.p_classes[0]);
+	    fprintf(stderr, "class %d interpreted as class 1\n",
+		    features.p_classes[1]);
+
+	    for (i = 0; i < features.nexamples; i++) {
+		if (features.class[i] == features.p_classes[0]) {
+		    features.class[i] = -1;
+		}
+		else {
+		    features.class[i] = 1;
+		}
+	    }
+	    features.p_classes[0] = -1;
+	    features.p_classes[1] = 1;
+	}
+    }
+    if (features.nclasses != 2) {
+	sprintf(tmpbuf, "svm works only with 2 class problems\n");
+	G_fatal_error(tmpbuf);
+    }
+
+    /*set svm parameters */
+    svm_W = (double *)G_calloc(features.nexamples, sizeof(double));
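+    /* cost-sensitive weighting: svm_cost > 0 down-weights examples of
+       class -1, svm_cost < 0 down-weights examples of class +1, and
+       svm_cost = 0 keeps uniform unit weights */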
+    for (i = 0; i < features.nexamples; i++) {
+	svm_W[i] = 1.;
+	if (svm_cost > 0) {
+	    if (features.class[i] < 0)
+		svm_W[i] = 1. - svm_cost;
+	}
+	else if (svm_cost < 0) {
+	    if (features.class[i] > 0)
+		svm_W[i] = 1. + svm_cost;
+	}
+    }
+
+    /*set features selection variables */
+    svm_models = (SupportVectorMachine *) G_calloc(features.examples_dim - 1,
+						   sizeof
+						   (SupportVectorMachine));
+
+    names = (int *)G_calloc(features.examples_dim, sizeof(int));
+    selected = (int *)G_calloc(features.examples_dim, sizeof(int));
+
+    for (j = 0; j < features.examples_dim; j++) {
+	names[j] = j + 1;
+    }
+
+    /* run the selected feature selection procedure */
+    if (svm_kernel == SVM_KERNEL_LINEAR) {
+	/* linear kernel */
+	switch (fs_type) {
+	case FS_ONE_RFE:
+	    /* no retraining: rank all features from a single linear SVM */
+	    compute_svm(&svm, features.nexamples,
+			features.examples_dim, features.value,
+			features.class, svm_kernel, svm_kp,
+			svm_C, svm_tol, svm_eps, svm_maxloops, svm_verbose,
+			svm_W);
+	    one_rfe_lin(&svm, names, selected, fp_fs_w);
+	    free_svm(&svm);
+	    break;
+	case FS_RFE:
+	    /*RFE linear */
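+	    /* retrain the SVM on the remaining features at every step;
+	       rfe_lin removes one feature per call until a full ranking
+	       is obtained */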
+	    for (i = 0; i < (features.examples_dim - 1); i++) {
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    (features.examples_dim - i), features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+
+		rfe_lin(&(svm_models[i]), &features, names, selected, i,
+			fp_fs_w);
+		free_svm(&(svm_models[i]));
+	    }
+	    selected[0] = names[0];
+	    break;
+	case FS_E_RFE:
+	    /*Entropy-based RFE linear */
+	    rimanenti = features.examples_dim;
+
+	    ncicli = 0;
+	    for (i = 0; rimanenti > fs_rfe; i++) {
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    rimanenti, features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+
+		e_rfe_lin(&(svm_models[i]), &features, names,
+			  selected, i, &rimanenti, fp_fs_w, fp_fs_stats);
+		free_svm(&(svm_models[i]));
+		ncicli++;
+	    }
+
+	    for (i = ncicli; rimanenti > 1; i++) {
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    rimanenti, features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+		rfe_lin(&(svm_models[i]), &features, names, selected,
+			features.examples_dim - rimanenti, fp_fs_w);
+		free_svm(&(svm_models[i]));
+		rimanenti--;
+	    }
+	    selected[0] = names[0];
+	    break;
+	case FS_SQRT_RFE:
+	    /* Eliminate sqrt(remaining features) features at a time */
+	    rimanenti = features.examples_dim;
+	    for (i = 0; rimanenti > 1; i++) {
+
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    rimanenti, features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+
+		wsquarenuovo = (double *)G_calloc(rimanenti, sizeof(double));
+
+		for (j = 0; j < rimanenti; j++) {
+		    wsquarenuovo[j] = svm_models[i].w[j] * svm_models[i].w[j];
+		}
+
+		if (fp_fs_w != NULL) {
+		    fprintf(fp_fs_w, "%6.10f", wsquarenuovo[0]);
+		    for (j = 1; j < rimanenti; j++) {
+			fprintf(fp_fs_w, "\t%6.10f", wsquarenuovo[j]);
+		    }
+		    fprintf(fp_fs_w, "\n");
+		}
+
+		posizwsquare = (int *)G_calloc(rimanenti, sizeof(int));
+
+		indexx_1(rimanenti, wsquarenuovo, posizwsquare);
+
+		neliminati = (int)floor(sqrt(rimanenti));
+
+		vareliminate = (int *)G_calloc(neliminati, sizeof(int));
+
+		vareliminatedouble =
+		    (double *)G_calloc(neliminati, sizeof(double));
+
+		for (j = 0; j < neliminati; j++) {
+		    vareliminate[j] = posizwsquare[j];
+		}
+
+		for (j = 0; j < neliminati; j++) {
+		    selected[rimanenti - j - 1] = names[vareliminate[j]];
+		    vareliminatedouble[j] = (double)vareliminate[j];
+		}
+
+		shell(neliminati, vareliminatedouble);
+
+
+		for (j = 0; j < neliminati; j++) {
+		    vareliminate[j] = (int)vareliminatedouble[j];
+		}
+
+		for (j = 0; j < neliminati; j++) {
+		    for (k = vareliminate[j]; k < (rimanenti - 1); k++) {
+			for (t = 0; t < features.nexamples; t++) {
+			    features.value[t][k] = features.value[t][k + 1];
+			}
+			names[k] = names[k + 1];
+		    }
+
+		    for (k = j + 1; k < neliminati; k++) {
+			vareliminate[k]--;
+		    }
+		    rimanenti--;
+		}
+
+		G_free(wsquarenuovo);
+		G_free(posizwsquare);
+		G_free(vareliminate);
+		G_free(vareliminatedouble);
+
+	    }
+	    selected[0] = names[0];
+	    break;
+
+	default:
+	    break;
+	}
+    }
+    if (svm_kernel == SVM_KERNEL_GAUSSIAN) {
+
+	H_tot = (double **)G_calloc(features.nexamples, sizeof(double *));
+	for (j = 0; j < features.nexamples; j++) {
+	    H_tot[j] = (double *)G_calloc(features.nexamples, sizeof(double));
+	}
+
+	compute_H(H_tot, features.value, features.class, features.nexamples,
+		  features.examples_dim, svm_kp);
+
+	H_tmp = (double **)G_calloc(features.nexamples, sizeof(double *));
+
+	for (j = 0; j < features.nexamples; j++) {
+	    H_tmp[j] = (double *)G_calloc(features.nexamples, sizeof(double));
+	}
+
+	switch (fs_type) {
+	case FS_ONE_RFE:
+	    /* no retraining: rank all features from a single gaussian SVM */
+	    compute_svm(&svm, features.nexamples,
+			features.examples_dim, features.value,
+			features.class, svm_kernel, svm_kp,
+			svm_C, svm_tol, svm_eps, svm_maxloops, svm_verbose,
+			svm_W);
+
+	    compute_valoriDJ(&svm, &features, H_tot, H_tmp, &valoriDJ);
+	    one_rfe_gauss(valoriDJ, names, selected, features.examples_dim,
+			  fp_fs_w);
+	    free_svm(&svm);
+	    break;
+	case FS_RFE:
+	    /*RFE gaussian */
+
+	    for (i = 0; i < (features.examples_dim - 1); i++) {
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    (features.examples_dim - i), features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+
+		compute_valoriDJ(&(svm_models[i]), &features, H_tot, H_tmp,
+				 &valoriDJ);
+		rfe_gauss(valoriDJ, &features, names, selected, i, H_tot,
+			  H_tmp, svm_kp, fp_fs_w);
+		G_free(valoriDJ);
+		free_svm(&(svm_models[i]));
+	    }
+	    selected[0] = names[0];
+	    break;
+	case FS_E_RFE:
+	    /*Entropy-based RFE gaussian */
+	    rimanenti = features.examples_dim;
+
+	    ncicli = 0;
+
+	    for (i = 0; rimanenti > fs_rfe; i++) {
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    rimanenti, features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+
+		compute_valoriDJ(&(svm_models[i]), &features, H_tot, H_tmp,
+				 &valoriDJ);
+		e_rfe_gauss(valoriDJ, &features, names, selected, i, H_tot,
+			    H_tmp, &rimanenti, svm_kp, fp_fs_w, fp_fs_stats);
+
+		G_free(valoriDJ);
+		free_svm(&(svm_models[i]));
+		ncicli++;
+	    }
+
+	    for (i = ncicli; rimanenti > 1; i++) {
+		compute_svm(&(svm_models[i]), features.nexamples,
+			    rimanenti, features.value,
+			    features.class, svm_kernel, svm_kp,
+			    svm_C, svm_tol, svm_eps, svm_maxloops,
+			    svm_verbose, svm_W);
+		compute_valoriDJ(&(svm_models[i]), &features, H_tot, H_tmp,
+				 &valoriDJ);
+		rfe_gauss(valoriDJ, &features, names, selected,
+			  features.examples_dim - rimanenti, H_tot, H_tmp,
+			  svm_kp, fp_fs_w);
+		G_free(valoriDJ);
+		free_svm(&(svm_models[i]));
+		rimanenti--;
+	    }
+	    selected[0] = names[0];
+	    break;
+	default:
+	    break;
+	}
+    }
+
+
+    /*print output file containing the order of the relative importance */
+    for (i = 0; i < features.examples_dim; i++) {
+	fprintf(fp_fs_out, "%d\t%d\n", i + 1, selected[i]);
+    }
+    fclose(fp_fs_out);
+    if (fs_type == FS_E_RFE)
+	fclose(fp_fs_stats);
+    if (flag_w->answer)
+	fclose(fp_fs_w);
+
+    return 0;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_model/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_model/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_model/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,1088 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt4;
+    struct Option *opt5;
+    struct Option *opt6;
+    struct Option *opt7;
+    struct Option *opt8;
+    struct Option *opt9;
+    struct Option *opt10;
+    struct Option *opt11;
+    struct Option *opt12;
+    struct Option *opt13;
+    struct Option *opt14;
+    struct Option *opt15;
+    struct Option *opt16;
+    struct Option *opt17;
+    struct Option *opt18;
+    struct Option *opt19;
+    struct Option *opt20;
+    struct Option *opt21;
+    struct Option *opt22;
+    struct Option *opt23;
+    struct Option *opt24;
+    struct Option *opt25;
+    struct Option *opt26;
+    struct Option *opt27;
+    struct Option *opt28;
+    struct Flag *flag_g;
+    struct Flag *flag_t;
+    struct Flag *flag_s;
+    struct Flag *flag_n;
+
+    Features features;
+    Features test_features;
+    Features validation_features;
+    Tree tree;
+    GaussianMixture gm;
+    NearestNeighbor nn;
+    SupportVectorMachine svm;
+    BTree btree;
+    BSupportVectorMachine bsvm;
+
+    char tmpbuf[500];
+    int tree_stamps, tree_minsize;
+    char svm_kernel_type[100];
+    double svm_kp, svm_C, svm_tol, svm_eps;
+    int svm_maxloops;
+    int svm_kernel;
+    int svm_verbose;
+    double svm_cost;
+    double *svm_W;
+    int bagging, boosting, reg, reg_verbose;
+    double w;
+    int i, j;
+    char outfile[500];
+    char outfile1[500];
+    int svm_l1o;
+    int weights_boosting;
+    double *tree_costs;
+    char *tmp_costs;
+    int soft_margin_boosting;
+    int progressive_error;
+    int parallel_boosting;
+    double *misratio;
+    double misclass_ratio;
+    int testset;
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to generate model from features file. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt2 = G_define_option();
+    opt2->key = "model";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description = "Name of the output file containing the model.";
+
+    opt26 = G_define_option();
+    opt26->key = "validation";
+    opt26->type = TYPE_STRING;
+    opt26->required = NO;
+    opt26->description =
+	"Input file containing other features for the training\n\t\t\tand for the evaluation of the performances of the model\n\t\t\ton an independent test set (for regularized AdaBoost).";
+
+    opt16 = G_define_option();
+    opt16->key = "test";
+    opt16->type = TYPE_STRING;
+    opt16->required = NO;
+    opt16->description =
+	"Input file containing other features for the evaluation of the\n\t\t\tperformances of the model on an independent test set.";
+
+    opt3 = G_define_option();
+    opt3->key = "npc";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = NO;
+    opt3->description =
+	"Number of the principal components to be used for the model development.\n\t\t\tIf not set all the principal components will be used.\n\t\t\tIgnored if features does not contain principal components model.";
+
+    opt13 = G_define_option();
+    opt13->key = "bagging";
+    opt13->type = TYPE_INTEGER;
+    opt13->required = NO;
+    opt13->description =
+	"Number of bagging models.\n\t\t\tIf bagging = 0 the classical model will be implemented.\n\t\t\tImplemented for trees and svm only.";
+    opt13->answer = "0";
+
+    opt14 = G_define_option();
+    opt14->key = "boosting";
+    opt14->type = TYPE_INTEGER;
+    opt14->required = NO;
+    opt14->description =
+	"Number of boosting models.\n\t\t\tIf boosting = 0 the classical model will be implemented.\n\t\t\tImplemented for trees and svm only.";
+    opt14->answer = "0";
+
+    opt25 = G_define_option();
+    opt25->key = "reg";
+    opt25->type = TYPE_INTEGER;
+    opt25->required = NO;
+    opt25->description = "Number of misclassification ratio intervals.";
+    opt25->answer = "0";
+
+    opt28 = G_define_option();
+    opt28->key = "misclass_ratio";
+    opt28->type = TYPE_DOUBLE;
+    opt28->required = NO;
+    opt28->description =
+	"For regularized AdaBoost: misclassification ratio for\n\t\t\thard point shaving and compute the new model.";
+    opt28->answer = "1.00";
+
+    opt27 = G_define_option();
+    opt27->key = "reg_verbose";
+    opt27->type = TYPE_INTEGER;
+    opt27->required = NO;
+    opt27->description =
+	"For regularized AdaBoost:\n\t\t\t- if it is set = 1 the current value of misclassification\n\t\t\t ratio and the current error will be printed.\n\t\t\t- if it is set to >1 the number of\n\t\t\t loops, accuracy and the current value of misclassification ratio\n\t\t\twill be printed.\n\t\t\t For shaving and compute:\n\t\t\t - if it is set >0 the numbers of samples shaved will be printed.";
+    opt27->answer = "0";
+
+    opt23 = G_define_option();
+    opt23->key = "progressive_error";
+    opt23->type = TYPE_INTEGER;
+    opt23->required = NO;
+    opt23->description =
+	"Progressive estimate of the model error\n\t\t\tincreasing the number of aggregated models";
+    opt23->answer = "0";
+    opt23->options = "0,1";
+
+    opt15 = G_define_option();
+    opt15->key = "cost_boosting";
+    opt15->type = TYPE_DOUBLE;
+    opt15->required = NO;
+    opt15->description =
+	"Cost parameter for the implementation of cost-sensitive procedure(w in [0,2]).\n\t\t\tw>1 results higher weight on examples of class 1.\n\t\t\tw<1 results higher weight on examples of class -1.\n\t\t\tw=1 corresponds to standard Adaboost.";
+    opt15->answer = "1.0";
+
+    opt19 = G_define_option();
+    opt19->key = "weights_boosting";
+    opt19->type = TYPE_INTEGER;
+    opt19->required = NO;
+    opt19->description =
+	"For boosting: if weights_boosting = 1, a file containing the evolution\n\t\t\tof the weights associated to data points will be produced.";
+    opt19->answer = "0";
+    opt19->options = "0,1";
+
+
+
+    opt24 = G_define_option();
+    opt24->key = "parallel_boosting";
+    opt24->type = TYPE_INTEGER;
+    opt24->required = NO;
+    opt24->description =
+	"For boosting: number of true boosting steps for parallel boosting.\n\t\t\tImplemented only for trees!!";
+    opt24->answer = "0";
+
+    opt21 = G_define_option();
+    opt21->key = "soft_margin_boosting";
+    opt21->type = TYPE_INTEGER;
+    opt21->required = NO;
+    opt21->description =
+	"For boosting: if soft_margin_boosting = 1, sof margin of Ababoost\n\t\t\t will bee used. Implemented only with trees. (Sperimental!!!!!!!!!!)";
+    opt21->answer = "0";
+    opt21->options = "0,1";
+
+    opt4 = G_define_option();
+    opt4->key = "tree_stamps";
+    opt4->type = TYPE_INTEGER;
+    opt4->required = NO;
+    opt4->description =
+	"For trees: if tree_stamps = 1, a single split tree will be procuded,\n\t\t\tif tree_stamps = 0, a classical tree will be procuded.";
+    opt4->answer = "0";
+    opt4->options = "0,1";
+
+    opt5 = G_define_option();
+    opt5->key = "tree_minsize";
+    opt5->type = TYPE_INTEGER;
+    opt5->required = NO;
+    opt5->description =
+	"For trees: minimum number of examples containined\n\t\t\tinto a node for splitting the node itself";
+    opt5->answer = "0";
+
+    opt20 = G_define_option();
+    opt20->key = "tree_costs";
+    opt20->type = TYPE_INTEGER;
+    opt20->required = NO;
+    opt20->description = "For trees: misclassification costs for each class";
+    opt20->multiple = YES;
+
+    opt6 = G_define_option();
+    opt6->key = "svm_kernel";
+    opt6->type = TYPE_STRING;
+    opt6->required = NO;
+    opt6->description = "For svm: type of employed kernel.";
+    opt6->answer = "linear";
+    opt6->options = "gaussian,linear,2pbk";
+
+    opt7 = G_define_option();
+    opt7->key = "svm_kp";
+    opt7->type = TYPE_DOUBLE;
+    opt7->required = NO;
+    opt7->description =
+	"For svm: kernel parameter (Required parameter if you are using gaussian kernel).";
+
+    opt8 = G_define_option();
+    opt8->key = "svm_C";
+    opt8->type = TYPE_DOUBLE;
+    opt8->required = NO;
+    opt8->description =
+	"For svm: optimization parameter (Required parameter).";
+
+    opt18 = G_define_option();
+    opt18->key = "svm_cost";
+    opt18->type = TYPE_DOUBLE;
+    opt18->required = NO;
+    opt18->description =
+	"Cost parameter for the implementation of cost-sensitive procedure(w in [-1,1]).\n\t\t\tw>0 results higher weight on examples of class 1.\n\t\t\tw<0 results higher weight on examples of class -1.\n\t\t\tw=0 corresponds to standard SVM.\n\t\t\tNot yet implemented (and may be it will be never implemented)\n\t\t\tfor bagging and boosting";
+    opt18->answer = "0.0";
+
+    opt9 = G_define_option();
+    opt9->key = "svm_tol";
+    opt9->type = TYPE_DOUBLE;
+    opt9->required = NO;
+    opt9->description = "For svm: tollerance parameter.";
+    opt9->answer = "0.001";
+
+    opt10 = G_define_option();
+    opt10->key = "svm_eps";
+    opt10->type = TYPE_DOUBLE;
+    opt10->required = NO;
+    opt10->description = "For svm: epsilon.";
+    opt10->answer = "0.001";
+
+    opt11 = G_define_option();
+    opt11->key = "svm_l1o";
+    opt11->type = TYPE_INTEGER;
+    opt11->required = NO;
+    opt11->description = "For svm: leave 1 out error estimate.";
+    opt11->answer = "0";
+    opt11->options = "0,1";
+
+    opt12 = G_define_option();
+    opt12->key = "svm_maxloops";
+    opt12->type = TYPE_INTEGER;
+    opt12->required = NO;
+    opt12->description = "For svm: maximum number of optimization steps.";
+    opt12->answer = "1000";
+
+    opt22 = G_define_option();
+    opt22->key = "svm_verbose";
+    opt22->type = TYPE_INTEGER;
+    opt22->required = NO;
+    opt22->description =
+	"For svm: if it is set to 1 the number of loops will be printed.";
+    opt22->options = "0,1";
+    opt22->answer = "0";
+
+    opt17 = G_define_option();
+    opt17->key = "nn_k";
+    opt17->type = TYPE_INTEGER;
+    opt17->required = NO;
+    opt17->description =
+	"For nn: Number of neighbor to be considered during the test phase.";
+    opt17->answer = "1";
+
+
+    flag_g = G_define_flag();
+    flag_g->key = 'g';
+    flag_g->description = "selected model: gaussian mixture.";
+
+    flag_t = G_define_flag();
+    flag_t->key = 't';
+    flag_t->description = "selected model: classification trees.";
+
+    flag_s = G_define_flag();
+    flag_s->key = 's';
+    flag_s->description = "selected model: support vector machines.";
+
+    flag_n = G_define_flag();
+    flag_n->key = 'n';
+    flag_n->description = "selected model: nearest neighbor.";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+    /*read parameters */
+    if (opt3->answer) {
+	sscanf(opt3->answer, "%d", &(features.npc));
+    }
+    else {
+	features.npc = -1;
+    }
+
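+    /* kNN: k must be positive and odd, so that majority voting over the
+       neighbors cannot tie on a two-class problem */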
+    if (flag_n->answer) {
+	sscanf(opt17->answer, "%d", &(nn.k));
+	if (nn.k <= 0) {
+	    sprintf(tmpbuf, "number of neighbors must be > 0\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (nn.k % 2 == 0) {
+	    sprintf(tmpbuf, "number of neighbors must be odd\n");
+	    G_fatal_error(tmpbuf);
+	}
+    }
+
+    if (flag_t->answer) {
+	sscanf(opt4->answer, "%d", &tree_stamps);
+	if ((tree_stamps != 0) && (tree_stamps != 1)) {
+	    sprintf(tmpbuf, "stamps must be 0 or 1\n");
+	    G_fatal_error(tmpbuf);
+	}
+	sscanf(opt5->answer, "%d", &tree_minsize);
+	if (tree_minsize < 0) {
+	    sprintf(tmpbuf, "minsize must be >= 0\n");
+	    G_fatal_error(tmpbuf);
+	}
+    }
+
+    if (flag_s->answer) {
+	sscanf(opt6->answer, "%s", svm_kernel_type);
+	sscanf(opt18->answer, "%lf", &svm_cost);
+
+	if (strcmp(svm_kernel_type, "linear") == 0) {
+	    svm_kernel = SVM_KERNEL_LINEAR;
+	}
+	else if (strcmp(svm_kernel_type, "gaussian") == 0) {
+	    svm_kernel = SVM_KERNEL_GAUSSIAN;
+	}
+	else if (strcmp(svm_kernel_type, "2pbk") == 0) {
+	    svm_kernel = SVM_KERNEL_DIRECT;
+	}
+	else {
+	    sprintf(tmpbuf, "kernel type not implemented!\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (svm_kernel == SVM_KERNEL_GAUSSIAN) {
+	    if (!opt7->answer) {
+		sprintf(tmpbuf, "Please set kernel parameter\n");
+		G_fatal_error(tmpbuf);
+	    }
+	    else {
+		sscanf(opt7->answer, "%lf", &svm_kp);
+		if (svm_kp <= 0) {
+		    sprintf(tmpbuf, "kernel parameter must be > 0\n");
+		    G_fatal_error(tmpbuf);
+		}
+	    }
+	}
+	else
+	    svm_kp = 0.0;
+
+	if (!opt8->answer) {
+	    sprintf(tmpbuf, "Please set optimization parameter\n");
+	    G_fatal_error(tmpbuf);
+	}
+	else {
+	    sscanf(opt8->answer, "%lf", &svm_C);
+	    if (svm_C <= 0) {
+		sprintf(tmpbuf, "optimization parameter must be > 0\n");
+		G_fatal_error(tmpbuf);
+	    }
+	}
+	sscanf(opt9->answer, "%lf", &svm_tol);
+	if (svm_tol <= 0) {
+	    sprintf(tmpbuf, "tol must be > 0\n");
+	    G_fatal_error(tmpbuf);
+	}
+	sscanf(opt10->answer, "%lf", &svm_eps);
+	if (svm_eps <= 0) {
+	    sprintf(tmpbuf, "eps must be > 0\n");
+	    G_fatal_error(tmpbuf);
+	}
+	sscanf(opt12->answer, "%d", &svm_maxloops);
+	if (svm_maxloops <= 0) {
+	    sprintf(tmpbuf, "maximum number of loops must be > 0\n");
+	    G_fatal_error(tmpbuf);
+	}
+	sscanf(opt11->answer, "%d", &svm_l1o);
+	sscanf(opt22->answer, "%d", &svm_verbose);
+    }
+
+    sscanf(opt13->answer, "%d", &bagging);
+    sscanf(opt14->answer, "%d", &boosting);
+    sscanf(opt25->answer, "%d", &reg);
+    sscanf(opt27->answer, "%d", &reg_verbose);
+    sscanf(opt23->answer, "%d", &progressive_error);
+
+    sscanf(opt28->answer, "%lf", &misclass_ratio);
+    if ((misclass_ratio < 0) || (misclass_ratio > 1)) {
+	sprintf(tmpbuf, "misclassification ratio must be in [0,1]\n");
+	G_fatal_error(tmpbuf);
+    }
+    if ((misclass_ratio < 1) && (reg > 0)) {
+	sprintf(tmpbuf,
+		"Please select only one between shaving the training set and regularized AdaBoost\n");
+	G_fatal_error(tmpbuf);
+    }
+    if (bagging < 0) {
+	bagging = 0;
+    }
+
+    if (reg < 0) {
+	reg = 0;
+    }
+
+    if (boosting < 0) {
+	boosting = 0;
+    }
+
+    if ((bagging > 0) && (boosting > 0)) {
+	sprintf(tmpbuf,
+		"Please select only one between bagging and boosting\n");
+	G_fatal_error(tmpbuf);
+    }
+
+    if (boosting > 0) {
+	sscanf(opt15->answer, "%lf", &w);
+	if (w < 0.0 || w > 2.0) {
+	    sprintf(tmpbuf, "boosting cost must be in [0,2]\n");
+	    G_fatal_error(tmpbuf);
+	}
+	sscanf(opt19->answer, "%d", &weights_boosting);
+	if ((weights_boosting != 0) && (weights_boosting != 1)) {
+	    sprintf(tmpbuf, "weights_boosting must be 0 or 1\n");
+	    G_fatal_error(tmpbuf);
+	}
+	sscanf(opt21->answer, "%d", &soft_margin_boosting);
+	if ((soft_margin_boosting != 0) && (soft_margin_boosting != 1)) {
+	    sprintf(tmpbuf, "soft_margin_boosting must be 0 or 1\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (opt24->answer) {
+	    sscanf(opt24->answer, "%d", &parallel_boosting);
+	    if ((parallel_boosting <= boosting) && (parallel_boosting > 0)) {
+		sprintf(tmpbuf, "parallel_boosting must be > boosting\n");
+		G_fatal_error(tmpbuf);
+	    }
+	}
+    }
+
+    /*read features */
+    read_features(opt1->answer, &features, features.npc);
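+    /* two-class problems are remapped to the labels {-1, +1}, the
+       convention assumed by the svm and boosting code below */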
+    if (features.nclasses == 2) {
+	if ((features.p_classes[0] * features.p_classes[1]) != -1) {
+	    fprintf(stderr, "class %d interpreted as class -1\n",
+		    features.p_classes[0]);
+	    fprintf(stderr, "class %d interpreted as class 1\n",
+		    features.p_classes[1]);
+
+	    for (i = 0; i < features.nexamples; i++) {
+		if (features.class[i] == features.p_classes[0]) {
+		    features.class[i] = -1;
+		}
+		else {
+		    features.class[i] = 1;
+		}
+	    }
+	    features.p_classes[0] = -1;
+	    features.p_classes[1] = 1;
+	}
+    }
+
+    /*now that the classes are known, read the misclassification costs
+       for the trees */
+
+    if (flag_t->answer) {
+	tree_costs = (double *)G_calloc(features.nclasses, sizeof(double));
+	if (opt20->answers) {
+	    j = 0;
+	    for (i = 0; (tmp_costs = opt20->answers[i]); i++) {
+		j += 1;
+	    }
+
+	    if (j > features.nclasses)
+		j = features.nclasses;
+
+	    for (i = 0; i < j; i++)
+		sscanf(opt20->answers[i], "%lf", &(tree_costs[i]));
+
+	    for (i = j; i < features.nclasses; i++)
+		tree_costs[i] = 1.0;
+
+
+	}
+	else
+	    for (i = 0; i < features.nclasses; i++)
+		tree_costs[i] = 1.0;
+    }
+
+
+
+    /*read test features */
+    testset = 0;
+    if (opt16->answer) {
+	testset = 1;
+	read_features(opt16->answer, &test_features, features.npc);
+	if (test_features.nclasses == 2) {
+	    if ((test_features.p_classes[0] * test_features.p_classes[1]) !=
+		-1) {
+		fprintf(stderr, "class %d interpreted as class -1\n",
+			test_features.p_classes[0]);
+		fprintf(stderr, "class %d interpreted as class 1\n",
+			test_features.p_classes[1]);
+
+		for (i = 0; i < test_features.nexamples; i++) {
+		    if (test_features.class[i] == test_features.p_classes[0]) {
+			test_features.class[i] = -1;
+		    }
+		    else {
+			test_features.class[i] = 1;
+		    }
+		}
+		test_features.p_classes[0] = -1;
+		test_features.p_classes[1] = 1;
+	    }
+	}
+    }
+
+    /*read validation features */
+    if ((opt26->answer) && (reg > 0)) {
+	read_features(opt26->answer, &validation_features, features.npc);
+	if (validation_features.nclasses == 2) {
+	    if ((validation_features.p_classes[0] *
+		 validation_features.p_classes[1]) != -1) {
+		fprintf(stderr, "class %d interpreted as class -1\n",
+			validation_features.p_classes[0]);
+		fprintf(stderr, "class %d interpreted as class 1\n",
+			validation_features.p_classes[1]);
+
+		for (i = 0; i < validation_features.nexamples; i++) {
+		    if (validation_features.class[i] ==
+			validation_features.p_classes[0]) {
+			validation_features.class[i] = -1;
+		    }
+		    else {
+			validation_features.class[i] = 1;
+		    }
+		}
+		validation_features.p_classes[0] = -1;
+		validation_features.p_classes[1] = 1;
+	    }
+	}
+    }
+    else if ((opt16->answer) && (reg > 0)) {
+	fprintf(stderr, "Regularized adaboost: validation data not found\n");
+	exit(-1);
+    }
+
+    /*single models */
+    if ((bagging == 0) && (boosting == 0)) {
+	if (flag_t->answer) {
+	    /*tree */
+	    compute_tree(&tree, features.nexamples, features.examples_dim,
+			 features.value, features.class, features.nclasses,
+			 features.p_classes, tree_stamps, tree_minsize,
+			 tree_costs);
+
+	    /*model test and output */
+	    write_tree(opt2->answer, &tree, &features);
+
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    test_tree(&tree, &features, outfile);
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		test_tree(&tree, &test_features, outfile);
+	    }
+	    return 0;
+	}
+
+	if (flag_g->answer) {
+	    /*gm */
+	    compute_gm(&gm, features.nexamples, features.examples_dim,
+		       features.value, features.class, features.nclasses,
+		       features.p_classes);
+
+	    /*model test and output */
+	    write_gm(opt2->answer, &gm, &features);
+
+	    compute_test_gm(&gm);
+
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    test_gm(&gm, &features, outfile);
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		test_gm(&gm, &test_features, outfile);
+	    }
+	    return 0;
+	}
+
+	if (flag_n->answer) {
+	    /*nearest neighbours */
+	    compute_nn(&nn, features.nexamples, features.examples_dim,
+		       features.value, features.class);
+
+	    /*model test and output */
+	    write_nn(opt2->answer, &nn, &features);
+
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    test_nn(&nn, &features, nn.k, outfile);
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		test_nn(&nn, &test_features, nn.k, outfile);
+	    }
+	    return 0;
+	}
+
+	if (flag_s->answer) {
+	    /*svm */
+
+	    if (features.nclasses != 2) {
+		sprintf(tmpbuf, "svm works only with 2 class problems\n");
+		G_fatal_error(tmpbuf);
+	    }
+
+	    /*svm costs */
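+	    /* per-example weights: svm_cost > 0 downweights the class -1
+	       examples, svm_cost < 0 downweights the class +1 examples,
+	       svm_cost = 0 leaves a standard SVM */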
+	    svm_W = (double *)G_calloc(features.nexamples, sizeof(double));
+	    for (i = 0; i < features.nexamples; i++) {
+		svm_W[i] = 1.;
+		if (svm_cost > 0) {
+		    if (features.class[i] < 0)
+			svm_W[i] = 1. - svm_cost;
+		}
+		else if (svm_cost < 0) {
+		    if (features.class[i] > 0)
+			svm_W[i] = 1. + svm_cost;
+		}
+	    }
+	    svm.cost = svm_cost;
+
+	    compute_svm(&svm, features.nexamples, features.examples_dim,
+			features.value, features.class, svm_kernel, svm_kp,
+			svm_C, svm_tol, svm_eps, svm_maxloops, svm_verbose,
+			svm_W);
+	    write_svm(opt2->answer, &svm, &features);
+
+	    /*model test and output */
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    test_svm(&svm, &features, outfile);
+
+	    /*if requested, leave-one-out error estimate */
+	    if (svm_l1o == 1) {
+		fprintf(stdout, "Leave one out error estimate\n");
+		estimate_cv_error(&svm);
+	    }
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		test_svm(&svm, &test_features, outfile);
+	    }
+
+	    return 0;
+	}
+    }
+
+    /*bagging models */
+    if (bagging > 0) {
+	if (flag_n->answer) {
+	    sprintf(tmpbuf,
+		    "Sorry, bagging of nearest neighbor not yet implemented\n\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (flag_g->answer) {
+	    sprintf(tmpbuf,
+		    "Sorry, bagging of gaussian mixture not yet implemented\n\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (flag_t->answer) {
+	    /*trees */
+	    compute_tree_bagging(&btree, bagging, features.nexamples,
+				 features.examples_dim, features.value,
+				 features.class, features.nclasses,
+				 features.p_classes, tree_stamps,
+				 tree_minsize, tree_costs);
+
+	    /*model test and output */
+	    write_bagging_boosting_tree(opt2->answer, &btree, &features);
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    if (!progressive_error)
+		test_btree(&btree, &features, outfile);
+	    else
+		test_btree_progressive(&btree, &features, outfile);
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		if (!progressive_error)
+		    test_btree(&btree, &test_features, outfile);
+		else
+		    test_btree_progressive(&btree, &test_features, outfile);
+	    }
+	    return 0;
+	}
+	if (flag_s->answer) {
+	    /*svm */
+	    if (features.nclasses != 2) {
+		sprintf(tmpbuf, "svm works only with 2 class problems\n");
+		G_fatal_error(tmpbuf);
+	    }
+
+	    svm_W = (double *)G_calloc(features.nexamples, sizeof(double));
+	    for (i = 0; i < features.nexamples; i++)
+		svm_W[i] = 1.;
+
+	    compute_svm_bagging(&bsvm, bagging, features.nexamples,
+				features.examples_dim, features.value,
+				features.class, svm_kernel, svm_kp,
+				svm_C, svm_tol, svm_eps, svm_maxloops,
+				svm_verbose, svm_W);
+
+	    /*model test and output */
+	    write_bagging_boosting_svm(opt2->answer, &bsvm, &features);
+
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    if (!progressive_error)
+		test_bsvm(&bsvm, &features, outfile);
+	    else
+		test_bsvm_progressive(&bsvm, &features, outfile);
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		if (!progressive_error)
+		    test_bsvm(&bsvm, &test_features, outfile);
+		else
+		    test_bsvm_progressive(&bsvm, &test_features, outfile);
+	    }
+	    return 0;
+	}
+    }
+
+    /*boosting models */
+    if (boosting > 0) {
+	if (flag_n->answer) {
+	    sprintf(tmpbuf,
+		    "Sorry, boosting of nearest neighbor not yet implemented\n\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (flag_g->answer) {
+	    sprintf(tmpbuf,
+		    "Sorry, boosting of gaussian mixture not yet implemented\n\n");
+	    G_fatal_error(tmpbuf);
+	}
+
+	if (features.nclasses != 2) {
+	    sprintf(tmpbuf, "boosting works only with 2 class problems\n");
+	    G_fatal_error(tmpbuf);
+	}
+	if (flag_t->answer) {
+	    /*trees */
+	    /*regularized adaboost */
+	    if ((parallel_boosting == 0) &&
+		((reg > 0) || (misclass_ratio < 1.00))) {
+		misratio =
+		    (double *)G_calloc(features.nexamples, sizeof(double));
+		compute_tree_boosting_reg(&btree, boosting, w,
+					  features.nexamples,
+					  features.examples_dim,
+					  features.value, features.class,
+					  features.nclasses,
+					  features.p_classes, tree_stamps,
+					  tree_minsize, weights_boosting,
+					  tree_costs, misratio);
+		if (reg_verbose != 0)
+		    for (i = 0; i < features.nexamples; i++) {
+			fprintf(stdout,
+				"Misclassification ratio of point %d is %e:\n",
+				i, misratio[i]);
+		    }
+	    }
+
+	    /*standard adaboost */
+	    if ((parallel_boosting == 0) && (reg == 0) &&
+		(misclass_ratio == 1.00)) {
+		compute_tree_boosting(&btree, boosting, w, features.nexamples,
+				      features.examples_dim, features.value,
+				      features.class, features.nclasses,
+				      features.p_classes, tree_stamps,
+				      tree_minsize, weights_boosting,
+				      tree_costs);
+	    }
+	    /*parallel adaboost */
+	    else if (parallel_boosting > 0) {
+		compute_tree_boosting_parallel(&btree, boosting,
+					       parallel_boosting, w,
+					       features.nexamples,
+					       features.examples_dim,
+					       features.value, features.class,
+					       features.nclasses,
+					       features.p_classes,
+					       tree_stamps, tree_minsize,
+					       weights_boosting, tree_costs);
+
+		boosting = parallel_boosting;
+	    }
+
+	    /*if requested, write the boosting weights to the output */
+	    if (weights_boosting == 1) {
+		double *tmparray, *hist;
+
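+		/* append the mean, variance and entropy of each example's
+		   weight evolution as three extra columns of the output */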
+		hist = (double *)G_calloc(10000, sizeof(double));
+		tmparray = (double *)G_calloc(boosting, sizeof(double));
+		for (i = 0; i < features.nexamples; i++) {
+		    for (j = 0; j < boosting; j++)
+			tmparray[j] = btree.w_evolution[i][j];
+
+		    btree.w_evolution[i][boosting] =
+			mean_of_double_array(tmparray, boosting);
+		    btree.w_evolution[i][boosting + 1] =
+			var_of_double_array_given_mean(tmparray, boosting,
+						       btree.w_evolution[i]
+						       [boosting]);
+		    histo(tmparray, boosting, hist, 2 * boosting);
+		    btree.w_evolution[i][boosting + 2] =
+			Entropy(hist, 2 * boosting, 0.000001);
+
+		}
+		sprintf(outfile, "%s_weights_boosting", opt2->answer);
+		write_matrix(outfile, btree.w_evolution, features.nexamples,
+			     boosting + 3);
+
+	    }
+
+	    /*if requested, soft margin boosting (experimental) */
+	    if (soft_margin_boosting) {
+		double *alpha, *beta, **M;
+
+		alpha =
+		    (double *)G_calloc(features.nexamples, sizeof(double));
+		beta = (double *)G_calloc(boosting, sizeof(double));
+
+		M = (double **)G_calloc(features.nexamples, sizeof(double *));
+		for (i = 0; i < features.nexamples; i++)
+		    M[i] = (double *)G_calloc(boosting, sizeof(double));
+
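+		/* M[i][j] is the margin of tree j on example i: +1 when the
+		   tree predicts the true class, -1 otherwise */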
+		for (i = 0; i < features.nexamples; i++)
+		    for (j = 0; j < boosting; j++)
+			M[i][j] = features.class[i] *
+			    predict_tree_multiclass(&(btree.tree[j]),
+						    features.value[i]);
+
+
+		maximize(alpha, features.nexamples, beta, boosting, M);
+
+
+		for (i = 0; i < features.nexamples; i++) {
+		    fprintf(stderr, "ALPHA[%d]=%e\n", i, alpha[i]);
+		}
+		fprintf(stderr, "\n");
+
+		for (i = 0; i < boosting; i++) {
+		    fprintf(stderr, "BETA[%d]=%e\n", i, beta[i]);
+		}
+		fprintf(stderr, "\n");
+
+		for (i = 0; i < boosting; i++) {
+		    btree.weights[i] = .0;
+		    for (j = 0; j < features.nexamples; j++)
+			btree.weights[i] += alpha[j] * M[j][i];
+		    btree.weights[i] += beta[i];
+		}
+	    }
+
+	    /*model test and output */
+	    if ((reg == 0) && (misclass_ratio == 1.00)) {
+		write_bagging_boosting_tree(opt2->answer, &btree, &features);
+	    }
+
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    if (!progressive_error) {
+
+		if ((reg > 0) && (misclass_ratio == 1.00)) {
+		    fprintf(stdout, "Prediction on training data: %s\n",
+			    opt1->answer);
+		    test_btree_reg(&btree, &features, outfile, misratio);
+		}
+		else if (misclass_ratio < 1.00) {
+		    /* if requested, shave the hard points (points with misclassification ratio > misclass_ratio),
+		       compute the new tree model and write the output. */
+		    sprintf(outfile1, "%s_ts_prediction", opt2->answer);
+		    shaving_and_compute(boosting, w, features.nexamples,
+					features.examples_dim, features.value,
+					features.class, features.nclasses,
+					features.p_classes, tree_stamps,
+					tree_minsize, weights_boosting,
+					tree_costs, misratio, reg_verbose,
+					misclass_ratio, outfile, opt2->answer,
+					features, test_features, outfile1,
+					testset);
+		}
+		else {
+		    fprintf(stdout, "Prediction on training data: %s\n",
+			    opt1->answer);
+		    test_btree(&btree, &features, outfile);
+		}
+	    }
+	    else {
+		fprintf(stdout, "Prediction on training data: %s\n",
+			opt1->answer);
+		test_btree_progressive(&btree, &features, outfile);
+	    }
+
+	    if (((opt16->answer) && (misclass_ratio == 1.00)) ||
+		((reg > 0) && (opt26->answer))) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+
+		/*if requested, compute the regularized adaboost, test the model and write the output */
+		{
+		    if ((reg > 0) && (opt26->answer)) {
+			regularized_boosting(boosting, w, features.nexamples,
+					     features.examples_dim,
+					     features.value, features.class,
+					     features.nclasses,
+					     features.p_classes, tree_stamps,
+					     tree_minsize, weights_boosting,
+					     tree_costs, misratio, reg,
+					     test_features, outfile,
+					     validation_features, reg_verbose,
+					     opt16->answer, opt26->answer,
+					     opt2->answer, features, testset);
+		    }
+
+		    else if (!progressive_error) {
+			fprintf(stdout, "Prediction on test data: %s\n",
+				opt16->answer);
+			test_btree(&btree, &test_features, outfile);
+		    }
+		    else {
+			fprintf(stdout, "Prediction on test data: %s\n",
+				opt16->answer);
+			test_btree_progressive(&btree, &test_features,
+					       outfile);
+		    }
+		}
+	    }
+	    return 0;
+	}
+
+	if (flag_s->answer) {
+	    /*svm */
+	    svm_W = (double *)G_calloc(features.nexamples, sizeof(double));
+	    for (i = 0; i < features.nexamples; i++)
+		svm_W[i] = 1.;
+
+
+	    compute_svm_boosting(&bsvm, boosting, w, features.nexamples,
+				 features.examples_dim, features.value,
+				 features.class, features.nclasses,
+				 features.p_classes, svm_kernel, svm_kp,
+				 svm_C, svm_tol, svm_eps, svm_maxloops,
+				 svm_verbose, svm_W, weights_boosting);
+
+	    /*if requested, write the boosting weights to the output */
+	    if (weights_boosting == 1) {
+		double *tmparray, *hist;
+
+		hist = (double *)G_calloc(10000, sizeof(double));
+		tmparray = (double *)G_calloc(boosting, sizeof(double));
+		for (i = 0; i < features.nexamples; i++) {
+		    for (j = 0; j < boosting; j++)
+			tmparray[j] = bsvm.w_evolution[i][j];
+
+		    bsvm.w_evolution[i][boosting] =
+			mean_of_double_array(tmparray, boosting);
+		    bsvm.w_evolution[i][boosting + 1] =
+			var_of_double_array_given_mean(tmparray, boosting,
+						       bsvm.w_evolution[i]
+						       [boosting]);
+		    histo(tmparray, boosting, hist, 2 * boosting);
+		    bsvm.w_evolution[i][boosting + 2] =
+			Entropy(hist, 2 * boosting, 0.000001);
+
+		}
+		sprintf(outfile, "%s_weights_boosting", opt2->answer);
+		write_matrix(outfile, bsvm.w_evolution, features.nexamples,
+			     boosting + 3);
+
+	    }
+
+	    /*model test and output */
+	    write_bagging_boosting_svm(opt2->answer, &bsvm, &features);
+
+	    sprintf(outfile, "%s_tr_prediction", opt2->answer);
+	    fprintf(stdout, "Prediction on training data: %s\n",
+		    opt1->answer);
+	    if (!progressive_error)
+		test_bsvm(&bsvm, &features, outfile);
+	    else
+		test_bsvm_progressive(&bsvm, &features, outfile);
+	    if (opt16->answer) {
+		sprintf(outfile, "%s_ts_prediction", opt2->answer);
+		fprintf(stdout, "Prediction on test data: %s\n",
+			opt16->answer);
+		if (!progressive_error)
+		    test_bsvm(&bsvm, &test_features, outfile);
+		else
+		    test_bsvm_progressive(&bsvm, &test_features, outfile);
+	    }
+	    return 0;
+	}
+    }
+
+    sprintf(tmpbuf, "Please select a model\n");
+    G_warning(tmpbuf);
+    return 0;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_sites_aggregate/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_sites_aggregate/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_sites_aggregate/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,245 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+#include <grass/Vect.h>
+
+
+#define MAXPNTS 1000
+
+int main(int argc, char *argv[])
+{
+    struct GModule *module;
+    struct Option *opt1, *opt2, *opt3, *opt4;
+    char *line = NULL;
+    FILE *fp;
+    int i, j, k, npoints;
+    double **data;
+    int *mark;
+    double max_dist, min_dist;
+    double tmpx, tmpy;
+    int np, np2;
+    double tmpdist;
+    int *indice, *indice2;
+    double *dist;
+    int *indxx;
+    int count;
+
+    struct Map_info Out;
+    struct Map_info Out2;
+    struct line_pnts *Points;
+    struct line_cats *Cats;
+    struct line_pnts *Points2;
+    struct line_cats *Cats2;
+
+
+    opt1 = G_define_option();
+    opt1->key = "sites";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description = "site file";
+
+    opt2 = G_define_option();
+    opt2->key = "max_dist";
+    opt2->type = TYPE_DOUBLE;
+    opt2->required = YES;
+    opt2->description = "count sites at a distance of less than max_dist";
+
+    opt3 = G_define_option();
+    opt3->key = "min_dist";
+    opt3->type = TYPE_DOUBLE;
+    opt3->required = YES;
+    opt3->description = "count sites at a distance of less than min_dist";
+
+
+    opt4 = G_define_standard_option(G_OPT_V_OUTPUT);
+    opt4->key = "link";
+    opt4->description = "Output vector with lines";
+
+ /***** Start of main *****/
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to aggregate sites. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    if (G_parser(argc, argv) < 0)
+	exit(EXIT_FAILURE);
+
+    sscanf(opt2->answer, "%lf", &max_dist);
+    max_dist *= max_dist;
+    sscanf(opt3->answer, "%lf", &min_dist);
+    min_dist *= min_dist;
+
+    Vect_open_new(&Out2, opt4->answer, 0);
+    sprintf(opt4->answer, "%s_graph", opt4->answer);
+    Vect_open_new(&Out, opt4->answer, 0);
+
+    Points = Vect_new_line_struct();
+    Cats = Vect_new_cats_struct();
+    Points2 = Vect_new_line_struct();
+    Cats2 = Vect_new_cats_struct();
+
+    data = (double **)G_calloc(1, sizeof(double *));
+    data[0] = (double *)G_calloc(3, sizeof(double));
+
+    fp = fopen(opt1->answer, "r");
+
+    /* better to use G_tokenize() here? */
+    npoints = 0;
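+    /* each input line is expected to carry three pipe-separated fields:
+       easting|northing|value */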
+    while ((line = GetLine(fp)) != NULL) {
+	sscanf(line, "%lf", &(data[npoints][0]));
+	line = (char *)strchr(line, '|');
+	line++;
+	sscanf(line, "%lf", &(data[npoints][1]));
+	line = (char *)strchr(line, '|');
+	line++;
+	sscanf(line, "%lf", &(data[npoints][2]));
+	npoints++;
+	data = (double **)G_realloc(data, (npoints + 1) * sizeof(double *));
+	data[npoints] = (double *)G_calloc(3, sizeof(double));
+    }
+
+    mark = (int *)G_calloc(npoints, sizeof(int));
+    for (i = 0; i < npoints; i++)
+	mark[i] = 0;
+
+    indxx = (int *)G_calloc(MAXPNTS, sizeof(int));
+    dist = (double *)G_calloc(MAXPNTS, sizeof(double));
+    indice = (int *)G_calloc(MAXPNTS, sizeof(int));
+    indice2 = (int *)G_calloc(MAXPNTS, sizeof(int));
+
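+    /* for each unmarked site, collect the unmarked sites within max_dist;
+       if the group holds at most twice the site's value field, average up
+       to 'value' nearest sites into one centroid, otherwise split the group
+       into sub-clusters of sites within min_dist and average each of them */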
+    for (i = 0; i < npoints; i++) {
+	if (mark[i] == 0) {
+	    np = 0;
+	    for (j = i; j < npoints; j++) {
+		if (np == MAXPNTS) {
+		    fprintf(stderr,
+			    "Too many nearest points. Maximum allowed (see var MAXPNTS): %d\n",
+			    MAXPNTS);
+		    exit(-1);
+		}
+		if (mark[j] == 0) {
+		    if ((tmpdist =
+			 squared_distance(data[i], data[j], 2)) < max_dist) {
+			indice[np] = j;
+			dist[np] = tmpdist;
+			np++;
+		    }
+		}
+	    }
+
+	    if (np <= 2 * data[i][2]) {
+
+		indexx_1(np, dist, indxx);
+
+		if (np > data[i][2])
+		    count = data[i][2];
+		else
+		    count = np;
+
+		tmpx = 0;
+		tmpy = 0;
+		for (j = 0; j < count; j++) {
+		    if (mark[indice[indxx[j]]] == 0) {
+			tmpx += data[indice[indxx[j]]][0];
+			tmpy += data[indice[indxx[j]]][1];
+			mark[indice[indxx[j]]] = 1;
+		    }
+		}
+		tmpx /= count;
+		tmpy /= count;
+
+		Vect_reset_line(Points2);
+		Vect_append_point(Points2, tmpx, tmpy, (double)count);
+		Vect_write_line(&Out2, GV_POINT, Points2, Cats2);
+
+
+		for (j = 0; j < count; j++) {
+		    Vect_reset_line(Points);
+		    Vect_append_point(Points, data[indice[indxx[j]]][0],
+				      data[indice[indxx[j]]][1], 0.0);
+		    Vect_append_point(Points, tmpx, tmpy, 0.0);
+		    Vect_write_line(&Out, GV_LINE, Points, Cats);
+		}
+
+	    }
+	    else {
+		for (j = 0; j < np; j++) {
+		    if (mark[indice[j]] == 0) {
+			np2 = 0;
+			for (k = 0; k < np; k++) {
+			    if (mark[indice[k]] == 0) {
+				if ((tmpdist =
+				     squared_distance(data[indice[j]],
+						      data[indice[k]], 2))
+				    < min_dist) {
+				    indice2[np2] = indice[k];
+				    np2++;
+				}
+			    }
+			}
+
+			tmpx = 0;
+			tmpy = 0;
+			for (k = 0; k < np2; k++) {
+			    tmpx += data[indice2[k]][0];
+			    tmpy += data[indice2[k]][1];
+			    mark[indice2[k]] = 1;
+			}
+			tmpx /= np2;
+			tmpy /= np2;
+
+			Vect_reset_line(Points2);
+			Vect_append_point(Points2, tmpx, tmpy, np2);
+			Vect_write_line(&Out2, GV_POINT, Points2, Cats2);
+
+
+			for (k = 0; k < np2; k++) {
+			    Vect_reset_line(Points);
+			    Vect_append_point(Points, data[indice2[k]][0],
+					      data[indice2[k]][1], 0.0);
+			    Vect_append_point(Points, tmpx, tmpy, 0.0);
+			    Vect_write_line(&Out, GV_LINE, Points, Cats);
+			}
+		    }
+
+		}
+
+	    }
+	}
+	percent(i, npoints, 1);
+    }
+    Vect_build(&Out);
+    Vect_close(&Out);
+    Vect_build(&Out2);
+    Vect_close(&Out2);
+
+    return 1;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_statistics/cell.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_statistics/cell.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_statistics/cell.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,112 @@
+#include <stdlib.h>
+#include <grass/display.h>
+#include <grass/raster.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+/*#include "edit.h" */
+#include "localproto.h"
+
+static int cell_draw(char *, char *, struct Colors *, int);
+
+
+/*!
+ * \brief 
+ *
+ * If the map is a floating-point map, read the map using
+ * <tt>G_get_d_raster_row()</tt> and plot using <tt>D_draw_d_cell()</tt>. If the
+ * map is an integer map, read the map using <tt>G_get_c_raster_row()</tt> and
+ * plot using <tt>D_draw_cell()</tt>.
+ *
+ *  \param name
+ *  \param mapset
+ *  \param overlay
+ *  \return int
+ */
+
+int Dcell(char *name, char *mapset, int overlay)
+{
+    struct Cell_head wind;
+    struct Colors colors;
+    char buff[128];
+
+    G_get_set_window(&wind);
+
+    if (D_check_map_window(&wind))
+	G_fatal_error(_("Setting map window"));
+
+    if (G_set_window(&wind) == -1)
+	G_fatal_error(_("Current window not settable"));
+
+    /* Get existing map window for this graphics window, or save window */
+    /* cell maps wipe out a picture, so we clear info on the window too */
+    if (!overlay && D_clear_window())
+	G_fatal_error(_("Can't clear current graphics window"));
+
+    /* Save the current map window with the graphics window */
+    D_check_map_window(&wind);
+    G_set_window(&wind);
+
+    /* Set the colors for the display */
+    if (G_read_colors(name, mapset, &colors) == -1)
+	G_fatal_error(_("Color file for [%s] not available"), name);
+
+    /* Go draw the cell file */
+    cell_draw(name, mapset, &colors, overlay);
+
+    /* release the colors now */
+    G_free_colors(&colors);
+
+    /* record the cell file */
+    /* If overlay add it to the list instead of setting the cell name */
+    /* added 6/91 DBS @ CWU */
+    if (overlay) {
+	sprintf(buff, "d.rast -o map=%s",
+		G_fully_qualified_name(name, mapset));
+	D_add_to_list(buff);
+    }
+    else {
+	D_set_cell_name(G_fully_qualified_name(name, mapset));
+    }
+
+    return 0;
+}
+
+/* this function was modified to read and draw raster cells as doubles */
+static int cell_draw(char *name, char *mapset, struct Colors *colors,
+		     int overlay)
+{
+    int cellfile;
+    DCELL *xarray;
+    int cur_A_row;
+    int t, b, l, r;
+
+    /* Set up the screen, conversions, and graphics */
+    D_get_screen_window(&t, &b, &l, &r);
+    if (D_cell_draw_setup(t, b, l, r))
+	G_fatal_error(_("Cannot use current window"));
+
+    D_set_overlay_mode(overlay);
+
+    /* Make sure map is available */
+    if ((cellfile = G_open_cell_old(name, mapset)) == -1)
+	G_fatal_error(_("Not able to open cellfile for [%s]"), name);
+
+    /* Allocate space for cell buffer */
+    xarray = G_allocate_d_raster_buf();
+
+    /* loop for array rows */
+    for (cur_A_row = 0; cur_A_row != -1;) {
+	/* Get window (array) row currently required */
+	G_get_d_raster_row(cellfile, xarray, cur_A_row);
+
+	/* Draw the cell row, and get the next row number */
+	cur_A_row = D_draw_d_raster(cur_A_row, xarray, colors);
+    }
+    R_flush();
+
+    /* Wrap up and return */
+    G_close_cell(cellfile);
+    G_free(xarray);
+
+    return (0);
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_statistics/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_statistics/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_statistics/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,553 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+#include <grass/raster.h>
+/*#include "edit.h" */
+#include "localproto.h"
+
+#define TINY 1.0e-20
+#define MAXLIMITS 10000
+
+void pearsn();
+double erfcc();
+
+int main(int argc, char *argv[])
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt4;
+    Features features;
+    char tempbuf[500];
+    char *tmpbuf;
+    int corr[MAXLIMITS];
+    int i, j, h, k;
+    double **mat;
+    double min, max, NEWmin, NEWmax, a;
+    CELL **intmat;
+    struct Cell_head cellhd;
+    struct Cell_head cellhd_orig;
+    char outputmap_name[500];
+    int FD;
+    int npca;
+    double sqrt_npca;
+    int ROW, COL;
+    int index;
+    char *outputxgraph_name;
+    FILE *FP;
+    double sum;
+    double tmp;
+    double *vett1, *vett2;
+    int maxeig = 0;
+    int layer;
+    int ncorr;
+    int *npoints_for_class;
+    double **DataMat;
+    double d, prob, coeffcorr, pvalue, zvalue;
+    double mean, sd;
+    int *indexA;
+
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to calculate feature statistics. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt3 = G_define_option();
+    opt3->key = "layer";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = NO;
+    opt3->description =
+	"Number of the layer to be analyzed (with respect to the principal components).\n\t\tIgnored if the features file does not contain a principal components model.";
+    opt3->answer = "1";
+
+    opt2 = G_define_option();
+    opt2->key = "npc";
+    opt2->type = TYPE_INTEGER;
+    opt2->required = NO;
+    opt2->description =
+	"Number of principal components to be analyzed.\n\t\tIf this number is greater than the dimension of the data,\n\t\tall the principal components will be considered.\n\t\tIgnored if the features file does not contain a principal components model.";
+
+    opt4 = G_define_option();
+    opt4->key = "corr";
+    opt4->type = TYPE_INTEGER;
+    opt4->required = NO;
+    opt4->multiple = YES;
+    opt4->description = "Indexes (1-based) of the columns for pairwise correlation (0 = all columns).";
+
+  /***** Start of main *****/
+
+    if (G_parser(argc, argv) < 0)
+	exit(-1);
+
+    sscanf(opt3->answer, "%d", &layer);
+
+    read_features(opt1->answer, &features, -1);
+    if ((layer <= 0) || (layer > features.training.nlayers)) {
+	sprintf(tempbuf, "Number of layers is %d\n",
+		features.training.nlayers);
+	G_fatal_error(tempbuf);
+    }
+
+    ncorr = 0;
+    /*get index for correlation */
+    if (opt4->answers) {
+	for (i = 0; (tmpbuf = opt4->answers[i]); i++, ncorr++) {
+	    if (i == MAXLIMITS)
+		break;
+	    sscanf(tmpbuf, "%d", &(corr[i]));
+	    if (corr[i] == 0) {
+		ncorr = features.examples_dim;
+		for (j = 0; j < ncorr; j++) {
+		    corr[j] = j + 1;
+		}
+		break;
+	    }
+	    if (corr[i] < 0 || corr[i] > features.examples_dim) {
+		sprintf(tempbuf,
+			"Column index negative or out of range\n");
+		G_fatal_error(tempbuf);
+	    }
+	}
+
+	if (ncorr == 1) {
+	    sprintf(tempbuf, "Cannot compute correlation with a single column\n");
+	    G_fatal_error(tempbuf);
+	}
+
+	/* compute the correlation between the selected variables */
+
+	vett1 = (double *)G_calloc(features.nexamples, sizeof(double));
+	vett2 = (double *)G_calloc(features.nexamples, sizeof(double));
+
+	for (i = 0; i < ncorr; i++) {
+	    for (k = 0; k < features.nexamples; k++) {
+		vett1[k] = features.value[k][corr[i] - 1];
+	    }
+	    for (j = i + 1; j < ncorr; j++) {
+		for (k = 0; k < features.nexamples; k++) {
+		    vett2[k] = features.value[k][corr[j] - 1];
+		}
+
+		pearsn(vett1, vett2, features.nexamples, &coeffcorr, &pvalue,
+		       &zvalue);
+
+		fprintf(stdout,
+			"features %d, %d:\t correlation coeff. %f \t pvalue %f",
+			corr[i], corr[j], coeffcorr, pvalue);
+
+		if (pvalue < 0.001)
+		    fprintf(stdout, " (***)\n");
+		else if (pvalue < 0.01)
+		    fprintf(stdout, " (**)\n");
+		else if (pvalue < 0.05)
+		    fprintf(stdout, " (*)\n");
+		else
+		    fprintf(stdout, "\n");
+	    }
+	}
+	exit(0);
+    }
+
+
+
+    npoints_for_class = (int *)G_calloc(features.nclasses, sizeof(int));
+    for (i = 0; i < features.nexamples; i++) {
+	for (j = 0; j < features.nclasses; j++) {
+	    if (features.class[i] == features.p_classes[j]) {
+		npoints_for_class[j] += 1;
+	    }
+	}
+    }
+
+    DataMat = (double **)G_calloc(features.nclasses, sizeof(double *));
+    for (i = 0; i < features.nclasses; i++) {
+	DataMat[i] = (double *)G_calloc(npoints_for_class[i], sizeof(double));
+    }
+
+    indexA = (int *)G_calloc(features.nclasses, sizeof(int));
+
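+    /* for every feature, compare each pair of classes with a two-sample
+       Kolmogorov-Smirnov test (kstwo) and a t-test for unequal variances
+       (tutest) */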
+    for (i = 0; i < features.examples_dim; i++) {
+	for (k = 0; k < features.nclasses; k++) {
+	    indexA[k] = 0;
+	}
+	for (j = 0; j < features.nexamples; j++) {
+	    for (k = 0; k < features.nclasses; k++) {
+		if (features.class[j] == features.p_classes[k]) {
+		    DataMat[k][indexA[k]] = features.value[j][i];
+		    indexA[k] += 1;
+		    break;
+		}
+	    }
+	}
+	for (k = 0; k < features.nclasses - 1; k++) {
+	    mean = mean_of_double_array(DataMat[k], npoints_for_class[k]);
+	    sd = sd_of_double_array_given_mean(DataMat[k],
+					       npoints_for_class[k], mean);
+
+	    fprintf(stdout, "features %d class %d:mean %f sd %f\n", i + 1,
+		    features.p_classes[k], mean, sd);
+	    for (h = k + 1; h < features.nclasses; h++) {
+		mean = mean_of_double_array(DataMat[h], npoints_for_class[h]);
+		sd = sd_of_double_array_given_mean(DataMat[h],
+						   npoints_for_class[h],
+						   mean);
+		fprintf(stdout, "features %d class %d:mean %f sd %f\n", i + 1,
+			features.p_classes[h], mean, sd);
+		kstwo(DataMat[k], npoints_for_class[k], DataMat[h],
+		      npoints_for_class[h], &d, &prob);
+		fprintf(stdout, "features %d: KS-test(class %d-%d): %f\t%f",
+			i + 1, features.p_classes[k], features.p_classes[h],
+			d, prob);
+		if (prob < 0.001)
+		    fprintf(stdout, " (***)\n");
+		else if (prob < 0.01)
+		    fprintf(stdout, " (**)\n");
+		else if (prob < 0.05)
+		    fprintf(stdout, " (*)\n");
+		else
+		    fprintf(stdout, "\n");
+		tutest(DataMat[k], npoints_for_class[k], DataMat[h],
+		       npoints_for_class[h], &d, &prob);
+		fprintf(stdout, "features %d: T_test(class %d-%d): %f\t%f",
+			i + 1, features.p_classes[k], features.p_classes[h],
+			d, prob);
+		if (prob < 0.001)
+		    fprintf(stdout, " (***)\n");
+		else if (prob < 0.01)
+		    fprintf(stdout, " (**)\n");
+		else if (prob < 0.05)
+		    fprintf(stdout, " (*)\n");
+		else
+		    fprintf(stdout, "\n");
+	    }
+	}
+    }
+
+
+
+
+    layer -= 1;
+
+    if (features.f_pca[0]) {
+	for (i = 2; i < 2 + features.f_pca[1]; i++) {
+	    if (features.f_pca[i] == layer) {
+		/*set number of maps to be displayed */
+		for (i = 0;
+		     i < (features.training.rows * features.training.cols);
+		     i++)
+		    if (features.pca[layer].eigval[i] > .0)
+			maxeig = i + 1;
+
+		if (opt2->answer == NULL)
+		    npca = maxeig;
+		else {
+		    sscanf(opt2->answer, "%d", &npca);
+		    if (npca <= 0) {
+			sprintf(tempbuf, "npca must be > 0");
+			G_fatal_error(tempbuf);
+		    }
+		}
+		if (npca > maxeig)
+		    npca = maxeig;
+
+
+		if (features.training.rows > 1 && features.training.cols > 1) {
+		    /*make sure a graphics monitor is available */
+		    if (R_open_driver() != 0)
+			G_fatal_error(_("No graphics device selected."));
+		    R_close_driver();
+
+		    /*number of rows and cols on the virtual screen */
+
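+		    /* tile the npca eigenvector images on the smallest
+		       near-square ROW x COL grid that holds them all */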
+		    sqrt_npca = sqrt(npca);
+		    if (((int)sqrt_npca * (int)sqrt_npca) >= npca) {
+			ROW = (int)sqrt_npca;
+			COL = ROW;
+		    }
+		    else {
+			ROW = (int)sqrt_npca + 1;
+			COL = ROW;
+		    }
+		    if ((ROW * (COL - 1)) >= npca)
+			COL = COL - 1;
+
+
+		    /* set region */
+		    G_get_window(&cellhd_orig);
+		    cellhd = cellhd_orig;
+		    cellhd.rows = features.training.rows * ROW;
+		    cellhd.cols = features.training.cols * COL;
+		    cellhd.ew_res = 1.0;
+		    cellhd.ns_res = 1.0;
+		    cellhd.north = (double)(cellhd.rows);
+		    cellhd.south = .0;
+		    cellhd.east = (double)(cellhd.cols);
+		    cellhd.west = .0;
+		    if (G_set_window(&cellhd) == -1) {
+			sprintf(tempbuf, "error setting working window");
+			G_fatal_error(tempbuf);
+		    }
+
+		    /*open output raster map */
+
+		    sprintf(outputmap_name, "%s_tmpimage", opt1->answer);
+
+		    if (outputmap_name != NULL)
+			FD = open_new_CELL(outputmap_name);
+		    else {
+			sprintf(tempbuf, "error setting the output name");
+			G_fatal_error(tempbuf);
+		    }
+
+		    /* alloc memory */
+		    mat = (double **)G_calloc(cellhd.rows, sizeof(double *));
+		    for (i = 0; i < cellhd.rows; i++)
+			mat[i] =
+			    (double *)G_calloc(cellhd.cols, sizeof(double));
+		    intmat = (int **)G_calloc(cellhd.rows, sizeof(int *));
+		    for (i = 0; i < cellhd.rows; i++)
+			intmat[i] = (int *)G_calloc(cellhd.cols, sizeof(int));
+
+		    for (i = 0; i < cellhd.rows; i++)
+			G_zero_cell_buf(intmat[i]);
+
+		    /*compute output raster map */
+		    index = 0;
+		    for (k = 0; k < ROW; k++) {
+			for (h = 0; h < COL; h++) {
+
+			    /* one eigenvector */
+			    if (index < npca) {
+				min = features.pca[layer].eigmat[0][index];
+				max = features.pca[layer].eigmat[0][index];
+
+				for (i = 0; i < features.training.rows; i++)
+				    for (j = 0; j < features.training.cols;
+					 j++) {
+					mat[i][j] =
+					    features.pca[layer].eigmat[i *
+								       features.
+								       training.
+								       cols +
+								       j]
+					    [index];
+					if (mat[i][j] < min)
+					    min = mat[i][j];
+					if (mat[i][j] > max)
+					    max = mat[i][j];
+				    }
+
+				/* rescale the eigenvector values to the 1-255 range */
+				NEWmin = 1.;
+				NEWmax = 255.;
+
+				if (max != min)
+				    a = (NEWmax - NEWmin) / (max - min);
+				else {
+				    sprintf(tempbuf,
+					    "min of eigenvect %d = max of eigenvect %d",
+					    index, index);
+				    G_fatal_error(tempbuf);
+				}
+
+				for (i = 0; i < features.training.rows; i++)
+				    for (j = 0; j < features.training.cols;
+					 j++)
+					intmat[k * features.training.rows +
+					       i][h * features.training.cols +
+						  j] =
+					    (CELL) (a * (mat[i][j] - min) +
+						    NEWmin);
+			    }
+			    index += 1;
+			}
+		    }
+
+		    /*write output map */
+		    for (i = 0; i < cellhd.rows; i++)
+			if (G_put_map_row(FD, intmat[i]) == -1) {
+			    sprintf(tempbuf, "error writing tmp raster map");
+			    G_fatal_error(tempbuf);
+			}
+
+		    if (G_close_cell(FD) == -1) {
+			sprintf(tempbuf, "error closing tmp raster map");
+			G_fatal_error(tempbuf);
+		    }
+
+		    /*colors */
+		    sprintf(tempbuf, "r.colors map=%s color=grey",
+			    outputmap_name);
+		    system(tempbuf);
+
+		    /*graphics */
+		    if (G_put_window(&cellhd) == -1) {
+			sprintf(tempbuf, "error writing working region");
+			G_fatal_error(tempbuf);
+		    }
+		    sprintf(tempbuf, "d.frame -e");
+		    system(tempbuf);
+		    if (R_open_driver() != 0)
+			G_fatal_error(_("No graphics device selected."));
+		    Dcell(outputmap_name, G_mapset(), 0);
+		    R_close_driver();
+		    if (G_put_window(&cellhd_orig) == -1) {
+			sprintf(tempbuf,
+				"error writing original working region");
+			G_fatal_error(tempbuf);
+		    }
+
+
+		    /*remove */
+		    G_remove("cats", outputmap_name);
+		    G_remove("cell", outputmap_name);
+		    G_remove("cell_misc", outputmap_name);
+		    G_remove("cellhd", outputmap_name);
+		    G_remove("colr", outputmap_name);
+		    G_remove("hist", outputmap_name);
+		}
+		/*xgraph 1 */
+		outputxgraph_name = G_tempfile();
+		if ((FP = fopen(outputxgraph_name, "w")) == NULL) {
+		    sprintf(tempbuf, "error opening tmp file for xgraph");
+		    G_fatal_error(tempbuf);
+		}
+
+
+		fprintf(stdout,
+			"Principal components layer %d: cumulative explained variance\n",
+			layer + 1);
+		sum = .0;
+		for (i = 0;
+		     i < (features.training.rows * features.training.cols);
+		     i++) {
+		    features.pca[layer].eigval[i] =
+			features.pca[layer].eigval[i] *
+			features.pca[layer].eigval[i];
+		    sum += features.pca[layer].eigval[i];
+		}
+
+		fprintf(FP, "0 0\n");
+		if (sum != .0) {
+		    tmp = .0;
+		    for (i = 0; i < npca; i++) {
+			fprintf(FP, "%d %f\n", i + 1,
+				tmp + features.pca[layer].eigval[i] / sum);
+			fprintf(stdout, "p.c. %d: %f\n", i + 1,
+				tmp + features.pca[layer].eigval[i] / sum);
+			tmp += features.pca[layer].eigval[i] / sum;
+		    }
+		}
+		else {
+		    sprintf(tempbuf, "division by zero");
+		    G_fatal_error(tempbuf);
+		}
+
+		fclose(FP);
+
+		sprintf(tempbuf, "xgraph -0 variance -P %s",
+			outputxgraph_name);
+		system(tempbuf);
+		sprintf(tempbuf, "rm %s", outputxgraph_name);
+		system(tempbuf);
+		return 0;
+	    }
+	}
+    }
+    return 0;
+}
+
+
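+/* Pearson correlation between x and y (n samples): returns the correlation
+   coefficient r, Fisher's z-transform of r, and the two-sided p-value from
+   Student's t with n-2 degrees of freedom (via the incomplete beta function) */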
+void pearsn(double x[], double y[], int n, double *r, double *prob, double *z)
+{
+    int j;
+    double yt, xt, t, df;
+    double syy = 0.0, sxy = 0.0, sxx = 0.0, ay = 0.0, ax = 0.0;
+    double betai(), erfcc();
+
+
+    /* compute the means */
+
+    for (j = 0; j < n; j++) {
+	ax += x[j];
+	ay += y[j];
+    }
+
+    ax /= n;
+    ay /= n;
+
+    /* compute the correlation coefficient, its p-value and Fisher's z */
+
+    for (j = 0; j < n; j++) {
+	xt = x[j] - ax;
+	yt = y[j] - ay;
+	sxx += xt * xt;
+	syy += yt * yt;
+	sxy += xt * yt;
+    }
+
+    *r = sxy / sqrt(sxx * syy);
+    *z = 0.5 * log((1.0 + (*r) + TINY) / (1.0 - (*r) + TINY));
+    df = n - 2;
+    t = (*r) * sqrt(df / ((1.0 - (*r) + TINY) * (1.0 + (*r) + TINY)));
+    *prob = betai(0.5 * df, 0.5, df / (df + t * t));
+    /* *prob=erfcc(fabs((*z)*sqrt(n-1.0))/1.4142136); */
+}
+
+
+double erfcc(double x)
+{
+    double t, z, ans;
+
+    z = fabs(x);
+    t = 1.0 / (1.0 + 0.5 * z);
+    ans =
+	t * exp(-z * z - 1.26551223 +
+		t * (1.00002368 +
+		     t * (0.37409196 +
+			  t * (0.09678418 +
+			       t * (-0.18628806 +
+				    t * (0.27886807 +
+					 t * (-1.13520398 +
+					      t * (1.48851587 +
+						   t * (-0.82215223 +
+							t *
+							0.17087277)))))))));
+    return x >= 0.0 ? ans : 2.0 - ans;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_subsets/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_subsets/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_subsets/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,636 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ *
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+void write_matrix();
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Flag *flag_c;
+    struct Flag *flag_b;
+    struct Flag *flag_s;
+
+    Features features;
+    Features *tr_features;
+    Features *ts_features;
+
+    char tmpbuf[500];
+    int n_sets;
+    int i, j, k;
+    char fileout[500], filelab[500];
+    FILE *flab;
+    double *prob, *prob_pos, *prob_neg;
+    int *random_labels, *non_extracted, *random_labels_pos,
+	*random_labels_neg;
+    int n_non_extracted;
+    int npos, nneg;
+    int n_extracted;
+    int extracted;
+    int indx;
+    int idum;
+    double probok;
+    int seed;
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to create features files for experiments by applying cross-validation or bootstrap resampling to a features file. "
+	 "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	 "Decision Tree and SVM classification techniques. Also includes "
+	 "cross-validation and bagging methods for model validation.");
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt2 = G_define_option();
+    opt2->key = "n_sets";
+    opt2->type = TYPE_INTEGER;
+    opt2->required = YES;
+    opt2->description =
+	"Number of subsets (>=1). If you set n_sets=1 and select cross-validation,\n\t\tleave-one-out cross-validation will be implemented.";
+
+    opt3 = G_define_option();
+    opt3->key = "seed";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = YES;
+    opt3->description =
+	"Seed for the initialization (>=0), which specifies a starting point\n\t\tfor the random number sequence. Use the same seed to replicate an experiment.";
+    opt3->answer = "0";
+
+    flag_c = G_define_flag();
+    flag_c->key = 'c';
+    flag_c->description = "selected method: cross-validation.";
+
+    flag_b = G_define_flag();
+    flag_b->key = 'b';
+    flag_b->description = "selected method: bootstrap.";
+
+    flag_s = G_define_flag();
+    flag_s->key = 's';
+    flag_s->description =
+	"selected method: stratified bootstrap (works only with two classes).";
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+
+    /*read parameters */
+    sscanf(opt2->answer, "%d", &n_sets);
+    if (n_sets <= 0) {
+	sprintf(tmpbuf, "n_sets must be >0");
+	G_fatal_error(tmpbuf);
+    }
+
+    sscanf(opt3->answer, "%d", &seed);
+    if (seed < 0) {
+	sprintf(tmpbuf, "seed must be >=0");
+	G_fatal_error(tmpbuf);
+    }
+
+    if (!flag_b->answer && !flag_c->answer) {
+	sprintf(tmpbuf, "Neither -b nor -c flag set!\n");
+	G_fatal_error(tmpbuf);
+    }
+
+    /*read features */
+    read_features(opt1->answer, &features, -1);
+
+    if (flag_b->answer && !flag_s->answer) {
+	sprintf(filelab, "%s__bootstrap__labels", opt1->answer);
+	flab = fopen(filelab, "w");
+
+	tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	prob = (double *)G_calloc(features.nexamples, sizeof(double));
+	for (i = 0; i < features.nexamples; i++)
+	    prob[i] = 1. / features.nexamples;
+	random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	non_extracted = (int *)G_calloc(features.nexamples, sizeof(int));
+
+
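+	/* each bootstrap replicate draws nexamples examples with replacement
+	   for training; the examples never drawn form the test set */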
+	for (i = 0; i < n_sets; i++) {
+	    idum = i + seed;
+	    Bootsamples_rseed(features.nexamples, prob, random_labels, &idum);
+
+	    /*training */
+	    tr_features[i].file = features.file;
+	    tr_features[i].nexamples = features.nexamples;
+	    tr_features[i].examples_dim = features.examples_dim;
+	    tr_features[i].value =
+		(double **)G_calloc(tr_features[i].nexamples,
+				    sizeof(double *));
+	    tr_features[i].class =
+		(int *)G_calloc(tr_features[i].nexamples, sizeof(int));
+	    for (j = 0; j < tr_features[i].nexamples; j++) {
+		tr_features[i].value[j] = features.value[random_labels[j]];
+		tr_features[i].class[j] = features.class[random_labels[j]];
+	    }
+	    tr_features[i].p_classes = features.p_classes;
+	    tr_features[i].nclasses = features.nclasses;
+	    tr_features[i].mean = features.mean;
+	    tr_features[i].sd = features.sd;
+	    tr_features[i].f_normalize = features.f_normalize;
+	    tr_features[i].f_standardize = features.f_standardize;
+	    tr_features[i].f_mean = features.f_mean;
+	    tr_features[i].f_variance = features.f_variance;
+	    tr_features[i].f_pca = features.f_pca;
+	    tr_features[i].pca_class = features.pca_class;
+	    tr_features[i].pca = features.pca;
+	    tr_features[i].training = features.training;
+	    tr_features[i].npc = features.npc;
+	    tr_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__tr_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &tr_features[i]);
+
+	    fprintf(flab, "Training %d:\n", i + 1);
+
+	    for (j = 0; j < (tr_features[i].nexamples - 1); j++) {
+		fprintf(flab, "%d\t", random_labels[j] + 1);
+	    }
+	    fprintf(flab, "%d\n",
+		    random_labels[tr_features[i].nexamples - 1] + 1);
+
+	    /*test */
+	    n_non_extracted = 0;
+	    for (k = 0; k < tr_features[i].nexamples; k++) {
+		extracted = 0;
+		for (j = 0; j < tr_features[i].nexamples; j++) {
+		    if (k == random_labels[j]) {
+			extracted = 1;
+			break;
+		    }
+		}
+		if (!extracted) {
+		    non_extracted[n_non_extracted] = k;
+		    n_non_extracted++;
+		}
+	    }
+
+	    ts_features[i].file = features.file;
+	    ts_features[i].nexamples = n_non_extracted;
+	    ts_features[i].examples_dim = features.examples_dim;
+	    ts_features[i].value = (double **)G_calloc(n_non_extracted,
+						       sizeof(double *));
+	    ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						   sizeof(int));
+	    for (j = 0; j < n_non_extracted; j++) {
+		ts_features[i].value[j] = features.value[non_extracted[j]];
+		ts_features[i].class[j] = features.class[non_extracted[j]];
+	    }
+	    ts_features[i].p_classes = features.p_classes;
+	    ts_features[i].nclasses = features.nclasses;
+	    ts_features[i].mean = features.mean;
+	    ts_features[i].sd = features.sd;
+	    ts_features[i].f_normalize = features.f_normalize;
+	    ts_features[i].f_standardize = features.f_standardize;
+	    ts_features[i].f_mean = features.f_mean;
+	    ts_features[i].f_variance = features.f_variance;
+	    ts_features[i].f_pca = features.f_pca;
+	    ts_features[i].pca_class = features.pca_class;
+	    ts_features[i].pca = features.pca;
+	    ts_features[i].training = features.training;
+	    ts_features[i].npc = features.npc;
+	    ts_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__ts_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &ts_features[i]);
+
+	    fprintf(flab, "Test %d:\n", i + 1);
+
+	    for (j = 0; j < (ts_features[i].nexamples - 1); j++) {
+		fprintf(flab, "%d\t", non_extracted[j] + 1);
+	    }
+	    fprintf(flab, "%d\n",
+		    non_extracted[ts_features[i].nexamples - 1] + 1);
+
+	}
+	G_free(prob);
+	G_free(random_labels);
+	G_free(non_extracted);
+
+	return 0;
+    }
+
+
+    /*stratified bootstrap */
+    if (flag_s->answer && flag_b->answer) {
+
+	sprintf(filelab, "%s__str_bootstrap__labels", opt1->answer);
+	flab = fopen(filelab, "w");
+
+	tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	non_extracted = (int *)G_calloc(features.nexamples, sizeof(int));
+
+	npos = 0;
+	nneg = 0;
+
+	for (i = 0; i < features.nexamples; i++) {
+	    if (features.class[i] == 1) {
+		npos++;
+	    }
+	    else if (features.class[i] == -1) {
+		nneg++;
+	    }
+	}
+
+	prob_pos = (double *)G_calloc(features.nexamples, sizeof(double));
+	prob_neg = (double *)G_calloc(features.nexamples, sizeof(double));
+	random_labels_pos = (int *)G_calloc(npos, sizeof(int));
+	random_labels_neg = (int *)G_calloc(nneg, sizeof(int));
+
+	for (i = 0; i < features.nexamples; i++) {
+	    if (features.class[i] == 1) {
+		prob_pos[i] = 1. / npos;
+		prob_neg[i] = 0;
+	    }
+	    else if (features.class[i] == -1) {
+		prob_pos[i] = 0.;
+		prob_neg[i] = 1. / nneg;
+	    }
+	}
+
+	for (i = 0; i < n_sets; i++) {
+	    idum = i + seed;
+
+	    Bootsamples_rseed(npos, prob_pos, random_labels_pos, &idum);
+	    Bootsamples_rseed(nneg, prob_neg, random_labels_neg, &idum);
+
+	    for (j = 0; j < npos; j++) {
+		random_labels[j] = random_labels_pos[j];
+	    }
+
+	    for (j = 0; j < nneg; j++) {
+		random_labels[npos + j] = (random_labels_neg[j] + npos);
+	    }
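+	    /* NOTE: the merge above assumes the examples are stored
+	     * sorted by class, positives (class 1) in positions
+	     * 0..npos-1 and negatives (class -1) after them; the
+	     * negative draws are therefore shifted by npos */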
+
+	    /*training */
+	    tr_features[i].file = features.file;
+	    tr_features[i].nexamples = features.nexamples;
+	    tr_features[i].examples_dim = features.examples_dim;
+	    tr_features[i].value =
+		(double **)G_calloc(tr_features[i].nexamples,
+				    sizeof(double *));
+	    tr_features[i].class =
+		(int *)G_calloc(tr_features[i].nexamples, sizeof(int));
+	    for (j = 0; j < tr_features[i].nexamples; j++) {
+		tr_features[i].value[j] = features.value[random_labels[j]];
+		tr_features[i].class[j] = features.class[random_labels[j]];
+	    }
+	    tr_features[i].p_classes = features.p_classes;
+	    tr_features[i].nclasses = features.nclasses;
+	    tr_features[i].mean = features.mean;
+	    tr_features[i].sd = features.sd;
+	    tr_features[i].f_normalize = features.f_normalize;
+	    tr_features[i].f_standardize = features.f_standardize;
+	    tr_features[i].f_mean = features.f_mean;
+	    tr_features[i].f_variance = features.f_variance;
+	    tr_features[i].f_pca = features.f_pca;
+	    tr_features[i].pca_class = features.pca_class;
+	    tr_features[i].pca = features.pca;
+	    tr_features[i].training = features.training;
+	    tr_features[i].npc = features.npc;
+	    tr_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__tr_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &tr_features[i]);
+
+	    fprintf(flab, "Training %d:\n", i + 1);
+
+	    for (j = 0; j < (tr_features[i].nexamples - 1); j++) {
+		fprintf(flab, "%d\t", random_labels[j] + 1);
+	    }
+	    fprintf(flab, "%d\n",
+		    random_labels[tr_features[i].nexamples - 1] + 1);
+
+	    /*test */
+	    n_non_extracted = 0;
+	    for (k = 0; k < tr_features[i].nexamples; k++) {
+		extracted = 0;
+		for (j = 0; j < tr_features[i].nexamples; j++) {
+		    if (k == random_labels[j]) {
+			extracted = 1;
+			break;
+		    }
+		}
+		if (!extracted) {
+		    non_extracted[n_non_extracted] = k;
+		    n_non_extracted++;
+		}
+	    }
+
+	    ts_features[i].file = features.file;
+	    ts_features[i].nexamples = n_non_extracted;
+	    ts_features[i].examples_dim = features.examples_dim;
+	    ts_features[i].value = (double **)G_calloc(n_non_extracted,
+						       sizeof(double *));
+	    ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						   sizeof(int));
+	    for (j = 0; j < n_non_extracted; j++) {
+		ts_features[i].value[j] = features.value[non_extracted[j]];
+		ts_features[i].class[j] = features.class[non_extracted[j]];
+	    }
+	    ts_features[i].p_classes = features.p_classes;
+	    ts_features[i].nclasses = features.nclasses;
+	    ts_features[i].mean = features.mean;
+	    ts_features[i].sd = features.sd;
+	    ts_features[i].f_normalize = features.f_normalize;
+	    ts_features[i].f_standardize = features.f_standardize;
+	    ts_features[i].f_mean = features.f_mean;
+	    ts_features[i].f_variance = features.f_variance;
+	    ts_features[i].f_pca = features.f_pca;
+	    ts_features[i].pca_class = features.pca_class;
+	    ts_features[i].pca = features.pca;
+	    ts_features[i].training = features.training;
+	    ts_features[i].npc = features.npc;
+	    ts_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__ts_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &ts_features[i]);
+
+	    fprintf(flab, "Test %d:\n", i + 1);
+
+	    for (j = 0; j < (ts_features[i].nexamples - 1); j++) {
+		fprintf(flab, "%d\t", non_extracted[j] + 1);
+	    }
+	    fprintf(flab, "%d\n",
+		    non_extracted[ts_features[i].nexamples - 1] + 1);
+
+	}
+	G_free(prob_pos);
+	G_free(prob_neg);
+	G_free(random_labels);
+	G_free(random_labels_pos);
+	G_free(random_labels_neg);
+	G_free(non_extracted);
+
+	return 0;
+    }
+
+    if (flag_c->answer && !flag_s->answer) {
+	if (n_sets == 1) {
+	    tr_features =
+		(Features *) G_calloc(features.nexamples, sizeof(Features));
+	    ts_features =
+		(Features *) G_calloc(features.nexamples, sizeof(Features));
+
+	    /*training */
+	    for (i = 0; i < features.nexamples; i++) {
+		tr_features[i].file = features.file;
+		tr_features[i].nexamples = features.nexamples - 1;
+		tr_features[i].examples_dim = features.examples_dim;
+		tr_features[i].value =
+		    (double **)G_calloc(features.nexamples - 1,
+					sizeof(double *));
+		tr_features[i].class =
+		    (int *)G_calloc(features.nexamples - 1, sizeof(int));
+		indx = 0;
+		for (j = 0; j < features.nexamples; j++) {
+		    if (j != i) {
+			tr_features[i].value[indx] = features.value[j];
+			tr_features[i].class[indx++] = features.class[j];
+		    }
+		}
+
+		tr_features[i].p_classes = features.p_classes;
+		tr_features[i].nclasses = features.nclasses;
+		tr_features[i].mean = features.mean;
+		tr_features[i].sd = features.sd;
+		tr_features[i].f_normalize = features.f_normalize;
+		tr_features[i].f_standardize = features.f_standardize;
+		tr_features[i].f_mean = features.f_mean;
+		tr_features[i].f_variance = features.f_variance;
+		tr_features[i].f_pca = features.f_pca;
+		tr_features[i].pca_class = features.pca_class;
+		tr_features[i].pca = features.pca;
+		tr_features[i].training = features.training;
+		tr_features[i].npc = features.npc;
+		tr_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__tr_l1ocv__%d", opt1->answer, i + 1);
+		write_features(fileout, &tr_features[i]);
+
+		/*test */
+		ts_features[i].file = features.file;
+		ts_features[i].nexamples = 1;
+		ts_features[i].examples_dim = features.examples_dim;
+		ts_features[i].value =
+		    (double **)G_calloc(1, sizeof(double *));
+		ts_features[i].class = (int *)G_calloc(1, sizeof(int));
+		ts_features[i].value[0] = features.value[i];
+		ts_features[i].class[0] = features.class[i];
+
+
+		ts_features[i].p_classes = features.p_classes;
+		ts_features[i].nclasses = features.nclasses;
+		ts_features[i].mean = features.mean;
+		ts_features[i].sd = features.sd;
+		ts_features[i].f_normalize = features.f_normalize;
+		ts_features[i].f_standardize = features.f_standardize;
+		ts_features[i].f_mean = features.f_mean;
+		ts_features[i].f_variance = features.f_variance;
+		ts_features[i].f_pca = features.f_pca;
+		ts_features[i].pca_class = features.pca_class;
+		ts_features[i].pca = features.pca;
+		ts_features[i].training = features.training;
+		ts_features[i].npc = features.npc;
+		ts_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__ts_l1ocv__%d", opt1->answer, i + 1);
+		write_features(fileout, &ts_features[i]);
+	    }
+	    return 0;
+	}
+	else {
+	    sprintf(filelab, "%s__cv__labels", opt1->answer);
+	    flab = fopen(filelab, "w");
+
+	    tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	    ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	    if (n_sets > features.nexamples) {
+		sprintf(tmpbuf,
+			"n_sets must be <= %d (= number of training examples) when using cross-validation",
+			features.nexamples);
+		G_fatal_error(tmpbuf);
+	    }
+
+	    probok =
+		pow(1. - pow(1. - 1. / n_sets, (double)features.nexamples),
+		    (double)n_sets);
+	    if (probok < 0.95) {
+		sprintf(tmpbuf,
+			"The probability of extracting %d non-empty test sets is below 0.95 (it is exactly %e); refusing to proceed.",
+			n_sets, probok);
+		G_fatal_error(tmpbuf);
+	    }
+
+	    random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	    for (i = 0; i < n_sets; i++) {
+		idum = i + seed;
+		for (j = 0; j < features.nexamples; j++)
+		    random_labels[j] = (int)(n_sets * ran1(&idum));
+
+		/*training */
+		n_extracted = 0;
+		for (j = 0; j < features.nexamples; j++)
+		    if (random_labels[j] != i)
+			n_extracted++;
+
+		tr_features[i].file = features.file;
+		tr_features[i].nexamples = n_extracted;
+		tr_features[i].examples_dim = features.examples_dim;
+		tr_features[i].value = (double **)G_calloc(n_extracted,
+							   sizeof(double *));
+		tr_features[i].class = (int *)G_calloc(n_extracted,
+						       sizeof(int));
+
+		fprintf(flab, "Training %d:\n", i + 1);
+
+		indx = 0;
+
+		for (j = 0; j < (features.nexamples - 1); j++) {
+		    if (random_labels[j] != i) {
+			tr_features[i].value[indx] = features.value[j];
+			tr_features[i].class[indx++] = features.class[j];
+			fprintf(flab, "%d\t", j + 1);
+		    }
+		}
+
+		if (random_labels[features.nexamples - 1] != i) {
+		    tr_features[i].value[indx] =
+			features.value[features.nexamples - 1];
+		    tr_features[i].class[indx++] =
+			features.class[features.nexamples - 1];
+		    fprintf(flab, "%d", features.nexamples);
+		}
+
+		fprintf(flab, "\n");
+
+		tr_features[i].p_classes = features.p_classes;
+		tr_features[i].nclasses = features.nclasses;
+		tr_features[i].mean = features.mean;
+		tr_features[i].sd = features.sd;
+		tr_features[i].f_normalize = features.f_normalize;
+		tr_features[i].f_standardize = features.f_standardize;
+		tr_features[i].f_mean = features.f_mean;
+		tr_features[i].f_variance = features.f_variance;
+		tr_features[i].f_pca = features.f_pca;
+		tr_features[i].pca_class = features.pca_class;
+		tr_features[i].pca = features.pca;
+		tr_features[i].training = features.training;
+		tr_features[i].npc = features.npc;
+		tr_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__tr_%dcv__%d", opt1->answer, n_sets,
+			i + 1);
+		write_features(fileout, &tr_features[i]);
+
+
+		/*test */
+		n_non_extracted = 0;
+		for (j = 0; j < features.nexamples; j++)
+		    if (random_labels[j] == i)
+			n_non_extracted++;
+
+		ts_features[i].file = features.file;
+		ts_features[i].nexamples = n_non_extracted;
+		ts_features[i].examples_dim = features.examples_dim;
+		ts_features[i].value = (double **)G_calloc(n_non_extracted,
+							   sizeof(double *));
+		ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						       sizeof(int));
+
+
+		fprintf(flab, "Test %d:\n", i + 1);
+
+		indx = 0;
+		for (j = 0; j < (features.nexamples - 1); j++) {
+		    if (random_labels[j] == i) {
+			ts_features[i].value[indx] = features.value[j];
+			ts_features[i].class[indx++] = features.class[j];
+			fprintf(flab, "%d\t", j + 1);
+		    }
+		}
+
+		if (random_labels[features.nexamples - 1] == i) {
+		    ts_features[i].value[indx] =
+			features.value[features.nexamples - 1];
+		    ts_features[i].class[indx++] =
+			features.class[features.nexamples - 1];
+		    fprintf(flab, "%d", features.nexamples);
+		}
+
+		fprintf(flab, "\n");
+
+		ts_features[i].p_classes = features.p_classes;
+		ts_features[i].nclasses = features.nclasses;
+		ts_features[i].mean = features.mean;
+		ts_features[i].sd = features.sd;
+		ts_features[i].f_normalize = features.f_normalize;
+		ts_features[i].f_standardize = features.f_standardize;
+		ts_features[i].f_mean = features.f_mean;
+		ts_features[i].f_variance = features.f_variance;
+		ts_features[i].f_pca = features.f_pca;
+		ts_features[i].pca_class = features.pca_class;
+		ts_features[i].pca = features.pca;
+		ts_features[i].training = features.training;
+		ts_features[i].npc = features.npc;
+		ts_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__ts_%dcv__%d", opt1->answer, n_sets,
+			i + 1);
+		write_features(fileout, &ts_features[i]);
+
+	    }
+	    G_free(random_labels);
+	    return 0;
+	}
+    }
+
+    if (flag_c->answer && flag_s->answer) {
+	G_fatal_error("Stratified cross-validation is not implemented (yet)!");
+    }
+    return 0;
+}

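A note on the cross-validation branch of i.pr_subsets/main.c above: the
probok guard refuses to run when the random fold assignment is likely to
leave a test fold empty. Treating the folds as independent (a simplifying
assumption), with s = n_sets and N = features.nexamples the computed
quantity is

    P(all s test folds non-empty) \approx (1 - (1 - 1/s)^N)^s

which is exactly the pow() expression that is compared against the 0.95
threshold.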
Modified: grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,391 @@
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+void write_matrix();
+
+int main(int argc, char **argv)
+{
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Flag *flag_c;
+    struct Flag *flag_b;
+
+    Features features;
+    Features *tr_features;
+    Features *ts_features;
+
+    char tmpbuf[500];
+    int n_sets;
+    int i, j, k;
+    char fileout[500];
+    double *prob;
+    int *random_labels, *non_extracted;
+    int n_non_extracted;
+    int n_extracted;
+    int extracted;
+    int indx;
+    int idum;
+    double probok;
+    int seed;
+
+    char gisrc[500];
+
+    if (getenv("GISBASE") == NULL)
+	setenv("GISBASE",
+	       "/mpa_sw/ssi/BIO/software/GRASS5.0.0/grass5bin_cvs/grass5", 1);
+    if (getenv("GISRC") == NULL) {
+	sprintf(gisrc, "/ssi0/ssi/%s/.grassrc5", getenv("LOGNAME"));
+	setenv("GISRC", gisrc, 1);
+    }
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt2 = G_define_option();
+    opt2->key = "n_sets";
+    opt2->type = TYPE_INTEGER;
+    opt2->required = YES;
+    opt2->description =
+	"Number of subsets (>=1). If you set n_sets=1 and select cross-validation,\n\t\tleave-one-out cross-validation will be used.";
+
+    opt3 = G_define_option();
+    opt3->key = "seed";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = YES;
+    opt3->description =
+	"Seed for the initialization (>=0), which specifies a starting point\n\t\tfor the random number sequence. Use the same seed to replicate an experiment.";
+    opt3->answer = "0";
+
+    flag_c = G_define_flag();
+    flag_c->key = 'c';
+    flag_c->description = "selected method: cross-validation.";
+
+    flag_b = G_define_flag();
+    flag_b->key = 'b';
+    flag_b->description = "selected method: bootstrap.";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+
+    /*read parameters */
+    sscanf(opt2->answer, "%d", &n_sets);
+    if (n_sets <= 0) {
+	sprintf(tmpbuf, "n_sets must be >0");
+	G_fatal_error(tmpbuf);
+    }
+
+    sscanf(opt3->answer, "%d", &seed);
+    if (seed < 0) {
+	sprintf(tmpbuf, "seed must be >=0");
+	G_fatal_error(tmpbuf);
+    }
+
+    /*read features */
+    read_features(opt1->answer, &features, -1);
+
+
+
+    if (flag_b->answer) {
+	tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	prob = (double *)G_calloc(features.nexamples, sizeof(double));
+	for (i = 0; i < features.nexamples; i++)
+	    prob[i] = 1. / features.nexamples;
+	random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	non_extracted = (int *)G_calloc(features.nexamples, sizeof(int));
+
+
+	for (i = 0; i < n_sets; i++) {
+	    idum = i + seed;
+	    Bootsamples_rseed(features.nexamples, prob, random_labels, &idum);
+
+	    /*training */
+	    tr_features[i].file = features.file;
+	    tr_features[i].nexamples = features.nexamples;
+	    tr_features[i].examples_dim = features.examples_dim;
+	    tr_features[i].value =
+		(double **)G_calloc(tr_features[i].nexamples,
+				    sizeof(double *));
+	    tr_features[i].class =
+		(int *)G_calloc(tr_features[i].nexamples, sizeof(int));
+	    for (j = 0; j < tr_features[i].nexamples; j++) {
+		tr_features[i].value[j] = features.value[random_labels[j]];
+		tr_features[i].class[j] = features.class[random_labels[j]];
+	    }
+	    tr_features[i].p_classes = features.p_classes;
+	    tr_features[i].nclasses = features.nclasses;
+	    tr_features[i].mean = features.mean;
+	    tr_features[i].sd = features.sd;
+	    tr_features[i].f_normalize = features.f_normalize;
+	    tr_features[i].f_standardize = features.f_standardize;
+	    tr_features[i].f_mean = features.f_mean;
+	    tr_features[i].f_variance = features.f_variance;
+	    tr_features[i].f_pca = features.f_pca;
+	    tr_features[i].pca_class = features.pca_class;
+	    tr_features[i].pca = features.pca;
+	    tr_features[i].training = features.training;
+	    tr_features[i].npc = features.npc;
+	    tr_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__tr_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &tr_features[i]);
+
+	    /*test */
+	    n_non_extracted = 0;
+	    for (k = 0; k < tr_features[i].nexamples; k++) {
+		extracted = 0;
+		for (j = 0; j < tr_features[i].nexamples; j++) {
+		    if (k == random_labels[j]) {
+			extracted = 1;
+			break;
+		    }
+		}
+		if (!extracted) {
+		    non_extracted[n_non_extracted] = k;
+		    n_non_extracted++;
+		}
+	    }
+
+	    ts_features[i].file = features.file;
+	    ts_features[i].nexamples = n_non_extracted;
+	    ts_features[i].examples_dim = features.examples_dim;
+	    ts_features[i].value = (double **)G_calloc(n_non_extracted,
+						       sizeof(double *));
+	    ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						   sizeof(int));
+	    for (j = 0; j < n_non_extracted; j++) {
+		ts_features[i].value[j] = features.value[non_extracted[j]];
+		ts_features[i].class[j] = features.class[non_extracted[j]];
+	    }
+	    ts_features[i].p_classes = features.p_classes;
+	    ts_features[i].nclasses = features.nclasses;
+	    ts_features[i].mean = features.mean;
+	    ts_features[i].sd = features.sd;
+	    ts_features[i].f_normalize = features.f_normalize;
+	    ts_features[i].f_standardize = features.f_standardize;
+	    ts_features[i].f_mean = features.f_mean;
+	    ts_features[i].f_variance = features.f_variance;
+	    ts_features[i].f_pca = features.f_pca;
+	    ts_features[i].pca_class = features.pca_class;
+	    ts_features[i].pca = features.pca;
+	    ts_features[i].training = features.training;
+	    ts_features[i].npc = features.npc;
+	    ts_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__ts_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &ts_features[i]);
+
+	}
+	G_free(prob);
+	G_free(random_labels);
+	G_free(non_extracted);
+
+	return 0;
+    }
+
+
+    if (flag_c->answer) {
+	if (n_sets == 1) {
+	    tr_features =
+		(Features *) G_calloc(features.nexamples, sizeof(Features));
+	    ts_features =
+		(Features *) G_calloc(features.nexamples, sizeof(Features));
+
+	    /*training */
+	    for (i = 0; i < features.nexamples; i++) {
+		tr_features[i].file = features.file;
+		tr_features[i].nexamples = features.nexamples - 1;
+		tr_features[i].examples_dim = features.examples_dim;
+		tr_features[i].value =
+		    (double **)G_calloc(features.nexamples - 1,
+					sizeof(double *));
+		tr_features[i].class =
+		    (int *)G_calloc(features.nexamples - 1, sizeof(int));
+		indx = 0;
+		for (j = 0; j < features.nexamples; j++) {
+		    if (j != i) {
+			tr_features[i].value[indx] = features.value[j];
+			tr_features[i].class[indx++] = features.class[j];
+		    }
+		}
+
+		tr_features[i].p_classes = features.p_classes;
+		tr_features[i].nclasses = features.nclasses;
+		tr_features[i].mean = features.mean;
+		tr_features[i].sd = features.sd;
+		tr_features[i].f_normalize = features.f_normalize;
+		tr_features[i].f_standardize = features.f_standardize;
+		tr_features[i].f_mean = features.f_mean;
+		tr_features[i].f_variance = features.f_variance;
+		tr_features[i].f_pca = features.f_pca;
+		tr_features[i].pca_class = features.pca_class;
+		tr_features[i].pca = features.pca;
+		tr_features[i].training = features.training;
+		tr_features[i].npc = features.npc;
+		tr_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__tr_l1ocv__%d", opt1->answer, i + 1);
+		write_features(fileout, &tr_features[i]);
+
+		/*test */
+		ts_features[i].file = features.file;
+		ts_features[i].nexamples = 1;
+		ts_features[i].examples_dim = features.examples_dim;
+		ts_features[i].value =
+		    (double **)G_calloc(1, sizeof(double *));
+		ts_features[i].class = (int *)G_calloc(1, sizeof(int));
+		ts_features[i].value[0] = features.value[i];
+		ts_features[i].class[0] = features.class[i];
+
+
+		ts_features[i].p_classes = features.p_classes;
+		ts_features[i].nclasses = features.nclasses;
+		ts_features[i].mean = features.mean;
+		ts_features[i].sd = features.sd;
+		ts_features[i].f_normalize = features.f_normalize;
+		ts_features[i].f_standardize = features.f_standardize;
+		ts_features[i].f_mean = features.f_mean;
+		ts_features[i].f_variance = features.f_variance;
+		ts_features[i].f_pca = features.f_pca;
+		ts_features[i].pca_class = features.pca_class;
+		ts_features[i].pca = features.pca;
+		ts_features[i].training = features.training;
+		ts_features[i].npc = features.npc;
+		ts_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__ts_l1ocv__%d", opt1->answer, i + 1);
+		write_features(fileout, &ts_features[i]);
+	    }
+	    return 0;
+	}
+	else {
+	    tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	    ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	    if (n_sets > features.nexamples) {
+		sprintf(tmpbuf,
+			"n_sets must be <= %d (= number of training examples) when using cross-validation",
+			features.nexamples);
+		G_fatal_error(tmpbuf);
+	    }
+
+	    probok =
+		pow(1. - pow(1. - 1. / n_sets, (double)features.nexamples),
+		    (double)n_sets);
+	    if (probok < 0.95) {
+		sprintf(tmpbuf,
+			"The probability of extracting %d non-empty test sets is below 0.95 (it is exactly %e); refusing to proceed.",
+			n_sets, probok);
+		G_fatal_error(tmpbuf);
+	    }
+
+	    random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	    for (i = 0; i < n_sets; i++) {
+		idum = i + seed;
+		for (j = 0; j < features.nexamples; j++)
+		    random_labels[j] = (int)(n_sets * ran1(&idum));
+
+		/*training */
+		n_extracted = 0;
+		for (j = 0; j < features.nexamples; j++)
+		    if (random_labels[j] != i)
+			n_extracted++;
+
+		tr_features[i].file = features.file;
+		tr_features[i].nexamples = n_extracted;
+		tr_features[i].examples_dim = features.examples_dim;
+		tr_features[i].value = (double **)G_calloc(n_extracted,
+							   sizeof(double *));
+		tr_features[i].class = (int *)G_calloc(n_extracted,
+						       sizeof(int));
+		indx = 0;
+		for (j = 0; j < features.nexamples; j++) {
+		    if (random_labels[j] != i) {
+			tr_features[i].value[indx] = features.value[j];
+			tr_features[i].class[indx++] = features.class[j];
+		    }
+		}
+
+		tr_features[i].p_classes = features.p_classes;
+		tr_features[i].nclasses = features.nclasses;
+		tr_features[i].mean = features.mean;
+		tr_features[i].sd = features.sd;
+		tr_features[i].f_normalize = features.f_normalize;
+		tr_features[i].f_standardize = features.f_standardize;
+		tr_features[i].f_mean = features.f_mean;
+		tr_features[i].f_variance = features.f_variance;
+		tr_features[i].f_pca = features.f_pca;
+		tr_features[i].pca_class = features.pca_class;
+		tr_features[i].pca = features.pca;
+		tr_features[i].training = features.training;
+		tr_features[i].npc = features.npc;
+		tr_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__tr_%dcv__%d", opt1->answer, n_sets,
+			i + 1);
+		write_features(fileout, &tr_features[i]);
+
+
+		/*test */
+		n_non_extracted = 0;
+		for (j = 0; j < features.nexamples; j++)
+		    if (random_labels[j] == i)
+			n_non_extracted++;
+
+		ts_features[i].file = features.file;
+		ts_features[i].nexamples = n_non_extracted;
+		ts_features[i].examples_dim = features.examples_dim;
+		ts_features[i].value = (double **)G_calloc(n_non_extracted,
+							   sizeof(double *));
+		ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						       sizeof(int));
+		indx = 0;
+		for (j = 0; j < features.nexamples; j++) {
+		    if (random_labels[j] == i) {
+			ts_features[i].value[indx] = features.value[j];
+			ts_features[i].class[indx++] = features.class[j];
+		    }
+		}
+
+		ts_features[i].p_classes = features.p_classes;
+		ts_features[i].nclasses = features.nclasses;
+		ts_features[i].mean = features.mean;
+		ts_features[i].sd = features.sd;
+		ts_features[i].f_normalize = features.f_normalize;
+		ts_features[i].f_standardize = features.f_standardize;
+		ts_features[i].f_mean = features.f_mean;
+		ts_features[i].f_variance = features.f_variance;
+		ts_features[i].f_pca = features.f_pca;
+		ts_features[i].pca_class = features.pca_class;
+		ts_features[i].pca = features.pca;
+		ts_features[i].training = features.training;
+		ts_features[i].npc = features.npc;
+		ts_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__ts_%dcv__%d", opt1->answer, n_sets,
+			i + 1);
+		write_features(fileout, &ts_features[i]);
+
+	    }
+	    G_free(random_labels);
+	    return 0;
+	}
+    }
+
+    return 0;
+}

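For the bootstrap branches above, the test set is the "out-of-bag" set:
every example never drawn into the bootstrap training sample. For a
sample of size N drawn with replacement, each example is left out with
probability

    (1 - 1/N)^N \to e^{-1} \approx 0.368,

so on average the __ts_bootstrap__ files hold a bit more than a third of
the examples.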
Modified: grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig_2.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig_2.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_subsets/old/main_orig_2.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,444 @@
+#include <grass/gis.h>
+#include "global.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+void write_matrix();
+
+int main(int argc, char **argv)
+{
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Flag *flag_c;
+    struct Flag *flag_b;
+
+    Features features;
+    Features *tr_features;
+    Features *ts_features;
+
+    char tmpbuf[500];
+    int n_sets;
+    int i, j, k;
+    char fileout[500], filelab[500];
+    FILE *flab;
+    double *prob;
+    int *random_labels, *non_extracted;
+    int n_non_extracted;
+    int n_extracted;
+    int extracted;
+    int indx;
+    int idum;
+    double probok;
+    int seed;
+
+    char gisrc[500];
+
+    if (getenv("GISBASE") == NULL)
+	setenv("GISBASE",
+	       "/mpa_sw/ssi/BIO/software/GRASS5.0.0/grass5bin_cvs/grass5", 1);
+    if (getenv("GISRC") == NULL) {
+	sprintf(gisrc, "/ssi0/ssi/%s/.grassrc5", getenv("LOGNAME"));
+	setenv("GISRC", gisrc, 1);
+    }
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "features";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->description =
+	"Input file containing the features (output of i.pr_features).";
+
+    opt2 = G_define_option();
+    opt2->key = "n_sets";
+    opt2->type = TYPE_INTEGER;
+    opt2->required = YES;
+    opt2->description =
+	"Number of subsets (>=1). If you set n_sets=1 and select cross-validation,\n\t\tleave-one-out cross-validation will be used.";
+
+    opt3 = G_define_option();
+    opt3->key = "seed";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = YES;
+    opt3->description =
+	"Seed for the initialization (>=0), which specifies a starting point\n\t\tfor the random number sequence. Use the same seed to replicate an experiment.";
+    opt3->answer = "0";
+
+    flag_c = G_define_flag();
+    flag_c->key = 'c';
+    flag_c->description = "selected method: cross-validation.";
+
+    flag_b = G_define_flag();
+    flag_b->key = 'b';
+    flag_b->description = "selected method: bootstrap.";
+
+
+    if (G_parser(argc, argv))
+	exit(1);
+
+
+    /*read parameters */
+    sscanf(opt2->answer, "%d", &n_sets);
+    if (n_sets <= 0) {
+	sprintf(tmpbuf, "n_sets must be >0");
+	G_fatal_error(tmpbuf);
+    }
+
+    sscanf(opt3->answer, "%d", &seed);
+    if (seed < 0) {
+	sprintf(tmpbuf, "seed must be >=0");
+	G_fatal_error(tmpbuf);
+    }
+
+    /*read features */
+    read_features(opt1->answer, &features, -1);
+
+
+
+    if (flag_b->answer) {
+	sprintf(filelab, "%s__bootstrap__labels", opt1->answer);
+	flab = fopen(filelab, "w");
+
+	tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	prob = (double *)G_calloc(features.nexamples, sizeof(double));
+	for (i = 0; i < features.nexamples; i++)
+	    prob[i] = 1. / features.nexamples;
+	random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	non_extracted = (int *)G_calloc(features.nexamples, sizeof(int));
+
+
+	for (i = 0; i < n_sets; i++) {
+	    idum = i + seed;
+	    Bootsamples_rseed(features.nexamples, prob, random_labels, &idum);
+
+	    /*training */
+	    tr_features[i].file = features.file;
+	    tr_features[i].nexamples = features.nexamples;
+	    tr_features[i].examples_dim = features.examples_dim;
+	    tr_features[i].value =
+		(double **)G_calloc(tr_features[i].nexamples,
+				    sizeof(double *));
+	    tr_features[i].class =
+		(int *)G_calloc(tr_features[i].nexamples, sizeof(int));
+	    for (j = 0; j < tr_features[i].nexamples; j++) {
+		tr_features[i].value[j] = features.value[random_labels[j]];
+		tr_features[i].class[j] = features.class[random_labels[j]];
+	    }
+	    tr_features[i].p_classes = features.p_classes;
+	    tr_features[i].nclasses = features.nclasses;
+	    tr_features[i].mean = features.mean;
+	    tr_features[i].sd = features.sd;
+	    tr_features[i].f_normalize = features.f_normalize;
+	    tr_features[i].f_standardize = features.f_standardize;
+	    tr_features[i].f_mean = features.f_mean;
+	    tr_features[i].f_variance = features.f_variance;
+	    tr_features[i].f_pca = features.f_pca;
+	    tr_features[i].pca_class = features.pca_class;
+	    tr_features[i].pca = features.pca;
+	    tr_features[i].training = features.training;
+	    tr_features[i].npc = features.npc;
+	    tr_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__tr_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &tr_features[i]);
+
+	    fprintf(flab, "Training %d:\n", i + 1);
+
+	    for (j = 0; j < (tr_features[i].nexamples - 1); j++) {
+		fprintf(flab, "%d\t", random_labels[j] + 1);
+	    }
+	    fprintf(flab, "%d\n",
+		    random_labels[tr_features[i].nexamples - 1] + 1);
+
+	    /*test */
+	    n_non_extracted = 0;
+	    for (k = 0; k < tr_features[i].nexamples; k++) {
+		extracted = 0;
+		for (j = 0; j < tr_features[i].nexamples; j++) {
+		    if (k == random_labels[j]) {
+			extracted = 1;
+			break;
+		    }
+		}
+		if (!extracted) {
+		    non_extracted[n_non_extracted] = k;
+		    n_non_extracted++;
+		}
+	    }
+
+	    ts_features[i].file = features.file;
+	    ts_features[i].nexamples = n_non_extracted;
+	    ts_features[i].examples_dim = features.examples_dim;
+	    ts_features[i].value = (double **)G_calloc(n_non_extracted,
+						       sizeof(double *));
+	    ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						   sizeof(int));
+	    for (j = 0; j < n_non_extracted; j++) {
+		ts_features[i].value[j] = features.value[non_extracted[j]];
+		ts_features[i].class[j] = features.class[non_extracted[j]];
+	    }
+	    ts_features[i].p_classes = features.p_classes;
+	    ts_features[i].nclasses = features.nclasses;
+	    ts_features[i].mean = features.mean;
+	    ts_features[i].sd = features.sd;
+	    ts_features[i].f_normalize = features.f_normalize;
+	    ts_features[i].f_standardize = features.f_standardize;
+	    ts_features[i].f_mean = features.f_mean;
+	    ts_features[i].f_variance = features.f_variance;
+	    ts_features[i].f_pca = features.f_pca;
+	    ts_features[i].pca_class = features.pca_class;
+	    ts_features[i].pca = features.pca;
+	    ts_features[i].training = features.training;
+	    ts_features[i].npc = features.npc;
+	    ts_features[i].training.file = "generated by i.pr_subsets";
+
+	    sprintf(fileout, "%s__ts_bootstrap__%d", opt1->answer, i + 1);
+	    write_features(fileout, &ts_features[i]);
+
+	    fprintf(flab, "Test %d:\n", i + 1);
+
+	    for (j = 0; j < (ts_features[i].nexamples - 1); j++) {
+		fprintf(flab, "%d\t", non_extracted[j] + 1);
+	    }
+	    fprintf(flab, "%d\n",
+		    non_extracted[ts_features[i].nexamples - 1] + 1);
+
+	}
+	G_free(prob);
+	G_free(random_labels);
+	G_free(non_extracted);
+
+	return 0;
+    }
+
+
+    if (flag_c->answer) {
+	if (n_sets == 1) {
+	    tr_features =
+		(Features *) G_calloc(features.nexamples, sizeof(Features));
+	    ts_features =
+		(Features *) G_calloc(features.nexamples, sizeof(Features));
+
+	    /*training */
+	    for (i = 0; i < features.nexamples; i++) {
+		tr_features[i].file = features.file;
+		tr_features[i].nexamples = features.nexamples - 1;
+		tr_features[i].examples_dim = features.examples_dim;
+		tr_features[i].value =
+		    (double **)G_calloc(features.nexamples - 1,
+					sizeof(double *));
+		tr_features[i].class =
+		    (int *)G_calloc(features.nexamples - 1, sizeof(int));
+		indx = 0;
+		for (j = 0; j < features.nexamples; j++) {
+		    if (j != i) {
+			tr_features[i].value[indx] = features.value[j];
+			tr_features[i].class[indx++] = features.class[j];
+		    }
+		}
+
+		tr_features[i].p_classes = features.p_classes;
+		tr_features[i].nclasses = features.nclasses;
+		tr_features[i].mean = features.mean;
+		tr_features[i].sd = features.sd;
+		tr_features[i].f_normalize = features.f_normalize;
+		tr_features[i].f_standardize = features.f_standardize;
+		tr_features[i].f_mean = features.f_mean;
+		tr_features[i].f_variance = features.f_variance;
+		tr_features[i].f_pca = features.f_pca;
+		tr_features[i].pca_class = features.pca_class;
+		tr_features[i].pca = features.pca;
+		tr_features[i].training = features.training;
+		tr_features[i].npc = features.npc;
+		tr_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__tr_l1ocv__%d", opt1->answer, i + 1);
+		write_features(fileout, &tr_features[i]);
+
+		/*test */
+		ts_features[i].file = features.file;
+		ts_features[i].nexamples = 1;
+		ts_features[i].examples_dim = features.examples_dim;
+		ts_features[i].value =
+		    (double **)G_calloc(1, sizeof(double *));
+		ts_features[i].class = (int *)G_calloc(1, sizeof(int));
+		ts_features[i].value[0] = features.value[i];
+		ts_features[i].class[0] = features.class[i];
+
+
+		ts_features[i].p_classes = features.p_classes;
+		ts_features[i].nclasses = features.nclasses;
+		ts_features[i].mean = features.mean;
+		ts_features[i].sd = features.sd;
+		ts_features[i].f_normalize = features.f_normalize;
+		ts_features[i].f_standardize = features.f_standardize;
+		ts_features[i].f_mean = features.f_mean;
+		ts_features[i].f_variance = features.f_variance;
+		ts_features[i].f_pca = features.f_pca;
+		ts_features[i].pca_class = features.pca_class;
+		ts_features[i].pca = features.pca;
+		ts_features[i].training = features.training;
+		ts_features[i].npc = features.npc;
+		ts_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__ts_l1ocv__%d", opt1->answer, i + 1);
+		write_features(fileout, &ts_features[i]);
+	    }
+	    return 0;
+	}
+	else {
+	    sprintf(filelab, "%s__cv__labels", opt1->answer);
+	    flab = fopen(filelab, "w");
+
+	    tr_features = (Features *) G_calloc(n_sets, sizeof(Features));
+	    ts_features = (Features *) G_calloc(n_sets, sizeof(Features));
+
+	    if (n_sets > features.nexamples) {
+		sprintf(tmpbuf,
+			"n_sets must be <= %d (= number of training examples) when using cross-validation",
+			features.nexamples);
+		G_fatal_error(tmpbuf);
+	    }
+
+	    probok =
+		pow(1. - pow(1. - 1. / n_sets, (double)features.nexamples),
+		    (double)n_sets);
+	    if (probok < 0.95) {
+		sprintf(tmpbuf,
+			"The probability of extracting %d non-empty test sets is below 0.95 (it is exactly %e); refusing to proceed.",
+			n_sets, probok);
+		G_fatal_error(tmpbuf);
+	    }
+
+	    random_labels = (int *)G_calloc(features.nexamples, sizeof(int));
+	    for (i = 0; i < n_sets; i++) {
+		idum = i + seed;
+		for (j = 0; j < features.nexamples; j++)
+		    random_labels[j] = (int)(n_sets * ran1(&idum));
+
+		/*training */
+		n_extracted = 0;
+		for (j = 0; j < features.nexamples; j++)
+		    if (random_labels[j] != i)
+			n_extracted++;
+
+		tr_features[i].file = features.file;
+		tr_features[i].nexamples = n_extracted;
+		tr_features[i].examples_dim = features.examples_dim;
+		tr_features[i].value = (double **)G_calloc(n_extracted,
+							   sizeof(double *));
+		tr_features[i].class = (int *)G_calloc(n_extracted,
+						       sizeof(int));
+
+		fprintf(flab, "Training %d:\n", i + 1);
+
+		indx = 0;
+
+		for (j = 0; j < (features.nexamples - 1); j++) {
+		    if (random_labels[j] != i) {
+			tr_features[i].value[indx] = features.value[j];
+			tr_features[i].class[indx++] = features.class[j];
+			fprintf(flab, "%d\t", j + 1);
+		    }
+		}
+
+		if (random_labels[features.nexamples - 1] != i) {
+		    tr_features[i].value[indx] =
+			features.value[features.nexamples - 1];
+		    tr_features[i].class[indx++] =
+			features.class[features.nexamples - 1];
+		    fprintf(flab, "%d", features.nexamples);
+		}
+
+		fprintf(flab, "\n");
+
+		tr_features[i].p_classes = features.p_classes;
+		tr_features[i].nclasses = features.nclasses;
+		tr_features[i].mean = features.mean;
+		tr_features[i].sd = features.sd;
+		tr_features[i].f_normalize = features.f_normalize;
+		tr_features[i].f_standardize = features.f_standardize;
+		tr_features[i].f_mean = features.f_mean;
+		tr_features[i].f_variance = features.f_variance;
+		tr_features[i].f_pca = features.f_pca;
+		tr_features[i].pca_class = features.pca_class;
+		tr_features[i].pca = features.pca;
+		tr_features[i].training = features.training;
+		tr_features[i].npc = features.npc;
+		tr_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__tr_%dcv__%d", opt1->answer, n_sets,
+			i + 1);
+		write_features(fileout, &tr_features[i]);
+
+
+		/*test */
+		n_non_extracted = 0;
+		for (j = 0; j < features.nexamples; j++)
+		    if (random_labels[j] == i)
+			n_non_extracted++;
+
+		ts_features[i].file = features.file;
+		ts_features[i].nexamples = n_non_extracted;
+		ts_features[i].examples_dim = features.examples_dim;
+		ts_features[i].value = (double **)G_calloc(n_non_extracted,
+							   sizeof(double *));
+		ts_features[i].class = (int *)G_calloc(n_non_extracted,
+						       sizeof(int));
+
+
+		fprintf(flab, "Test %d:\n", i + 1);
+
+		indx = 0;
+		for (j = 0; j < (features.nexamples - 1); j++) {
+		    if (random_labels[j] == i) {
+			ts_features[i].value[indx] = features.value[j];
+			ts_features[i].class[indx++] = features.class[j];
+			fprintf(flab, "%d\t", j + 1);
+		    }
+		}
+
+		if (random_labels[features.nexamples - 1] == i) {
+		    ts_features[i].value[indx] =
+			features.value[features.nexamples - 1];
+		    ts_features[i].class[indx++] =
+			features.class[features.nexamples - 1];
+		    fprintf(flab, "%d", features.nexamples);
+		}
+
+		fprintf(flab, "\n");
+
+		ts_features[i].p_classes = features.p_classes;
+		ts_features[i].nclasses = features.nclasses;
+		ts_features[i].mean = features.mean;
+		ts_features[i].sd = features.sd;
+		ts_features[i].f_normalize = features.f_normalize;
+		ts_features[i].f_standardize = features.f_standardize;
+		ts_features[i].f_mean = features.f_mean;
+		ts_features[i].f_variance = features.f_variance;
+		ts_features[i].f_pca = features.f_pca;
+		ts_features[i].pca_class = features.pca_class;
+		ts_features[i].pca = features.pca;
+		ts_features[i].training = features.training;
+		ts_features[i].npc = features.npc;
+		ts_features[i].training.file = "generated by i.pr_subsets";
+
+		sprintf(fileout, "%s__ts_%dcv__%d", opt1->answer, n_sets,
+			i + 1);
+		write_features(fileout, &ts_features[i]);
+
+	    }
+	    G_free(random_labels);
+	    return 0;
+	}
+    }
+
+    return 0;
+}

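The __labels files written through flab record, for each subset, the
1-based indices of the examples it contains, one tab-separated line per
subset. As an illustration only (the actual indices depend on the seed),
a run with five examples, n_sets=2 and the -c flag could produce:

Training 1:
1	3	4
Test 1:
2	5
Training 2:
2	5
Test 2:
1	3	4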
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/conv.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/conv.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/conv.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,54 @@
+#include "globals.h"
+int view_to_col(View * view, int x)
+{
+    return x - view->cell.left;
+}
+
+int view_to_row(View * view, int y)
+{
+    return y - view->cell.top;
+}
+
+int col_to_view(View * view, int col)
+{
+    return view->cell.left + col;
+}
+
+int row_to_view(View * view, int row)
+{
+    return view->cell.top + row;
+}
+
+void row_to_northing(struct Cell_head *cellhd, int row, double location,
+		     double *north)
+{
+    *north = cellhd->north - (row + location) * cellhd->ns_res;
+}
+
+void col_to_easting(struct Cell_head *cellhd, int col, double location,
+		    double *east)
+{
+    *east = cellhd->west + (col + location) * cellhd->ew_res;
+}
+
+
+void northing_to_row(struct Cell_head *cellhd, double north, int *row)
+{
+    *row = (cellhd->north - north) / cellhd->ns_res;
+}
+
+void easting_to_col(struct Cell_head *cellhd, double east, int *col)
+{
+    *col = (east - cellhd->west) / cellhd->ew_res;
+}
+
+void from_screen_to_geo(View * view, int x, int y, double *east,
+			double *north)
+{
+    int row, col;
+
+    col = view_to_col(view, x);
+    col_to_easting(&view->cell.head, col, 0.5, east);
+    row = view_to_row(view, y);
+    row_to_northing(&view->cell.head, row, 0.5, north);
+}

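A minimal usage sketch for the conversions in conv.c (View and the
VIEW_MAP1 global come from globals.h, as elsewhere in i.pr_training;
x and y stand for a mouse position in screen pixels):

    int row, col;
    double east, north;

    /* screen pixel -> geographic coordinates; the 0.5 passed to
     * col_to_easting()/row_to_northing() inside from_screen_to_geo()
     * picks the centre of the cell rather than its corner */
    from_screen_to_geo(VIEW_MAP1, x, y, &east, &north);

    /* and back: coordinates -> row/col of the view's cell header */
    northing_to_row(&VIEW_MAP1->cell.head, north, &row);
    easting_to_col(&VIEW_MAP1->cell.head, east, &col);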
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/graphics.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/graphics.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/graphics.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,317 @@
+#include "globals.h"
+#include <grass/raster.h>
+#include <grass/display.h>
+#include "loc_func.h"
+
+static View *makeview(double bottom, double top, double left, double right)
+{
+    View *view;
+
+    view = (View *) G_malloc(sizeof(View));
+
+    top = 100 - top;
+    bottom = 100 - bottom;
+
+    view->top = SCREEN_TOP + (SCREEN_BOTTOM - SCREEN_TOP) * top / 100.0;
+    view->bottom = SCREEN_TOP + (SCREEN_BOTTOM - SCREEN_TOP) * bottom / 100.0;
+    view->left = SCREEN_LEFT + (SCREEN_RIGHT - SCREEN_LEFT) * left / 100.0;
+    view->right = SCREEN_LEFT + (SCREEN_RIGHT - SCREEN_LEFT) * right / 100.0;
+
+    if (view->top < SCREEN_TOP)
+	view->top = SCREEN_TOP;
+    if (view->bottom > SCREEN_BOTTOM)
+	view->bottom = SCREEN_BOTTOM;
+    if (view->left < SCREEN_LEFT)
+	view->left = SCREEN_LEFT;
+    if (view->right > SCREEN_RIGHT)
+	view->right = SCREEN_RIGHT;
+
+    Outline_box(view->top, view->bottom, view->left, view->right);
+
+    view->top++;
+    view->bottom--;
+    view->left++;
+    view->right--;
+
+    view->nrows = view->bottom - view->top + 1;
+    view->ncols = view->right - view->left + 1;
+    view->cell.configured = 0;
+
+    return view;
+}
+
+
+void Init_graphics2()
+{
+    /*
+       R_color_table_fixed();
+     */
+    /*    R_color_offset (0);
+
+       Dscreen();
+     */
+
+    SCREEN_TOP = R_screen_top();
+    SCREEN_BOTTOM = R_screen_bot();
+    SCREEN_LEFT = R_screen_left();
+    SCREEN_RIGHT = R_screen_rite();
+
+
+    BLACK = D_translate_color("black");
+    BLUE = D_translate_color("blue");
+    BROWN = D_translate_color("brown");
+    GREEN = D_translate_color("green");
+    GREY = D_translate_color("grey");
+    ORANGE = D_translate_color("orange");
+    PURPLE = D_translate_color("purple");
+    RED = D_translate_color("red");
+    WHITE = D_translate_color("white");
+    YELLOW = D_translate_color("yellow");
+
+    R_standard_color(WHITE);
+
+    VIEW_TITLE1 = makeview(97.5, 100.0, 0.0, 100.0);
+    VIEW_TITLE_IMAGE = makeview(97.5, 100.0, 50.0, 100.0);
+    VIEW_MAP1 = makeview(0.0, 97.5, 0.0, 100.0);
+    VIEW_IMAGE = makeview(51.0, 97.5, 50.0, 100.0);
+    VIEW_TITLE1_ZOOM = makeview(47.5, 51.0, 0.0, 50.0);
+    VIEW_EMPTY = makeview(47.5, 51.0, 50.0, 100.0);
+    VIEW_MAP1_ZOOM = makeview(2.5, 47.5, 0.0, 50.0);
+    VIEW_EXIT = makeview(2.5, 5.0, 90.0, 100.0);
+    VIEW_INFO = makeview(7.5, 45.0, 52.5, 97.5);
+    VIEW_MENU = makeview(0.0, 2.5, 0.0, 100.0);
+
+    G_init_colors(&VIEW_MAP1->cell.colors);
+    G_init_colors(&VIEW_IMAGE->cell.colors);
+}
+
+
+void Init_graphics()
+{
+    /*
+       R_color_table_fixed();
+     */
+    /*    R_color_offset (0);
+
+       Dscreen();
+     */
+
+    SCREEN_TOP = R_screen_top();
+    SCREEN_BOTTOM = R_screen_bot();
+    SCREEN_LEFT = R_screen_left();
+    SCREEN_RIGHT = R_screen_rite();
+
+
+    BLACK = D_translate_color("black");
+    BLUE = D_translate_color("blue");
+    BROWN = D_translate_color("brown");
+    GREEN = D_translate_color("green");
+    GREY = D_translate_color("grey");
+    ORANGE = D_translate_color("orange");
+    PURPLE = D_translate_color("purple");
+    RED = D_translate_color("red");
+    WHITE = D_translate_color("white");
+    YELLOW = D_translate_color("yellow");
+
+    R_standard_color(WHITE);
+
+    VIEW_TITLE1 = makeview(97.5, 100.0, 0.0, 50.0);
+    VIEW_TITLE_IMAGE = makeview(97.5, 100.0, 50.0, 100.0);
+    VIEW_MAP1 = makeview(51.0, 97.5, 0.0, 50.0);
+    VIEW_IMAGE = makeview(51.0, 97.5, 50.0, 100.0);
+    VIEW_TITLE1_ZOOM = makeview(47.5, 51.0, 0.0, 50.0);
+    VIEW_EMPTY = makeview(47.5, 51.0, 50.0, 100.0);
+    VIEW_MAP1_ZOOM = makeview(2.5, 47.5, 0.0, 50.0);
+    VIEW_EXIT = makeview(2.5, 5.0, 90.0, 100.0);
+    VIEW_INFO = makeview(7.5, 45.0, 52.5, 97.5);
+    VIEW_MENU = makeview(0.0, 2.5, 0.0, 100.0);
+
+    G_init_colors(&VIEW_MAP1->cell.colors);
+    G_init_colors(&VIEW_IMAGE->cell.colors);
+}
+
+void Outline_box(top, bottom, left, right)
+{
+    R_move_abs(left, top);
+    R_cont_abs(left, bottom);
+    R_cont_abs(right, bottom);
+    R_cont_abs(right, top);
+    R_cont_abs(left, top);
+}
+
+
+int Text_width(text)
+     char *text;
+{
+    int top, bottom, left, right;
+
+    R_get_text_box(text, &top, &bottom, &left, &right);
+
+    if (right > left)
+	return right - left + 1;
+    else
+	return left - right + 1;
+}
+
+void Text(text, top, bottom, left, right, edge)
+     char *text;
+{
+    R_set_window(top, bottom, left, right);
+    R_move_abs(left + edge, bottom - edge);
+    R_text(text);
+    R_set_window(SCREEN_TOP, SCREEN_BOTTOM, SCREEN_LEFT, SCREEN_RIGHT);
+}
+
+void Uparrow(top, bottom, left, right)
+{
+    R_move_abs((left + right) / 2, bottom);
+    R_cont_abs((left + right) / 2, top);
+    R_cont_rel((left - right) / 2, (bottom - top) / 2);
+    R_move_abs((left + right) / 2, top);
+    R_cont_rel((right - left) / 2, (bottom - top) / 2);
+}
+
+void Downarrow(top, bottom, left, right)
+{
+    R_move_abs((left + right) / 2, top);
+    R_cont_abs((left + right) / 2, bottom);
+    R_cont_rel((left - right) / 2, (top - bottom) / 2);
+    R_move_abs((left + right) / 2, bottom);
+    R_cont_rel((right - left) / 2, (top - bottom) / 2);
+}
+
+void display_map(cellhd, view, name, mapset)
+     struct Cell_head *cellhd;
+     View *view;
+     char *name;
+     char *mapset;
+{
+
+    G_adjust_window_to_box(cellhd, &view->cell.head, view->nrows,
+			   view->ncols);
+    Configure_view(view, name, mapset, cellhd->ns_res, cellhd->ew_res);
+    drawcell(view);
+}
+
+
+int drawcell(view)
+     View *view;
+{
+    int fd;
+    int left, top;
+    int ncols, nrows;
+    int row;
+    CELL *cell;
+    int repeat;
+    struct Colors *colors;
+    int read_colors;
+    char msg[100];
+
+
+    if (!view->cell.configured)
+	return 0;
+    if (view == VIEW_MAP1 || view == VIEW_MAP1_ZOOM) {
+	colors = &VIEW_MAP1->cell.colors;
+	read_colors = view == VIEW_MAP1;
+    }
+    else {
+	colors = &VIEW_IMAGE->cell.colors;
+	read_colors = view == VIEW_IMAGE;
+    }
+    if (read_colors) {
+	G_free_colors(colors);
+	if (G_read_colors(view->cell.name, view->cell.mapset, colors) < 0)
+	    return 0;
+    }
+
+    display_title(view);
+
+    /*    D_set_colors (colors); */
+
+    G_set_window(&view->cell.head);
+    nrows = G_window_rows();
+    ncols = G_window_cols();
+
+    left = view->cell.left;
+    top = view->cell.top;
+
+    Outline_box(top, top + nrows - 1, left, left + ncols - 1);
+
+    {
+	char *getenv();
+
+	if (getenv("NO_DRAW"))
+	    return 1;
+    }
+
+    fd = G_open_cell_old(view->cell.name, view->cell.mapset);
+    if (fd < 0)
+	return 0;
+    cell = G_allocate_cell_buf();
+
+    /*
+       sprintf (msg, "Plotting %s ...", view->cell.name);
+       Menu_msg(msg);
+     */
+
+    for (row = 0; row < nrows; row += repeat) {
+	R_move_abs(left, top + row);
+	if (G_get_map_row_nomask(fd, cell, row) < 0)
+	    break;
+	repeat = G_row_repeat_nomask(fd, row);
+	/*      D_raster (cell, ncols, repeat, colors); */
+    }
+    G_close_cell(fd);
+    G_free(cell);
+    /*    if(colors != &VIEW_MAP1->cell.colors)
+       D_set_colors(&VIEW_MAP1->cell.colors);
+     */
+    return row == nrows;
+}
+
+void exit_button()
+{
+    int size;
+
+    Erase_view(VIEW_EXIT);
+    R_standard_color(RED);
+    size = VIEW_EXIT->nrows - 4;
+    R_text_size(size, size);
+    Text("exit", VIEW_EXIT->top, VIEW_EXIT->bottom,
+	 VIEW_EXIT->left, VIEW_EXIT->right, 2);
+    R_standard_color(WHITE);
+}
+
+void info_button()
+{
+    int size;
+
+    Erase_view(VIEW_INFO);
+    R_standard_color(GREEN);
+    size = VIEW_INFO->nrows / 13;
+    R_text_size(size, size);
+    Text("UPPER LEFT PANEL:", VIEW_INFO->top, VIEW_INFO->top + size,
+	 VIEW_INFO->left, VIEW_INFO->right, 1);
+    R_standard_color(YELLOW);
+    Text("left: mark 1", VIEW_INFO->top + size, VIEW_INFO->top + 2 * size,
+	 VIEW_INFO->left, VIEW_INFO->right, 1);
+    Text("left: mark 2", VIEW_INFO->top + 2 * size,
+	 VIEW_INFO->top + 3 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    Text("", VIEW_INFO->top + 4 * size,
+	 VIEW_INFO->top + 5 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    R_standard_color(GREEN);
+    Text("LOWER LEFT PANEL:", VIEW_INFO->top + 5 * size,
+	 VIEW_INFO->top + 6 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    R_standard_color(YELLOW);
+    Text("left(double): select", VIEW_INFO->top + 6 * size,
+	 VIEW_INFO->top + 7 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    Text("", VIEW_INFO->top + 8 * size,
+	 VIEW_INFO->top + 9 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    R_standard_color(GREEN);
+    Text("UPPER RIGHT PANEL:", VIEW_INFO->top + 9 * size,
+	 VIEW_INFO->top + 10 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    R_standard_color(YELLOW);
+    Text("right(double): save", VIEW_INFO->top + 10 * size,
+	 VIEW_INFO->top + 11 * size, VIEW_INFO->left, VIEW_INFO->right, 1);
+    R_standard_color(WHITE);
+}

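In graphics.c, makeview() takes its four arguments as percentages of the
full screen measured from the bottom edge; after the 100 - top flip the
pixel rows follow

    view->top = SCREEN_TOP + (SCREEN_BOTTOM - SCREEN_TOP) * (100 - top) / 100

so, for example, VIEW_EXIT = makeview(2.5, 5.0, 90.0, 100.0) is the band
between 2.5% and 5% of the screen height above the bottom menu bar,
spanning the rightmost 10% of the screen.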
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,365 @@
+#define GLOBAL
+
+#include <stdlib.h>
+#include <grass/gis.h>
+#include <grass/raster.h>
+#include <grass/glocale.h>
+#include "global.h"
+#include "globals.h"
+#include "func.h"
+#include "loc_func.h"
+
+int main(int argc, char **argv)
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    struct Option *opt3;
+    struct Option *opt4;
+    struct Option *opt5;
+    struct Option *opt6;
+    struct Option *opt7;
+    struct Cell_head cellhd, zoomed_cellhd, map_cellhd;
+    char *mapset[TRAINING_MAX_LAYERS];
+    char *name[TRAINING_MAX_LAYERS];
+    int nmaps;
+    char buf[256];
+    int window_rows, window_cols;
+    FILE *fp;
+    int num_class;
+    int i, j;
+    Training training;
+    int X1, X2, Y1, Y2;
+    int x_screen1, y_screen1, button1;
+    int x_screen2, y_screen2, button2;
+    int other = TRUE;
+    double east, north, west, south;
+    double tempeast1, tempnorth1;
+    double tempeast2, tempnorth2;
+    char out_map[100];
+    int orig_nexamples;
+    char opt1desc[500];
+    char *vis_map;
+    char *vis_mapset;
+
+    char gisrc[500];
+
+    /* Initialize the GIS calls */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module to generate the training samples for use in i.pr.* modules. "
+	 "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	 "Decision Tree and SVM classification techniques. Also includes "
+	 "cross-validation and bagging methods for model validation.");
+
+    sprintf(opt1desc,
+	    "Input raster maps (max %d) for extracting the training examples.\n\t\tThe first one is used for graphical output if the vis_map option is not set.",
+	    TRAINING_MAX_LAYERS);
+    /* set up command line */
+    opt1 = G_define_option();
+    opt1->key = "map";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->gisprompt = "old,cell,raster";
+    opt1->description = opt1desc;
+    opt1->multiple = YES;
+
+    opt7 = G_define_option();
+    opt7->key = "vis_map";
+    opt7->type = TYPE_STRING;
+    opt7->required = NO;
+    opt7->gisprompt = "old,cell,raster";
+    opt7->description = "Raster map for visualization.";
+
+    opt4 = G_define_option();
+    opt4->key = "training";
+    opt4->type = TYPE_STRING;
+    opt4->required = YES;
+    opt4->description =
+	"Name of the output file containing the training raster maps.\n\t\tIf this file already exists, the new data will be appended\n\t\tto the end of the file.";
+
+    opt6 = G_define_option();
+    opt6->key = "vector";
+    opt6->type = TYPE_STRING;
+    opt6->required = NO;
+    opt6->description =
+	"Name of the vector points map containing labelled locations.\n\t\tReplaces the interactive point-selection procedure.";
+
+    opt2 = G_define_option();
+    opt2->key = "rows";
+    opt2->type = TYPE_INTEGER;
+    opt2->required = YES;
+    opt2->description =
+	"Number of rows (must be odd) of the training samples.";
+
+    opt3 = G_define_option();
+    opt3->key = "cols";
+    opt3->type = TYPE_INTEGER;
+    opt3->required = YES;
+    opt3->description =
+	"Number of columns (must be odd) of the training samples.";
+
+    opt5 = G_define_option();
+    opt5->key = "class";
+    opt5->type = TYPE_INTEGER;
+    opt5->required = NO;
+    opt5->description =
+	"Numerical label to be attached to the training examples.\n\t\tNot required when the vector option is used.";
+
+    if (G_parser(argc, argv))
+	exit(EXIT_FAILURE);
+
+    /* information from the command line */
+    nmaps = 0;
+    for (i = 0; opt1->answers[i] != NULL; i++) {
+	/* bound check before writing into name[]/mapset[] */
+	if (nmaps >= TRAINING_MAX_LAYERS) {
+	    sprintf(buf, "Too many raster maps\nMaximum number allowed = %d",
+		    TRAINING_MAX_LAYERS);
+	    G_fatal_error(buf);
+	}
+	name[nmaps] = opt1->answers[i];
+	mapset[nmaps] = G_find_cell2(name[nmaps], "");
+	if (mapset[nmaps] == NULL) {
+	    sprintf(buf, "Can't find raster map <%s>", name[nmaps]);
+	    G_fatal_error(buf);
+	}
+	nmaps += 1;
+    }
+
+    if (opt7->answer) {
+	vis_map = opt7->answer;
+	vis_mapset = G_find_cell2(vis_map, "");
+	if (vis_mapset == NULL) {
+	    sprintf(buf, "Can't find raster map <%s>", vis_map);
+	    G_fatal_error(buf);
+	}
+    }
+    else {
+	vis_map = name[0];
+	vis_mapset = mapset[0];
+    }
+
+    if (!opt6->answer && !opt5->answer) {
+	sprintf(buf, "Please select a class for the examples\n");
+	G_fatal_error(buf);
+    }
+    if (!opt6->answer) {
+	sscanf(opt5->answer, "%d", &num_class);
+    }
+    if (opt6->answer && opt5->answer) {
+	sprintf(buf,
+		"Option class ignored\nLabels will be read directly from the vector points file\n");
+	G_warning(buf);
+    }
+    sscanf(opt2->answer, "%d", &window_rows);
+    sscanf(opt3->answer, "%d", &window_cols);
+    if (window_rows % 2 == 0 || window_cols % 2 == 0) {
+	sprintf(buf, "Number of rows and columns must be odd\n");
+	G_fatal_error(buf);
+    }
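+    /* odd dimensions guarantee a unique centre cell, so each training
+     * window can be centred exactly on the selected point */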
+
+    /* open the output file and read/initialize the training data */
+    inizialize_training(&training);
+    if ((fp = fopen(opt4->answer, "r")) == NULL) {
+	if ((fp = fopen(opt4->answer, "w")) == NULL) {
+	    sprintf(buf, "Can't open file %s for writing\n", opt4->answer);
+	    G_fatal_error(buf);
+	}
+	fprintf(fp, "Data type:\n");
+	fprintf(fp, "GrassTraining\n");
+	fprintf(fp, "Number of layers:\n");
+	fprintf(fp, "%d\n", nmaps);
+	fprintf(fp, "Label:\n");
+	fprintf(fp, "%s\n", opt4->answer);
+	fprintf(fp, "Data:\n");
+	for (i = 0; i < nmaps; i++) {
+	    fprintf(fp, "Layer_%d\t", i + 1);
+	}
+	fprintf(fp, "Class\tEast\tNorth\tRows\tCols\tEW-res\tNS-res\n");
+    }
+    else {
+	fclose(fp);		/* the file exists: close the probe handle, reopen for appending */
+	if ((fp = fopen(opt4->answer, "a")) == NULL) {
+	    sprintf(buf, "Can't open file %s for appending\n", opt4->answer);
+	    G_fatal_error(buf);
+	}
+	read_training(opt4->answer, &training);
+    }
+
+    if (!opt6->answer) {
+	/* must have a graphics terminal selected */
+	if (R_open_driver() != 0)
+	    G_fatal_error(_("No graphics device selected."));
+
+	/* initialize the monitor */
+	Init_graphics();
+	exit_button();
+	info_button();
+
+	/*get current region */
+	G_get_window(&cellhd);
+
+	/*plot map */
+	display_map(&cellhd, VIEW_MAP1, vis_map, vis_mapset);
+	R_standard_color(RED);
+	for (i = 0; i < training.nexamples; i++) {
+	    display_one_point(VIEW_MAP1, training.east[i], training.north[i]);
+	}
+	R_flush();
+
+	X1 = X2 = Y1 = Y2 = 0;
+	while (other == TRUE) {
+	    Mouse_pointer(&x_screen1, &y_screen1, &button1);
+	    if (In_view(VIEW_MAP1, x_screen1, y_screen1) && button1 == 1) {
+		R_standard_color(GREEN);
+		point(x_screen1, y_screen1);
+		R_flush();
+	    }
+	    if (In_view(VIEW_EXIT, x_screen1, y_screen1)) {
+		R_close_driver();
+		fclose(fp);
+		return 0;
+	    }
+	    Mouse_pointer(&x_screen2, &y_screen2, &button2);
+	    if (In_view(VIEW_EXIT, x_screen2, y_screen2)) {
+		R_close_driver();
+		fclose(fp);
+		return 0;
+	    }
+	    if (In_view(VIEW_MAP1, x_screen1, y_screen1) &&
+		In_view(VIEW_MAP1, x_screen2, y_screen2) &&
+		button1 == 1 && button2 == 1) {
+		R_standard_color(GREEN);
+		rectangle(x_screen1, y_screen1, x_screen2, y_screen2);
+		R_standard_color(GREY);
+		point(X1, Y1);
+		rectangle(X1, Y1, X2, Y2);
+		R_flush();
+		X1 = x_screen1;
+		X2 = x_screen2;
+		Y1 = y_screen1;
+		Y2 = y_screen2;
+
+		from_screen_to_geo(VIEW_MAP1, x_screen1, y_screen1,
+				   &tempeast1, &tempnorth1);
+		from_screen_to_geo(VIEW_MAP1, x_screen2, y_screen2,
+				   &tempeast2, &tempnorth2);
+		if (tempeast1 > tempeast2) {
+		    east = tempeast1;
+		    west = tempeast2;
+		}
+		else {
+		    east = tempeast2;
+		    west = tempeast1;
+		}
+		if (tempnorth1 > tempnorth2) {
+		    north = tempnorth1;
+		    south = tempnorth2;
+		}
+		else {
+		    north = tempnorth2;
+		    south = tempnorth1;
+		}
+		compute_temp_region(&zoomed_cellhd, &cellhd, east, west,
+				    north, south);
+		Erase_view(VIEW_MAP1_ZOOM);
+		display_map(&zoomed_cellhd, VIEW_MAP1_ZOOM, vis_map,
+			    vis_mapset);
+		R_standard_color(RED);
+		for (i = 0; i < training.nexamples; i++) {
+		    display_one_point(VIEW_MAP1_ZOOM, training.east[i],
+				      training.north[i]);
+		}
+		R_flush();
+	    }
+	    if (In_view(VIEW_MAP1_ZOOM, x_screen1, y_screen1) &&
+		In_view(VIEW_MAP1_ZOOM, x_screen2, y_screen2) &&
+		button1 == 1 && button2 == 1) {
+		if (VIEW_MAP1_ZOOM->cell.configured) {
+		    from_screen_to_geo(VIEW_MAP1_ZOOM, x_screen2, y_screen2,
+				       &east, &north);
+		    compute_temp_region2(&map_cellhd, &zoomed_cellhd, east,
+					 north, window_rows, window_cols);
+		    R_standard_color(BLUE);
+		    display_one_point(VIEW_MAP1, east, north);
+		    display_one_point(VIEW_MAP1_ZOOM, east, north);
+		    display_map(&map_cellhd, VIEW_IMAGE, vis_map, vis_mapset);
+		    R_flush();
+		}
+	    }
+
+	    if (In_view(VIEW_IMAGE, x_screen1, y_screen1) &&
+		In_view(VIEW_IMAGE, x_screen2, y_screen2)) {
+		if (button1 == 3 && button2 == 3) {
+		    if (VIEW_IMAGE->cell.configured) {
+			/* store the new example at index nexamples
+			   (0-based), then bump the counter */
+			training.east[training.nexamples] = east;
+			training.north[training.nexamples] = north;
+			training.nexamples += 1;
+
+			R_standard_color(RED);
+			display_one_point(VIEW_MAP1, east, north);
+			display_one_point(VIEW_MAP1_ZOOM, east, north);
+			R_flush();
+			for (i = 0; i < nmaps; i++) {
+			    sprintf(out_map, "%s_%s.%d", opt4->answer,
+				    name[i], training.nexamples);
+			    write_map(&map_cellhd, name[i], mapset[i],
+				      out_map);
+			    fprintf(fp, "%s\t", out_map);
+			}
+			fprintf(fp, "%d\t%f\t%f\t%d\t%d\t%f\t%f\n", num_class,
+				east, north,
+				window_rows, window_cols, cellhd.ew_res,
+				cellhd.ns_res);
+		    }
+		}
+	    }
+	}
+    }
+    else {
+	G_get_window(&cellhd);
+	if (R_open_driver() != 0)
+	    G_fatal_error(_("No graphics device selected."));
+	Init_graphics2();
+	display_map(&cellhd, VIEW_MAP1, vis_map, vis_mapset);
+
+	R_flush();
+	orig_nexamples = training.nexamples;
+	if (read_points_from_file(&training, opt6->answer) == 0) {
+	    sprintf(buf, "Error reading vector points map <%s>", opt6->answer);
+	    G_fatal_error(buf);
+	}
+	}
+	R_standard_color(BLUE);
+	for (i = 0; i < orig_nexamples; i++) {
+	    display_one_point(VIEW_MAP1, training.east[i], training.north[i]);
+	}
+	R_standard_color(RED);
+	for (i = orig_nexamples; i < training.nexamples; i++) {
+	    display_one_point(VIEW_MAP1, training.east[i], training.north[i]);
+	    R_flush();
+	    compute_temp_region2(&map_cellhd, &cellhd,
+				 training.east[i], training.north[i],
+				 window_rows, window_cols);
+	    for (j = 0; j < nmaps; j++) {
+		sprintf(out_map, "%s_%s.%d", opt4->answer, name[j], i + 1);
+		write_map(&map_cellhd, name[j], mapset[j], out_map);
+		fprintf(fp, "%s\t", out_map);
+	    }
+	    fprintf(fp, "%d\t%f\t%f\t%d\t%d\t%f\t%f\n", training.class[i],
+		    training.east[i],
+		    training.north[i],
+		    window_rows, window_cols, cellhd.ew_res, cellhd.ns_res);
+	}
+	fclose(fp);
+	R_close_driver();
+	return 0;
+    }
+    return 0;
+}

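For reference, a training file created by the code above begins with a fixed
header followed by one tab-separated record per example. The sketch below uses
invented map names, coordinates and resolutions; only the field layout comes
from the fprintf() calls in the code:

    Data type:
    GrassTraining
    Number of layers:
    2
    Label:
    mytraining.txt
    Data:
    Layer_1	Layer_2	Class	East	North	Rows	Cols	EW-res	NS-res
    mytraining.txt_map1.1	mytraining.txt_map2.1	1	634123.500000	224567.250000	11	11	30.000000	30.000000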
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/mouse.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/mouse.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/mouse.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,12 @@
+#include <grass/raster.h>
+#include "globals.h"
+
+
+void Mouse_pointer(int *x, int *y, int *button)
+{
+    static int curx, cury;
+
+    R_get_location_with_pointer(&curx, &cury, button);
+    *x = curx;
+    *y = cury;
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/points.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/points.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/points.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,71 @@
+#include <grass/raster.h>
+#include "globals.h"
+#include "loc_func.h"
+
+void display_one_point(View * view, double east, double north)
+{
+    int row, col, x, y;
+
+    /* TODO: check whether rounding (+ .5) is needed here */
+    northing_to_row(&view->cell.head, north, &row);
+    easting_to_col(&view->cell.head, east, &col);
+    y = row_to_view(view, row);
+    x = col_to_view(view, col);
+    dot(x, y);
+}
+
+void dot(int x, int y)
+{
+    int vx[5], vy[5];
+
+    vx[0] = x;
+    vy[0] = y - dotsize;
+    vx[1] = x - dotsize;
+    vy[1] = y;
+    vx[2] = x;
+    vy[2] = y + dotsize;
+    vx[3] = x + dotsize;
+    vy[3] = y;
+    vx[4] = x;
+    vy[4] = y - dotsize;
+
+    R_polygon_abs(vx, vy, 5);
+}
+
+int point_in_view(View * view, double east, double north)
+{
+    if ((north <= view->cell.head.north) &&
+	(north >= view->cell.head.south) &&
+	(east <= view->cell.head.east) && (east >= view->cell.head.west))
+	return 1;
+    else
+	return 0;
+}
+
+void rectangle(int x_screen1, int y_screen1, int x_screen2, int y_screen2)
+{
+    R_move_abs(x_screen1, y_screen1);
+    R_cont_abs(x_screen1, y_screen2);
+    R_cont_abs(x_screen2, y_screen2);
+    R_cont_abs(x_screen2, y_screen1);
+    R_cont_abs(x_screen1, y_screen1);
+}
+
+void point(int x, int y)
+{
+    int vx[5], vy[5];
+
+    vx[0] = x;
+    vy[0] = y - 2;
+    vx[1] = x - 2;
+    vy[1] = y;
+    vx[2] = x;
+    vy[2] = y + 2;
+    vx[3] = x + 2;
+    vy[3] = y;
+    vx[4] = x;
+    vy[4] = y - 2;
+
+    R_polygon_abs(vx, vy, 5);
+}

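As a usage sketch (not part of the commit), point_in_view() can guard the
marker drawing so that points outside the current view are skipped; training,
VIEW_MAP1_ZOOM and display_one_point() are the names this module already uses,
and the loop itself is hypothetical:

    /* draw only the training examples that fall inside the zoomed view */
    for (i = 0; i < training.nexamples; i++) {
	if (point_in_view(VIEW_MAP1_ZOOM, training.east[i], training.north[i]))
	    display_one_point(VIEW_MAP1_ZOOM, training.east[i],
			      training.north[i]);
    }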
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/sites.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/sites.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/sites.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,50 @@
+#include <grass/gis.h>
+#include <grass/site.h>
+#include "global.h"
+#include "globals.h"
+
+
+int read_points_from_file(Training * training, char *site_file)
+{
+    char msg[256];
+    char *mapset;
+    FILE *out;
+    Site *site;
+    int dims = 0, cat = 0, strs = 0, dbls = 0;
+    int code;
+
+    mapset = G_find_sites(site_file, "");
+    if (mapset == NULL) {
+	sprintf(msg,
+		"read_points_from_file-> Can't find vector points map <%s>",
+		site_file);
+	G_fatal_error(msg);
+    }
+    out = G_fopen_sites_old(site_file, mapset);
+    if (out == NULL) {
+	sprintf(msg,
+		"read_points_from_file-> Can't open vector points map <%s>",
+		site_file);
+	G_fatal_error(msg);
+    }
+    if (G_site_describe(out, &dims, &cat, &strs, &dbls) != 0) {
+	sprintf(msg, "read_points_from_file-> Error in G_site_describe");
+	G_warning(msg);
+	return 0;
+    }
+    /* G_site_new_struct() allocates the Site; a separate calloc would leak */
+    site = G_site_new_struct(0, dims, strs, dbls);
+    while ((code = G_site_get(out, site)) > -1) {
+	training->east[training->nexamples] = site->east;
+	training->north[training->nexamples] = site->north;
+	training->class[training->nexamples] = site->ccat;
+	training->nexamples += 1;
+    }
+    G_sites_close(out);
+    if (code != -1) {
+	sprintf(msg, "read_points_from_file-> Error in G_site_get");
+	G_warning(msg);
+	return 0;
+    }
+    return 1;
+}

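For illustration, each record of the old GRASS sites format that this reader
expects carries an easting, a northing and a category that becomes the class
label (site->ccat); the values below are invented:

    634123.5|224567.25|#1
    635010.0|224890.75|#2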
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/title.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/title.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/title.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,40 @@
+#include <grass/raster.h>
+#include "globals.h"
+#include "loc_func.h"
+
+void display_title(View * view)
+{
+    View *title;
+    char center[100];
+    int size;
+    double magnification();
+
+    *center = 0;
+
+    if (view->cell.configured) {
+	sprintf(center, "(mag %.1lf)", magnification(view));
+    }
+
+    if (view == VIEW_MAP1) {
+	title = VIEW_TITLE1;
+    }
+    else if (view == VIEW_MAP1_ZOOM) {
+	title = VIEW_TITLE1_ZOOM;
+    }
+    else if (view == VIEW_IMAGE) {
+	title = VIEW_TITLE_IMAGE;
+    }
+    else {
+	return;			/* unknown view: title would be uninitialized */
+    }
+
+    Erase_view(title);
+    size = title->nrows - 4;
+    R_text_size(size, size);
+    if (*center) {
+	R_standard_color(YELLOW);
+	Text(center, title->top, title->bottom,
+	     (title->left + title->right - Text_width(center)) / 2,
+	     title->right, 2);
+    }
+    R_standard_color(WHITE);
+
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/view.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/view.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/view.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,50 @@
+#include <string.h>
+#include <grass/raster.h>
+#include "globals.h"
+#include "loc_func.h"
+
+void Configure_view(View * view, char *name, char *mapset, double ns_res,
+		    double ew_res)
+{
+    /* ns_res, ew_res: the original map resolution */
+    Erase_view(view);
+    view->cell.configured = 0;
+
+    /* copy the cell name into the view */
+    strcpy(view->cell.name, name);
+    strcpy(view->cell.mapset, mapset);
+
+    /* determine the map edges */
+    view->cell.left = view->left + (view->ncols - view->cell.head.cols) / 2;
+    view->cell.right = view->cell.left + view->cell.head.cols - 1;
+    view->cell.top = view->top + (view->nrows - view->cell.head.rows) / 2;
+    view->cell.bottom = view->cell.top + view->cell.head.rows - 1;
+
+    /* remember original resolutions */
+    view->cell.ns_res = ns_res;
+    view->cell.ew_res = ew_res;
+
+    view->cell.configured = 1;
+}
+
+int In_view(View * view, int x, int y)
+{
+    return (x >= view->left && x <= view->right && y >= view->top &&
+	    y <= view->bottom);
+}
+
+void Erase_view(View * view)
+{
+    R_standard_color(BLACK);
+    R_box_abs(view->left, view->top, view->right, view->bottom);
+}
+
+double magnification(View * view)
+{
+    if (!view->cell.configured)
+	return ((double)0.0);
+    return (view->cell.ew_res / view->cell.head.ew_res);
+}

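To make the magnification formula concrete (values invented): if the map's
original resolution (view->cell.ew_res) is 30 m and the currently displayed
region (view->cell.head.ew_res) is resampled to 10 m, then

    double mag = 30.0 / 10.0;	/* = 3.0: the view is zoomed in 3x */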
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/write_map.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/write_map.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/write_map.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,68 @@
+#include <stdio.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include "globals.h"
+#include <grass/glocale.h>
+
+void write_map(struct Cell_head *cellhd, char *name, char *mapset, char *dest)
+{
+    int fd_to, fd_from, row, nrows, ncols;
+    CELL *buf;
+    char command[500];
+    char *lp, *lm;
+    char *coldir;
+    struct stat statdir;
+
+    G_set_window(cellhd);
+
+    fd_from = G_open_cell_old(name, mapset);
+    if (fd_from < 0)
+	G_fatal_error(_("Error reading raster map <%s> in mapset <%s>"),
+		      name, mapset);
+
+
+    fd_to = G_open_cell_new(dest);
+    if (fd_to < 0)
+	G_fatal_error(_("Error writing raster map <%s> in mapset <%s>"),
+		      dest, G_mapset());
+
+    buf = G_allocate_raster_buf(CELL_TYPE);
+
+    ncols = G_window_cols();
+    nrows = G_window_rows();
+
+    for (row = 0; row < nrows; row++) {
+	G_get_raster_row(fd_from, buf, row, CELL_TYPE);
+	G_put_raster_row(fd_to, buf, CELL_TYPE);
+    }
+
+    /* memory cleanup */
+    G_free(buf);
+    G_close_cell(fd_to);
+    G_close_cell(fd_from);
+
+
+    if ((mapset = G_find_file("colr", name, mapset)) != NULL) {
+
+	lp = G_location_path();
+	lm = G_mapset();
+
+	coldir = G_calloc(500, sizeof(char));
+	sprintf(coldir, "%s/%s/colr", lp, lm);
+	if (stat(coldir, &statdir) == -1) {
+	    mkdir(coldir, 0755);
+	}
+	else {
+	    if (!S_ISDIR(statdir.st_mode)) {
+		G_fatal_error("coldir is not a dir");
+	    }
+	}
+
+	sprintf(command, "cp %s/%s/colr/%s %s/%s/colr/%s", lp,
+		mapset, name, lp, lm, dest);
+	sprintf(command, "cp %s/%s/colr/%s %s/%s/colr/%s", lp,
+		mapset, name, lp, lm, dest);
+	system(command);
+    }
+
+}

Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/zoom.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/zoom.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/zoom.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,20 @@
+#include "globals.h"
+
+void compute_temp_region(struct Cell_head *temp_region,
+			 struct Cell_head *region,
+			 double east, double west, double north, double south)
+{
+    *temp_region = *region;
+    temp_region->north = north;
+    temp_region->south = south;
+    temp_region->east = east;
+    temp_region->west = west;
+    /* cast the whole quotient, not just the difference */
+    temp_region->cols = (int)((temp_region->east - temp_region->west) /
+			      region->ew_res);
+    temp_region->rows = (int)((temp_region->north - temp_region->south) /
+			      region->ns_res);
+    temp_region->ns_res = region->ns_res;
+    temp_region->ew_res = region->ew_res;
+}

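A quick numeric check of the cast fixed above (values invented): with
east - west = 10.7 map units and ew_res = 0.5,

    double diff = 10.7, ew_res = 0.5;
    int cols_ok = (int)(diff / ew_res);	/* 21, as intended */
    int cols_bad = (int)diff / ew_res;	/* truncates to 10 first -> 20 */

so casting the whole quotient matters whenever the resolution is finer than
one map unit or the extent is not an integer.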
Modified: grass-addons/grass7/imagery/i.pr/i.pr_training/zoom2.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_training/zoom2.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_training/zoom2.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,17 @@
+#include "globals.h"
+
+void compute_temp_region2(struct Cell_head *temp_region,
+			  struct Cell_head *region,
+			  double east,
+			  double north, int window_rows, int window_cols)
+{
+    *temp_region = *region;
+    temp_region->north =
+	north + (double)window_rows / 2 * temp_region->ns_res;
+    temp_region->south =
+	north - (double)window_rows / 2 * temp_region->ns_res;
+    temp_region->east = east + (double)window_cols / 2 * temp_region->ew_res;
+    temp_region->west = east - (double)window_cols / 2 * temp_region->ew_res;
+    temp_region->rows = window_rows;
+    temp_region->cols = window_cols;
+}

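Worked example for compute_temp_region2() (values invented): with a centre at
north = 1000.0, window_rows = 11 and ns_res = 30.0, the window extends
(double)11 / 2 * 30.0 = 165.0 map units on each side, giving
temp_region->north = 1165.0 and temp_region->south = 835.0; east and west
follow the same pattern with window_cols and ew_res.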
Modified: grass-addons/grass7/imagery/i.pr/i.pr_uxb/main.c
===================================================================
--- grass-addons/grass7/imagery/i.pr/i.pr_uxb/main.c	2014-12-02 20:39:07 UTC (rev 63336)
+++ grass-addons/grass7/imagery/i.pr/i.pr_uxb/main.c	2014-12-02 21:11:56 UTC (rev 63337)
@@ -0,0 +1,511 @@
+
+/****************************************************************
+ *
+ * MODULE:     i.pr
+ *
+ * AUTHOR(S):  Stefano Merler, ITC-irst
+ *             Updated to ANSI C by G. Antoniol <giulio.antoniol at gmail.com>
+ * 
+ * PURPOSE:    i.pr - Pattern Recognition
+ *
+ * COPYRIGHT:  (C) 2007 by the GRASS Development Team
+ *
+ *             This program is free software under the
+ *             GNU General Public License (>=v2).
+ *             Read the file COPYING that comes with GRASS
+ *             for details.
+ *
+ ****************************************************************/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include "global.h"
+
+int extract_array_with_null(int R, int C, int c, int bc, double **mat,
+			    double *wind_vect);
+
+int main(int argc, char *argv[])
+{
+    struct GModule *module;
+    struct Option *opt1;
+    struct Option *opt2;
+    int model_type;
+    NearestNeighbor nn;
+    GaussianMixture gm;
+    Tree tree;
+    SupportVectorMachine svm;
+    BTree btree;
+    BSupportVectorMachine bsvm;
+    char tmpbuf[500];
+    char *mapset;
+    struct Cell_head cellhd;
+    double ***matrix;
+    DCELL *rowbuf;
+    DCELL *tf;
+    int *fd;
+    int r, c, l;
+    Features features;
+    int i, j;
+    int *space_for_each_layer;
+    double *wind_vect;
+    double *X;
+    int borderC, borderR, borderC_upper, dim;
+    double mean, sd;
+    int corrent_feature;
+    double *projected;
+    int *compute_features;
+    double **output_cell;
+    int set_null;
+    int n_input_map;
+    int R, C;
+    int last_row;
+
+    /* Define the different options */
+
+    opt1 = G_define_option();
+    opt1->key = "input_map";
+    opt1->type = TYPE_STRING;
+    opt1->required = YES;
+    opt1->multiple = YES;
+    opt1->gisprompt = "old,cell,raster";
+    opt1->description =
+	"Input raster maps to be classified.\n\t\tAt least as many maps as were used for the training are required;\n\t\tany extra maps will be ignored.\n\t\tWARNING: the maps must be given in the same order as used for the training.";
+
+    opt2 = G_define_option();
+    opt2->key = "model";
+    opt2->type = TYPE_STRING;
+    opt2->required = YES;
+    opt2->description =
+	"Input file containing the model (output of i.pr_model).\n\t\tIf the data used for model development are not GRASS data the program will abort.";
+
+  /***** Start of main *****/
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("imagery, image processing, pattern recognition");
+    module->description =
+	_("Module for detection of unexploded bombs. "
+	  "i.pr: Pattern Recognition environment for image processing. Includes kNN, "
+	  "Decision Tree and SVM classification techniques. Also includes "
+	  "cross-validation and bagging methods for model validation.");
+
+    if (G_parser(argc, argv))
+	exit(EXIT_FAILURE);
+
+    /*read the model */
+    model_type = read_model(opt2->answer, &features, &nn, &gm, &tree,
+			    &svm, &btree, &bsvm);
+
+    if (features.training.data_type != GRASS_data) {
+	sprintf(tmpbuf, "Model built using other than GRASS data\n");
+	G_fatal_error(tmpbuf);
+    }
+    if (model_type == 0) {
+	sprintf(tmpbuf, "Model not recognized\n");
+	G_fatal_error(tmpbuf);
+    }
+
+    if (model_type == GM_model) {
+	compute_test_gm(&gm);
+    }
+
+    /* load current region */
+    G_get_window(&cellhd);
+    if (fabs((cellhd.ew_res - features.training.ew_res) /
+	     features.training.ew_res) > 0.1) {
+	sprintf(tmpbuf,
+		"EW resolutions of training data and test map differ by more than 10%%\n");
+	G_warning(tmpbuf);
+    }
+    if (fabs((cellhd.ns_res - features.training.ns_res) /
+	     features.training.ns_res) > 0.1) {
+	sprintf(tmpbuf,
+		"NS resolutions of training data and test map differ by more than 10%%\n");
+	G_warning(tmpbuf);
+    }
+
+    /*compute features space */
+    dim = features.training.rows * features.training.cols;
+
+    space_for_each_layer = (int *)G_calloc(features.training.nlayers,
+					   sizeof(int));
+    compute_features =
+	(int *)G_calloc(features.training.nlayers, sizeof(int));
+    for (j = 0; j < features.training.nlayers; j++) {
+	if (features.f_mean[0]) {
+	    for (i = 2; i < 2 + features.f_mean[1]; i++) {
+		if (features.f_mean[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features.f_variance[0]) {
+	    for (i = 2; i < 2 + features.f_variance[1]; i++) {
+		if (features.f_variance[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += 1;
+		}
+	    }
+	}
+	if (features.f_pca[0]) {
+	    for (i = 2; i < 2 + features.f_pca[1]; i++) {
+		if (features.f_pca[i] == j) {
+		    compute_features[j] = TRUE;
+		    space_for_each_layer[j] += dim;
+		}
+	    }
+	}
+	if (space_for_each_layer[j] == 0) {
+	    space_for_each_layer[j] = dim;
+	}
+    }
+
+    /*alloc memory */
+    matrix =
+	(double ***)G_calloc(features.training.nlayers, sizeof(double **));
+    for (l = 0; l < features.training.nlayers; l++) {
+	matrix[l] =
+	    (double **)G_calloc(features.training.rows, sizeof(double *));
+	for (r = 0; r < features.training.rows; r++) {
+	    matrix[l][r] = (double *)G_calloc(cellhd.cols, sizeof(double));
+	}
+    }
+    fd = (int *)G_calloc(features.training.nlayers, sizeof(int));
+    X = (double *)G_calloc(features.examples_dim, sizeof(double));
+
+    wind_vect = (double *)G_calloc(dim, sizeof(double));
+    projected = (double *)G_calloc(features.npc, sizeof(double));
+
+    output_cell = (double **)G_calloc(cellhd.rows, sizeof(double *));
+    for (r = 0; r < cellhd.rows; r++)
+	output_cell[r] = (double *)G_calloc(cellhd.cols, sizeof(double));
+
+    /*open the input maps */
+    n_input_map = 0;
+    for (l = 0; opt1->answers[l]; l++) {
+	if ((mapset = G_find_cell2(opt1->answers[l], "")) == NULL) {
+	    sprintf(tmpbuf, "raster map [%s] not available",
+		    opt1->answers[l]);
+	    G_fatal_error(tmpbuf);
+	}
+
+	if ((fd[l] = G_open_cell_old(opt1->answers[l], mapset)) < 0) {
+	    sprintf(tmpbuf, "error opening raster map [%s]",
+		    opt1->answers[l]);
+	    G_fatal_error(tmpbuf);
+	}
+	n_input_map += 1;
+    }
+
+    if (n_input_map < features.training.nlayers) {
+	sprintf(tmpbuf, "Model requires %d input maps\n",
+		features.training.nlayers);
+	G_fatal_error(tmpbuf);
+    }
+    if (n_input_map > features.training.nlayers) {
+	sprintf(tmpbuf, "Only first %d maps considered\n",
+		features.training.nlayers);
+	G_warning(tmpbuf);
+    }
+
+    /*useful vars */
+    borderC = (features.training.cols - 1) / 2;
+    borderC_upper = cellhd.cols - borderC;
+    borderR = (features.training.rows - 1) / 2;
+    last_row = features.training.rows - 1;
+
+
+    /*read first rows */
+    for (r = 0; r < features.training.rows; r++) {
+	/* the buffer holds one row of every layer, read back to back */
+	rowbuf =
+	    (DCELL *) G_calloc(features.training.nlayers * cellhd.cols,
+			       sizeof(DCELL));
+	tf = rowbuf;
+	for (l = 0; l < features.training.nlayers; l++) {
+	    if (G_get_d_raster_row(fd[l], tf, r) < 0) {
+		sprintf(tmpbuf, "Error reading raster map <%s>\n",
+			opt1->answers[l]);
+		G_fatal_error(tmpbuf);
+	    }
+	    for (c = 0; c < cellhd.cols; c++) {
+		if (G_is_d_null_value(tf))
+		    *tf = 0.0;
+		matrix[l][r][c] = *tf;
+		tf++;
+	    }
+	}
+	G_free(rowbuf);
+    }
+
+    /*computing... */
+    r = features.training.rows;
+
+    while (r < cellhd.rows) {
+	for (c = borderC; c < borderC_upper; c++) {
+	    corrent_feature = 0;
+	    for (l = 0; l < features.training.nlayers; l++) {
+		set_null = extract_array_with_null(features.training.rows,
+						   features.training.cols,
+						   c, borderC, matrix[l],
+						   wind_vect);
+
+		if (set_null) {
+		    break;
+		}
+		else {
+		    mean = mean_of_double_array(wind_vect, dim);
+		    sd = sd_of_double_array_given_mean(wind_vect, dim, mean);
+
+		    if (features.f_normalize[0]) {
+			for (j = 2; j < 2 + features.f_normalize[1]; j++) {
+			    if (features.f_normalize[j] == l) {
+				for (i = 0; i < dim; i++) {
+				    wind_vect[i] = (wind_vect[i] - mean) / sd;
+				}
+				break;
+			    }
+			}
+		    }
+
+		    if (!compute_features[l]) {
+			for (i = 0; i < dim; i++) {
+			    X[corrent_feature + i] = wind_vect[i];
+			}
+			corrent_feature += dim;
+		    }
+		    else {
+			if (features.f_mean[0]) {
+			    for (j = 2; j < 2 + features.f_mean[1]; j++) {
+				if (features.f_mean[j] == l) {
+				    X[corrent_feature] = mean;
+				    corrent_feature += 1;
+				    break;
+				}
+			    }
+			}
+			if (features.f_variance[0]) {
+			    for (j = 2; j < 2 + features.f_variance[1]; j++) {
+				if (features.f_variance[j] == l) {
+				    X[corrent_feature] = sd * sd;
+				    corrent_feature += 1;
+				    break;
+				}
+			    }
+			}
+			if (features.f_pca[0]) {
+			    for (j = 2; j < 2 + features.f_pca[1]; j++) {
+				if (features.f_pca[j] == l) {
+				    product_double_vector_double_matrix
+					(features.pca[l].eigmat, wind_vect,
+					 dim, features.npc, projected);
+
+				    for (i = 0; i < features.npc; i++) {
+					X[corrent_feature + i] = projected[i];
+				    }
+				    corrent_feature += features.npc;
+				    break;
+				}
+			    }
+			}
+		    }
+		}
+	    }
+	    if (set_null) {
+		/* write at the window centre, like the non-null branches */
+		output_cell[r - borderR][c] = 0.0;
+	    }
+	    else {
+		if (features.f_standardize[0]) {
+		    for (i = 2; i < 2 + features.f_standardize[1]; i++) {
+			X[features.f_standardize[i]] =
+			    (X[features.f_standardize[i]] -
+			     features.mean[i - 2]) / features.sd[i - 2];
+		    }
+		}
+		if (features.nclasses == 2) {
+		    switch (model_type) {
+		    case NN_model:
+			output_cell[r - borderR][c] =
+			    predict_nn_2class(&nn, X, nn.k, features.nclasses,
+					      features.p_classes);
+			break;
+		    case GM_model:
+			output_cell[r - borderR][c] =
+			    predict_gm_2class(&gm, X);
+			break;
+		    case CT_model:
+			output_cell[r - borderR][c] =
+			    predict_tree_2class(&tree, X);
+			break;
+		    case SVM_model:
+			output_cell[r - borderR][c] = predict_svm(&svm, X);
+			break;
+		    case BCT_model:
+			output_cell[r - borderR][c] =
+			    predict_btree_2class(&btree, X);
+			break;
+		    case BSVM_model:
+			output_cell[r - borderR][c] = predict_bsvm(&bsvm, X);
+			break;
+		    default:
+			break;
+		    }
+		}
+		else {
+		    switch (model_type) {
+		    case NN_model:
+			output_cell[r - borderR][c] =
+			    predict_nn_multiclass(&nn, X, nn.k,
+						  features.nclasses,
+						  features.p_classes);
+			break;
+		    case GM_model:
+			output_cell[r - borderR][c] =
+			    predict_gm_multiclass(&gm, X);
+			break;
+		    case CT_model:
+			output_cell[r - borderR][c] =
+			    predict_tree_multiclass(&tree, X);
+			break;
+		    case BCT_model:
+			output_cell[r - borderR][c] =
+			    predict_btree_multiclass(&btree, X);
+			break;
+		    default:
+			break;
+		    }
+		}
+	    }
+	}
+	percent(r, cellhd.rows, 1);
+
+	if (r < cellhd.rows) {
+	    for (l = 0; l < features.training.nlayers; l++) {
+		for (R = 0; R < last_row; R++) {
+		    for (C = 0; C < cellhd.cols; C++) {
+			matrix[l][R][C] = matrix[l][R + 1][C];
+		    }
+		}
+
+		rowbuf = (DCELL *) G_calloc(cellhd.cols, sizeof(DCELL));
+		tf = rowbuf;
+
+		if (G_get_d_raster_row(fd[l], tf, r) < 0) {
+		    sprintf(tmpbuf, "Error reading raster map <%s>\n",
+			    opt1->answers[l]);
+		    G_fatal_error(tmpbuf);
+		}
+		for (c = 0; c < cellhd.cols; c++) {
+		    if (G_is_d_null_value(tf))
+			*tf = 0.0;
+		    matrix[l][last_row][c] = *tf;
+		    tf++;
+		}
+		G_free(rowbuf);
+	    }
+	}
+	r += 1;
+    }
+    /*
+       for(r=0;r<cellhd.rows;r++){
+       for(c=0;c<cellhd.cols;c++)
+       fprintf(stderr,"%f\t",output_cell[r][c]);
+       fprintf(stderr,"\n");
+       }
+     */
+    {
+	Blob *blobs1, *blobs2;
+	BlobSites *sites1, *sites2;
+	int nblobs1, npoints1;
+	int nblobs2, npoints2;
+	double tm, tM;
+	int np;
+	double mdist;
+	double x1, x2, y1, y2, A, B;
+	double X[2], Y[2];
+
+	mdist = 100.0;
+	x1 = 1.0;
+	y1 = 0.5;
+	x2 = 5.0;
+	y2 = 0.0;
+	A = (y2 - y1) / (x2 - x1);
+	B = y1 - A * x1;
+
+	tm = 0.5;
+	tM = 1000000.0;
+
+	nblobs1 = 0;
+	npoints1 = 0;
+	find_blob(output_cell, cellhd.rows, cellhd.cols, &blobs1, &npoints1,
+		  &nblobs1, tm, tM);
+	sites1 = (BlobSites *) G_calloc(nblobs1, sizeof(BlobSites));
+	extract_sites_from_blob(blobs1, npoints1, nblobs1, &cellhd, sites1,
+				output_cell);
+
+	tm = 0.00001;
+	tM = 1000000.0;
+	nblobs2 = 0;
+	npoints2 = 0;
+	find_blob(output_cell, cellhd.rows, cellhd.cols, &blobs2, &npoints2,
+		  &nblobs2, tm, tM);
+	sites2 = (BlobSites *) G_calloc(nblobs2, sizeof(BlobSites));
+	extract_sites_from_blob(blobs2, npoints2, nblobs2, &cellhd, sites2,
+				output_cell);
+
+
+	for (i = 0; i < nblobs2; i++) {
+	    np = 0;
+	    for (j = 0; j < nblobs1; j++) {
+		X[0] = sites2[i].east;
+		X[1] = sites2[i].north;
+		Y[0] = sites1[j].east;
+		Y[1] = sites1[j].north;
+
+		if (euclidean_distance(X, Y, 2) <= mdist)
+		    np += 1;
+	    }
+
+	    if (np > 0) {
+		if (sites2[i].max > A * np + B)
+		    fprintf(stdout, "%f|%f|#%d%s%f%s%f\n", sites2[i].east,
+			    sites2[i].north, sites2[i].n, "%", sites2[i].min,
+			    "%", sites2[i].max);
+	    }
+
+	}
+	/*
+	   for(j=0;j<nblobs2;j++)
+	   if(sites2[j].n>=1)
+	   fprintf(stdout,"%f|%f|#%d%s%f\n",sites2[j].east,sites2[j].north,
+	   sites2[j].n, "%",sites2[j].min);
+	 */
+    }
+    return 0;
+}
+
+/* Copy the R x C window of cells centred on column c (border bc) from mat
+   into wind_vect; return 1 when the window sums to zero (null cells were
+   loaded as 0.0, so an all-zero window is treated as null), 0 otherwise. */
+int extract_array_with_null(int R, int C, int c, int bc, double **mat,
+			    double *wind_vect)
+{
+    int i, j;
+    double sum;
+    int index;
+
+    sum = 0.0;
+    index = 0;
+    for (i = 0; i < R; i++) {
+	for (j = 0; j < C; j++) {
+	    wind_vect[index] = mat[i][c - bc + j];
+	    sum += wind_vect[index++];
+	}
+    }
+    if (sum == 0.0) {
+	return 1;
+    }
+    else {
+	return 0;
+    }
+}

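To unpack the filtering rule at the end of main(): the line through
(x1, y1) = (1.0, 0.5) and (x2, y2) = (5.0, 0.0) gives A = -0.125 and
B = 0.625, so a candidate blob with np = 3 strong detections within
mdist = 100 map units is reported only if its maximum output exceeds
A * 3 + B = 0.25; from np = 5 on the threshold reaches 0 and any positive
maximum passes.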


More information about the grass-commit mailing list