[GRASS-SVN] r38890 - in grass/branches/develbranch_6: include lib lib/gmath lib/gmath/test

svn_grass at osgeo.org svn_grass at osgeo.org
Thu Aug 27 16:57:08 EDT 2009


Author: huhabla
Date: 2009-08-27 16:57:08 -0400 (Thu, 27 Aug 2009)
New Revision: 38890

Added:
   grass/branches/develbranch_6/lib/gmath/ATLAS_wrapper_blas_level_1.c
   grass/branches/develbranch_6/lib/gmath/blas_level_1.c
   grass/branches/develbranch_6/lib/gmath/blas_level_2.c
   grass/branches/develbranch_6/lib/gmath/blas_level_3.c
   grass/branches/develbranch_6/lib/gmath/ccmath_grass_wrapper.c
   grass/branches/develbranch_6/lib/gmath/solvers_classic_iter.c
   grass/branches/develbranch_6/lib/gmath/solvers_direct.c
   grass/branches/develbranch_6/lib/gmath/solvers_krylov.c
   grass/branches/develbranch_6/lib/gmath/sparse_matrix.c
   grass/branches/develbranch_6/lib/gmath/test/
   grass/branches/develbranch_6/lib/gmath/test/Makefile
   grass/branches/develbranch_6/lib/gmath/test/bench_blas2.c
   grass/branches/develbranch_6/lib/gmath/test/bench_blas3.c
   grass/branches/develbranch_6/lib/gmath/test/bench_solver_direct.c
   grass/branches/develbranch_6/lib/gmath/test/bench_solver_krylov.c
   grass/branches/develbranch_6/lib/gmath/test/test.gmath.lib.html
   grass/branches/develbranch_6/lib/gmath/test/test_blas1.c
   grass/branches/develbranch_6/lib/gmath/test/test_blas2.c
   grass/branches/develbranch_6/lib/gmath/test/test_blas3.c
   grass/branches/develbranch_6/lib/gmath/test/test_ccmath_wrapper.c
   grass/branches/develbranch_6/lib/gmath/test/test_gmath_lib.h
   grass/branches/develbranch_6/lib/gmath/test/test_main.c
   grass/branches/develbranch_6/lib/gmath/test/test_solvers.c
   grass/branches/develbranch_6/lib/gmath/test/test_tools.c
   grass/branches/develbranch_6/lib/gmath/test/test_tools_les.c
Removed:
   grass/branches/develbranch_6/lib/gmath/eigen.c
   grass/branches/develbranch_6/lib/gmath/jacobi.c
   grass/branches/develbranch_6/lib/gmath/svd.c
Modified:
   grass/branches/develbranch_6/include/gisdefs.h
   grass/branches/develbranch_6/include/gmath.h
   grass/branches/develbranch_6/lib/Makefile
   grass/branches/develbranch_6/lib/gmath/Makefile
   grass/branches/develbranch_6/lib/gmath/TODO
   grass/branches/develbranch_6/lib/gmath/del2g.c
   grass/branches/develbranch_6/lib/gmath/eigen_tools.c
   grass/branches/develbranch_6/lib/gmath/mult.c
Log:
Moving the linear equation solvers from the gpde library to the gmath library
Adding basic linear algebra (BLAS) level 1, 2 and 3 functions to gmath
Adding an ATLAS wrapper for the BLAS level 1 functions
Adding a ccmath wrapper to replace the numerical recipes code
Removing the numerical recipes eigen and svd code from the gmath library
Adding tests for the linear equation solvers, the BLAS level 1, 2 and 3 functions
and the ccmath wrapper
Renaming several functions to fit the GRASS library naming convention


Modified: grass/branches/develbranch_6/include/gisdefs.h
===================================================================
--- grass/branches/develbranch_6/include/gisdefs.h	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/include/gisdefs.h	2009-08-27 20:57:08 UTC (rev 38890)
@@ -406,6 +406,17 @@
 /* date.c */
 char *G_date(void);
 
+/* dalloc.c */
+double *G_alloc_vector(size_t);
+double **G_alloc_matrix(int, int);
+float  *G_alloc_fvector(size_t);
+float  **G_alloc_fmatrix(int, int);
+void G_free_vector(double *);
+void G_free_matrix(double **);
+void G_free_fvector(float *);
+void G_free_fmatrix(float **);
+
+
 /* datum.c */
 int G_get_datum_by_name(const char *);
 char *G_datum_name(int);
@@ -658,6 +669,12 @@
 char *G_home(void);
 char *G__home(void);
 
+/* ialloc.c */
+int *G_alloc_ivector(size_t);
+int **G_alloc_imatrix(int, int);
+void G_free_ivector(int *);
+void G_free_imatrix(int **);
+
 /* index.c */
 char *G_index(const char *, int);
 char *G_rindex(const char *, int);

Modified: grass/branches/develbranch_6/include/gmath.h
===================================================================
--- grass/branches/develbranch_6/include/gmath.h	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/include/gmath.h	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,11 +1,10 @@
-
 /******************************************************************************
  * gmath.h
  * Top level header file for gmath units
 
  * @Copyright David D.Gray <ddgray at armadce.demon.co.uk>
  * 27th. Sep. 2000
- * Last updated: 2007-08-26
+ * Last updated: $Id$
  *
 
  * This file is part of GRASS GIS. It is free software. You can 
@@ -30,71 +29,151 @@
 #include <grass/la.h>
 #endif
 
+
+#define G_MATH_NORMAL_LES 0
+#define G_MATH_SPARSE_LES 1
+
+
 /* fft.c */
 int fft(int, double *[2], int, int, int);
 int fft2(int, double (*)[2], int, int, int);
-
 /* gauss.c */
-double G_math_rand_gauss(int, double);
-
+float gauss(int);
 /* max_pow2.c */
-long G_math_max_pow2(long);
-long G_math_min_pow2(long);
-
+long max_pow2 (long n);
+long min_pow2 (long n);
 /* rand1.c */
-float G_math_rand(int);
-
+float rand1(int);
 /* del2g.c */
 int del2g(double *[2], int, double);
-
-/* findzc.c */
-int G_math_findzc(double[], int, double[], double, int);
-
 /* getg.c */
 int getg(double, double *[2], int);
+/* eigen_tools.c */
+int G_math_egvorder(double *, double **, long);
+/* mult.c */
+int G_math_complex_mult (double *v1[2], int size1, double *v2[2], int size2, double *v3[2], int size3);
 
-/* eigen.c */
-int eigen(double **, double **, double *, int);
-int egvorder2(double *, double **, long);
-int transpose2(double **, long);
 
-/* jacobi.c */
-#define MX 9
-int jacobi(double[MX][MX], long, double[MX], double[MX][MX]);
-int egvorder(double[MX], double[MX][MX], long);
-int transpose(double[MX][MX], long);
+/* *************************************************************** */
+/* ***** WRAPPER FOR CCMATH FUNCTIONS USED IN GRASS ************** */
+/* *************************************************************** */
+extern int G_math_solv(double **,double *,int);
+extern int G_math_solvps(double **,double *,int);
+extern void G_math_solvtd(double *,double *,double *,double *,int);
+extern int G_math_solvru(double **,double *,int);
+extern int G_math_minv(double **,int);
+extern int G_math_psinv(double **,int);
+extern int G_math_ruinv(double **,int);
+extern void G_math_eigval(double **,double *,int);
+extern void G_math_eigen(double **,double *,int);
+extern double G_math_evmax(double **,double *,int);
+extern int G_math_svdval(double *,double **,int,int);
+extern int G_math_sv2val(double *,double **,int,int);
+extern int G_math_svduv(double *,double **,double **, int,double **,int);
+extern int G_math_sv2uv(double *,double **,double **,int,double **,int);
+extern int G_math_svdu1v(double *,double **,int,double **,int);
 
-/* mult.c */
-int mult(double *v1[2], int size1, double *v2[2], int size2, double *v3[2],
-	 int size3);
 
-/* dalloc.c */
-double *G_alloc_vector(size_t);
-double **G_alloc_matrix(int, int);
-float *G_alloc_fvector(size_t);
-float **G_alloc_fmatrix(int, int);
-void G_free_vector(double *);
-void G_free_matrix(double **);
-void G_free_fvector(float *);
-void G_free_fmatrix(float **);
+/* *************************************************************** */
+/* *************** LINEAR EQUATION SYSTEM PART ******************* */
+/* *************************************************************** */
 
-/* eigen_tools.c */
-int G_tqli(double[], double[], int, double **);
-void G_tred2(double **, int, double[], double[]);
+/*!
+ * \brief The row vector of the sparse matrix
+ * */
+typedef struct
+{
+    double *values;		/*The non-zero values of the row */
+    unsigned int cols;		/*Number of non-zero entries in the row */
+    unsigned int *index;	/*The column index of each entry */
+} G_math_spvector;
 
-/* ialloc.c */
-int *G_alloc_ivector(size_t);
-int **G_alloc_imatrix(int, int);
-void G_free_ivector(int *);
-void G_free_imatrix(int **);
+/* Sparse matrix and sparse vector functions
+ * */
+extern G_math_spvector *G_math_alloc_spvector(int );
+extern G_math_spvector **G_math_alloc_spmatrix(int );
+extern void G_math_free_spmatrix(G_math_spvector ** , int );
+extern void G_math_free_spvector(G_math_spvector * );
+extern int G_math_add_spvector(G_math_spvector **, G_math_spvector * , int );
 
-/* lu.c */
-int G_ludcmp(double **, int, int *, double *);
-void G_lubksb(double **, int, int *, double[]);
+/*linear equation solvers, most of them multithreaded with OpenMP*/
+extern int G_math_solver_gauss(double **, double *, double *, int );
+extern int G_math_solver_lu(double **, double *, double *, int );
+extern int G_math_solver_cholesky(double **, double *, double *, int , int );
+extern int G_math_solver_jacobi(double **, double *, double *, int , int , double , double );
+extern int G_math_solver_gs(double **, double *, double *, int , int , double , double );
+extern int G_math_solver_pcg(double **, double *, double *, int , int , double , int );
+extern int G_math_solver_cg(double **, double *, double *, int , int , double );
+extern int G_math_solver_bicgstab(double **, double *, double *, int , int , double );
+extern int G_math_solver_sparse_jacobi(G_math_spvector **, double *, double *, int , int , double , double );
+extern int G_math_solver_sparse_gs(G_math_spvector **, double *, double *, int , int , double , double );
+extern int G_math_solver_sparse_pcg(G_math_spvector **, double *, double *, int , int , double , int );
+extern int G_math_solver_sparse_cg(G_math_spvector **, double *, double *, int , int , double );
+extern int G_math_solver_sparse_bicgstab(G_math_spvector **, double *, double *, int , int , double );
+/* solver algorithms and helper functions */
+extern void G_math_gauss_elimination(double **, double *, int );
+extern void G_math_lu_decomposition(double **, double *, int );
+extern int G_math_cholesky_decomposition(double **, int , int );
+extern void G_math_backward_solving(double **, double *, double *, int );
+extern void G_math_forward_solving(double **, double *, double *, int );
+extern int G_math_pivot_create(double **, double *, int , int );
 
-/* svd.c */
-int G_svdcmp(double **, int, int, double *, double **);
-int G_svbksb(double **, double[], double **, int, int, double[], double[]);
-int G_svelim(double *, int);
 
+/*BLAS-like level 1, 2 and 3 functions*/
+
+/*level 1 vector - vector grass implementation with OpenMP thread support*/
+extern void G_math_d_x_dot_y(double *, double *, double *, int );
+extern void G_math_d_asum_norm(double *, double *, int );
+extern void G_math_d_euclid_norm(double *, double *, int );
+extern void G_math_d_max_norm(double *, double *, int );
+extern void G_math_d_ax_by(double *, double *, double *, double , double , int );
+extern void G_math_d_copy(double *, double *, int );
+
+extern void G_math_f_x_dot_y(float *, float *, float *, int );
+extern void G_math_f_asum_norm(float *, float *, int );
+extern void G_math_f_euclid_norm(float *, float *, int );
+extern void G_math_f_max_norm(float *, float *, int );
+extern void G_math_f_ax_by(float *, float *, float *, float , float , int );
+extern void G_math_f_copy(float *, float *, int );
+
+extern void G_math_i_x_dot_y(int *, int *,  double *, int );
+extern void G_math_i_asum_norm(int *,  double *, int );
+extern void G_math_i_euclid_norm(int *,  double *,int );
+extern void G_math_i_max_norm(int *,  int *, int );
+extern void G_math_i_ax_by(int *, int *, int *, int , int , int );
+extern void G_math_i_copy(int *, int *, int );
+
+/*ATLAS blas level 1 wrapper*/
+extern double G_math_ddot(double *, double *, int );
+extern float G_math_sdot(float *, float *, int );
+extern float G_math_sdsdot(float *, float *, float , int );
+extern double G_math_dnrm2(double *, int );
+extern double G_math_dasum(double *, int );
+extern double G_math_idamax(double *, int );
+extern float  G_math_snrm2(float *, int );
+extern float  G_math_sasum(float *, int );
+extern float  G_math_isamax(float *, int );
+extern void G_math_dscal(double *, double , int );
+extern void G_math_sscal(float *, float , int );
+extern void G_math_dcopy(double *, double *, int );
+extern void G_math_scopy(float *, float *, int );
+extern void G_math_daxpy(double *, double *, double , int );
+extern void G_math_saxpy(float *, float *, float , int );
+
+/*level 2 matrix - vector grass implementation with OpenMP thread support*/
+extern void G_math_Ax_sparse(G_math_spvector **, double *, double *, int );
+extern void G_math_d_Ax(double **, double *, double *, int , int );
+extern void G_math_f_Ax(float **, float *, float *, int , int );
+extern void G_math_d_x_dyad_y(double *, double *, double **, int, int );
+extern void G_math_f_x_dyad_y(float *, float *, float **, int, int );
+extern void G_math_d_aAx_by(double **, double *, double *, double , double , double *, int , int );
+extern void G_math_f_aAx_by(float **, float *, float *, float , float , float *, int , int );
+
+/*level 3 matrix - matrix grass implementation with OpenMP thread support*/
+extern void G_math_d_aA_B(double **, double **, double , double **, int , int );
+extern void G_math_f_aA_B(float **, float **, float , float **, int , int );
+extern void G_math_d_AB(double **, double **, double **, int , int , int );
+extern void G_math_f_AB(float **,  float **,  float **,  int , int , int );
+
 #endif /* GMATH_H_ */
+

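For orientation, a minimal sketch of how the sparse matrix type and the G_math_Ax_sparse() prototype declared above fit together. This is illustration only and not part of the changeset; the meaning of the allocator arguments (non-zero entries per row, number of rows, row position) is inferred from the prototypes rather than stated here.

#include <stdio.h>
#include <grass/gis.h>
#include <grass/gmath.h>

/* Assemble a 3x3 diagonal sparse matrix with 2.0 on the diagonal and
 * compute y = A * x with the new level 2 routine. */
static void sparse_ax_sketch(void)
{
    int i, rows = 3;
    double x[3] = { 1.0, 2.0, 3.0 };
    double y[3];

    /* assumed: the argument is the number of rows of the sparse matrix */
    G_math_spvector **A = G_math_alloc_spmatrix(rows);

    for (i = 0; i < rows; i++) {
	/* assumed: the argument is the number of non-zero entries of the row */
	G_math_spvector *spv = G_math_alloc_spvector(1);

	spv->cols = 1;		/* one non-zero entry in this row */
	spv->index[0] = i;	/* its column index */
	spv->values[0] = 2.0;	/* its value */

	/* assumed: attach the row vector at row position i */
	G_math_add_spvector(A, spv, i);
    }

    G_math_Ax_sparse(A, x, y, rows);	/* y = {2.0, 4.0, 6.0} */
    printf("y = {%g, %g, %g}\n", y[0], y[1], y[2]);

    G_math_free_spmatrix(A, rows);
}
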
Modified: grass/branches/develbranch_6/lib/Makefile
===================================================================
--- grass/branches/develbranch_6/lib/Makefile	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/Makefile	2009-08-27 20:57:08 UTC (rev 38890)
@@ -4,6 +4,7 @@
 SUBDIRS = \
 	datetime \
 	gis \
+	external \
 	gmath \
 	linkm \
 	driver \
@@ -16,7 +17,6 @@
 	db \
 	vask \
 	edit \
-	external \
 	fonts \
 	gtcltk \
 	form \

Added: grass/branches/develbranch_6/lib/gmath/ATLAS_wrapper_blas_level_1.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/ATLAS_wrapper_blas_level_1.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/ATLAS_wrapper_blas_level_1.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,417 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      blas level 1 like functions   
+* 		part of the gmath library
+*               
+* COPYRIGHT:    (C) 2000 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <grass/gmath.h>
+
+#if defined(HAVE_ATLAS)
+#include <cblas.h>
+#endif
+
+
+/*!
+ * \brief Compute the dot product of vector x and y 
+ * using the ATLAS routine cblas_ddot 
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_x_dot_y, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (double *)
+ * \param y       (double *)
+ * \param rows (int)
+ * \return (double)
+ *
+ * */
+double G_math_ddot(double *x, double *y, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_ddot(rows, x, 1, y, 1);
+#else
+    double val;
+
+    G_math_d_x_dot_y(x, y, &val, rows);
+    return val;
+#endif
+}
+
+
+/*!
+ * \brief Compute the dot product of vector x and y plus the scalar a,
+ * using the ATLAS routine cblas_sdsdot
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_x_dot_y, the OpenMP multi-threaded
+ * GRASS implementation, and add a to the result
+ *
+ * \param x       (float *)
+ * \param y       (float *)
+ * \param a       (float)
+ * \param rows (int)
+ * \return (float)
+ *
+ * */
+float G_math_sdsdot(float *x, float *y, float a, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_sdsdot(rows, a, x, 1, y, 1);
+#else
+    float val;
+
+    G_math_f_x_dot_y(x, y, &val, rows);
+    return a + val;
+#endif
+}
+
+/*!
+ * \brief Compute the euclidean norm of vector x  
+ * using the ATLAS routine cblas_dnrm2 
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_euclid_norm, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (double *)
+ * \param rows (int)
+ * \return (double)
+ *
+ * */
+double G_math_dnrm2(double *x, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_dnrm2(rows, x, 1);
+#else
+    double val;
+
+    G_math_d_euclid_norm(x, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Compute the absolute sum norm of vector x  
+ * using the ATLAS routine cblas_dasum 
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_asum_norm, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (double *)
+ * \param rows (int)
+ * \return (double)
+ *
+ * */
+double G_math_dasum(double *x, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_dasum(rows, x, 1);
+#else
+    double val;
+
+    G_math_d_asum_norm(x, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Compute the maximum norm of vector x  
+ * using the ATLAS routine cblas_idamax 
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_max_norm, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (double *)
+ * \param rows (int)
+ * \return (double)
+ *
+ * */
+double G_math_idamax(double *x, int rows)
+{
+#if defined(HAVE_ATLAS)
+    /* cblas_idamax() returns the index of the entry with the largest
+     * absolute value, so take that entry's absolute value to match the
+     * maximum norm computed by the fallback below */
+    return fabs(x[cblas_idamax(rows, x, 1)]);
+#else
+    double val;
+
+    G_math_d_max_norm(x, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Scale vector x with scalar a
+ * using the ATLAS routine cblas_dscal
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_ax_by, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (double *)
+ * \param a       (double)
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_dscal(double *x, double a, int rows)
+{
+#if defined(HAVE_ATLAS)
+    cblas_dscal(rows, a, x, 1);
+#else
+    G_math_d_ax_by(x, x, x, a, 0.0, rows);
+#endif
+
+    return;
+}
+
+/*!
+ * \brief  Copy vector x to vector y
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_copy
+ *
+ * \param x       (double *)
+ * \param y       (double *)
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_dcopy(double *x, double *y, int rows)
+{
+#if defined(HAVE_ATLAS)
+    cblas_dcopy(rows, x, 1, y, 1);
+#else
+    G_math_d_copy(x, y, rows);
+#endif
+
+    return;
+}
+
+
+/*!
+ * \brief Scale vector x with scalar a and add it to y 
+ *
+ * \f[ {\bf y} = a{\bf x} + {\bf y} \f]
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_d_ax_by, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x      (double *)
+ * \param y      (double *)
+ * \param a      (double)
+ * \param rows (int)
+ * \return (void)
+ * 
+ * */
+void G_math_daxpy(double *x, double *y, double a, int rows)
+{
+#if defined(HAVE_ATLAS)
+    cblas_daxpy(rows, a, x, 1, y, 1);
+#else
+    G_math_d_ax_by(x, y, y, a, 1.0, rows);
+#endif
+
+    return;
+}
+
+/****************************************************************** */
+
+/********* F L O A T / S I N G L E   P R E C I S I O N *********** */
+
+/****************************************************************** */
+
+/*!
+ * \brief Compute the dot product of vector x and y 
+ * using the ATLAS routine cblas_sdot 
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_x_dot_y, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (float *)
+ * \param y       (float *)
+ * \param rows (int)
+ * \return (float)
+ *
+ * */
+float G_math_sdot(float *x, float *y, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_sdot(rows, x, 1, y, 1);
+#else
+    float val;
+
+    G_math_f_x_dot_y(x, y, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Compute the euclidean norm of vector x  
+ * using the ATLAS routine cblas_snrm2
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_euclid_norm, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (float *)
+ * \param rows (int)
+ * \return (float)
+ *
+ * */
+float G_math_snrm2(float *x, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_snrm2(rows, x, 1);
+#else
+    float val;
+
+    G_math_f_euclid_norm(x, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Compute the absolute sum norm of vector x  
+ * using the ATLAS routine cblas_sasum
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_asum_norm, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (float *)
+ * \param rows (int)
+ * \return (float)
+ *
+ * */
+float G_math_sasum(float *x, int rows)
+{
+#if defined(HAVE_ATLAS)
+    return cblas_sasum(rows, x, 1);
+#else
+    float val;
+
+    G_math_f_asum_norm(x, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Compute the maximum norm of vector x  
+ * using the ATLAS routine cblas_isamax
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_max_norm, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (float *)
+ * \param rows (int)
+ * \return (float)
+ *
+ * */
+float G_math_isamax(float *x, int rows)
+{
+#if defined(HAVE_ATLAS)
+    /* cblas_isamax() returns the index of the entry with the largest
+     * absolute value, so take that entry's absolute value to match the
+     * maximum norm computed by the fallback below */
+    return fabs(x[cblas_isamax(rows, x, 1)]);
+#else
+    float val;
+
+    G_math_f_max_norm(x, &val, rows);
+    return val;
+#endif
+}
+
+/*!
+ * \brief Scale vector x with scalar a
+ * using the ATLAS routine cblas_sscal
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_ax_by, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x       (float *)
+ * \param a       (float)
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_sscal(float *x, float a, int rows)
+{
+#if defined(HAVE_ATLAS)
+    cblas_sscal(rows, a, x, 1);
+#else
+    G_math_f_ax_by(x, x, x, a, 0.0, rows);
+#endif
+
+    return;
+}
+
+/*!
+ * \brief  Copy vector x to vector y
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_copy, the
+ * GRASS implementation
+ *
+ * \param x       (float *)
+ * \param y       (float *)
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_scopy(float *x, float *y, int rows)
+{
+#if defined(HAVE_ATLAS)
+    cblas_scopy(rows, x, 1, y, 1);
+#else
+    G_math_f_copy(x, y, rows);
+#endif
+
+    return;
+}
+
+
+/*!
+ * \brief Scale vector x with scalar a and add it to y 
+ *
+ * \f[ {\bf y} = a{\bf x} + {\bf y} \f]
+ *
+ * If GRASS was not compiled with ATLAS support
+ * it will call #G_math_f_ax_by, the OpenMP multi-threaded
+ * GRASS implementation
+ *
+ * \param x      (float *)
+ * \param y      (float *)
+ * \param a      (float)
+ * \param rows (int)
+ * \return (void)
+ * 
+ * */
+void G_math_saxpy(float *x, float *y, float a, int rows)
+{
+#if defined(HAVE_ATLAS)
+    cblas_saxpy(rows, a, x, 1, y, 1);
+#else
+    G_math_f_ax_by(x, y, y, a, 1.0, rows);
+#endif
+
+    return;
+}

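A short usage sketch of the wrapper above, for illustration only: callers never check for ATLAS themselves, because each G_math_* wrapper either forwards to the corresponding cblas_* routine or falls back to the OpenMP implementation from blas_level_1.c.

#include <stdio.h>
#include <grass/gmath.h>

static void atlas_wrapper_sketch(void)
{
    double x[4] = { 1.0, 2.0, 3.0, 4.0 };
    double y[4] = { 4.0, 3.0, 2.0, 1.0 };

    double dot = G_math_ddot(x, y, 4);	/* 1*4 + 2*3 + 3*2 + 4*1 = 20 */
    double nrm = G_math_dnrm2(x, 4);	/* sqrt(1 + 4 + 9 + 16)       */

    G_math_daxpy(x, y, 2.0, 4);		/* y = 2 * x + y, in place    */

    printf("dot = %g, norm = %g, y[0] = %g\n", dot, nrm, y[0]);
}
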
Modified: grass/branches/develbranch_6/lib/gmath/Makefile
===================================================================
--- grass/branches/develbranch_6/lib/gmath/Makefile	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/Makefile	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,6 +1,6 @@
 MODULE_TOPDIR = ../..
 
-EXTRA_LIBS=$(GISLIB) $(FFTWLIB) $(LAPACKLIB) $(BLASLIB)
+EXTRA_LIBS=$(GISLIB) $(FFTWLIB) $(LAPACKLIB) $(BLASLIB) $(CCMATHLIB)
 
 EXTRA_CFLAGS = $(FFTWINC) $(PICFLAGS)
 

Modified: grass/branches/develbranch_6/lib/gmath/TODO
===================================================================
--- grass/branches/develbranch_6/lib/gmath/TODO	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/TODO	2009-08-27 20:57:08 UTC (rev 38890)
@@ -39,4 +39,20 @@
   transpose(), transpose2()
  both used in i.pca and i.cca. Header different, functionality identical.
  To be merged.
+ -> Merged and partly replaced by ccmath eigenvector functions
  -> some cleanup done by Glynn
+Aug 2009 Soeren:
+Replaced the eigenvalue and singular value decomposition code with ccmath implementations
+
+The lu.c needs to be replaced by the ccmath lu solver. The following modules must be edited:
+./imagery/i.smap/bouman/invert.c
+./imagery/i.gensigset/invert.c 
+./raster/r.param.scale/process.c
+./raster/r.param.scale/process.c
+./lib/rst/interp_float/ressegm2d.c
+./lib/rst/interp_float/ressegm2d.c
+./lib/rst/interp_float/ressegm2d.c
+./lib/rst/interp_float/segmen2d.c
+./lib/rst/interp_float/segmen2d.c
+
+

Added: grass/branches/develbranch_6/lib/gmath/blas_level_1.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/blas_level_1.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/blas_level_1.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,674 @@
+
+/*****************************************************************************
+ *
+ * MODULE:       Grass Gmath Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2007
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      blas level 1 like functions   
+ * 		part of the gmath library
+ *               
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <grass/gmath.h>
+#include <grass/gis.h>
+
+/* **************************************************************** */
+/* *************** D O U B L E ************************************ */
+/* **************************************************************** */
+
+/*!
+ * \brief Compute the dot product of vector x and y 
+ *
+ * \f[ a = {\bf x}^T  {\bf y} \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (double *)
+ * \param y       (double *)
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_d_x_dot_y(double *x, double *y, double *value, int rows)
+{
+    int i;
+
+    double s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += x[i] * y[i];
+    }
+#pragma omp single
+    {
+	*value = s;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the euclid norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_2 \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (double *) -- the vector
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_d_euclid_norm(double *x, double *value, int rows)
+{
+    int i;
+
+    double s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += x[i] * x[i];
+    }
+#pragma omp single
+    {
+	*value = sqrt(s);
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the asum norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_1 \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (double *)-- the vector
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_d_asum_norm(double *x, double *value, int rows)
+{
+    int i = 0;
+
+    double s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += fabs(x[i]);
+    }
+#pragma omp single
+    {
+	*value = s;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the maximum norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_\infty \f]
+ *
+ * This function is not multi-threaded
+ *
+ * \param x       (double *)-- the vector
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_d_max_norm(double *x, double *value, int rows)
+{
+    int i;
+
+    double max = 0.0;
+
+    max = fabs(x[rows - 1]);
+    for (i = rows - 2; i >= 0; i--) {
+	if (max < fabs(x[i]))
+	    max = fabs(x[i]);
+    }
+
+    *value = max;
+}
+
+/*!
+ * \brief Scales vectors x and y with the scalars a and b and adds them
+ *
+ * \f[ {\bf z} = a{\bf x} + b{\bf y} \f]
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param x      (double *)
+ * \param y      (double *)
+ * \param z      (double *)
+ * \param a      (double)
+ * \param b      (double)
+ * \param rows (int)
+ * \return (void)
+ * 
+ * */
+void G_math_d_ax_by(double *x, double *y, double *z, double a, double b,
+		    int rows)
+{
+    int i;
+
+    /*find specific cases */
+    if (b == 0.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i];
+	}
+    }
+    else if ((a == 1.0) && (b == 1.0)) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = x[i] + y[i];
+	}
+    }
+    else if ((a == 1.0) && (b == -1.0)) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = x[i] - y[i];
+	}
+    }
+    else if (a == b) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * (x[i] + y[i]);
+	}
+    }
+    else if (b == -1.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] - y[i];
+	}
+    }
+    else if (b == 1.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] + y[i];
+	}
+    }
+    else {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] + b * y[i];
+	}
+    }
+
+    return;
+}
+
+/*!
+ * \brief Copy the vector x to y
+ *
+ * \f[ {\bf y} = {\bf x} \f]
+ *
+ * This function is not multi-threaded
+ *
+ * \param x      (double *)
+ * \param y      (double *)
+ * \param rows (int)
+ * 
+ * */
+void G_math_d_copy(double *x, double *y, int rows)
+{
+    y = memcpy(y, x, rows * sizeof(double));
+
+    return;
+}
+
+/* **************************************************************** */
+/* *************** F L O A T ************************************** */
+/* **************************************************************** */
+
+/*!
+ * \brief Compute the dot product of vector x and y 
+ *
+ * \f[ a = {\bf x}^T  {\bf y} \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (float *)
+ * \param y       (float *)
+ * \param value (float *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_f_x_dot_y(float *x, float *y, float *value, int rows)
+{
+    int i;
+
+    float s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += x[i] * y[i];
+    }
+#pragma omp single
+    {
+	*value = s;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the euclid norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_2 \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (float *) -- the vector
+ * \param value (float *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_f_euclid_norm(float *x, float *value, int rows)
+{
+    int i;
+
+    float s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += x[i] * x[i];
+    }
+#pragma omp single
+    {
+	*value = sqrt(s);
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the asum norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_1 \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (float *)-- the vector
+ * \param value (float *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_f_asum_norm(float *x, float *value, int rows)
+{
+    int i;
+
+    float s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = 0; i < rows; i++) {
+	s += fabs(x[i]);
+    }
+#pragma omp single
+    {
+	*value = s;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the maximum norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_\infty \f]
+ *
+ * This function is not multi-threaded
+ *
+ * \param x       (float *)-- the vector
+ * \param value (float *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_f_max_norm(float *x, float *value, int rows)
+{
+    int i;
+
+    float max = 0.0;
+
+    max = fabs(x[rows - 1]);
+    for (i = rows - 2; i >= 0; i--) {
+	if (max < fabs(x[i]))
+	    max = fabs(x[i]);
+    }
+    *value = max;
+    return;
+}
+
+/*!
+ * \brief Scales vectors x and y with the scalars a and b and adds them
+ *
+ * \f[ {\bf z} = a{\bf x} + b{\bf y} \f]
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param x      (float *)
+ * \param y      (float *)
+ * \param z      (float *)
+ * \param a      (float)
+ * \param b      (float)
+ * \param rows (int)
+ * \return (void)
+ * 
+ * */
+void G_math_f_ax_by(float *x, float *y, float *z, float a, float b, int rows)
+{
+    int i;
+
+    /*find specific cases */
+    if (b == 0.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i];
+	}
+    }
+    else if ((a == 1.0) && (b == 1.0)) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = x[i] + y[i];
+	}
+    }
+    else if ((a == 1.0) && (b == -1.0)) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = x[i] - y[i];
+	}
+    }
+    else if (a == b) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * (x[i] + y[i]);
+	}
+    }
+    else if (b == -1.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] - y[i];
+	}
+    }
+    else if (b == 1.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] + y[i];
+	}
+    }
+    else {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] + b * y[i];
+	}
+    }
+
+    return;
+}
+
+/*!
+ * \brief Copy the vector x to y
+ *
+ * \f[ {\bf y} = {\bf x} \f]
+ *
+ * This function is not multi-threaded
+ *
+ * \param x      (float *)
+ * \param y      (float *)
+ * \param rows (int)
+ * 
+ * */
+void G_math_f_copy(float *x, float *y, int rows)
+{
+    y = memcpy(y, x, rows * sizeof(float));
+
+    return;
+}
+
+/* **************************************************************** */
+/* *************** I N T E G E R ********************************** */
+/* **************************************************************** */
+
+/*!
+ * \brief Compute the dot product of vector x and y 
+ *
+ * \f[ a = {\bf x}^T  {\bf y} \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (int *)
+ * \param y       (int *)
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_i_x_dot_y(int *x, int *y, double *value, int rows)
+{
+    int i;
+
+    double s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += x[i] * y[i];
+    }
+#pragma omp single
+    {
+	*value = s;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the euclid norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_2 \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (int *) -- the vector
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_i_euclid_norm(int *x, double *value, int rows)
+{
+    int i;
+
+    double s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += x[i] * x[i];
+    }
+#pragma omp single
+    {
+	*value = sqrt(s);
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the asum norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_1 \f]
+ *
+ * The function creates its own parallel OpenMP region.
+ * It can be called within a parallel OpenMP region if nested parallelism is supported
+ * by the compiler.
+ *
+ * \param x       (int *)-- the vector
+ * \param value (double *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_i_asum_norm(int *x, double *value, int rows)
+{
+    int i;
+
+    double s = 0.0;
+
+#pragma omp parallel for schedule (static) reduction(+:s)
+    for (i = rows - 1; i >= 0; i--) {
+	s += fabs(x[i]);
+    }
+#pragma omp single
+    {
+	*value = s;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the maximum norm of vector x  
+ *
+ * \f[ a = ||{\bf x}||_\infty \f]
+ *
+ * This function is not multi-threaded
+ *
+ * \param x       (int *)-- the vector
+ * \param value (int *)  -- the return value
+ * \param rows (int)
+ * \return (void)
+ *
+ * */
+void G_math_i_max_norm(int *x, int *value, int rows)
+{
+    int i;
+
+    int max = 0;
+
+    max = abs(x[rows - 1]);
+    for (i = rows - 2; i >= 0; i--) {
+	if (max < abs(x[i]))
+	    max = abs(x[i]);
+    }
+
+    *value = max;
+}
+
+/*!
+ * \brief Scales vectors x and y with the scalars a and b and adds them
+ *
+ * \f[ {\bf z} = a{\bf x} + b{\bf y} \f]
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param x      (int *)
+ * \param y      (int *)
+ * \param z      (int *)
+ * \param a      (int)
+ * \param b      (int)
+ * \param rows (int)
+ * \return (void)
+ * 
+ * */
+void G_math_i_ax_by(int *x, int *y, int *z, int a, int b, int rows)
+{
+    int i;
+
+    /*find specific cases */
+    if (b == 0.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i];
+	}
+    }
+    else if ((a == 1.0) && (b == 1.0)) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = x[i] + y[i];
+	}
+    }
+    else if ((a == 1.0) && (b == -1.0)) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = x[i] - y[i];
+	}
+    }
+    else if (a == b) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * (x[i] + y[i]);
+	}
+    }
+    else if (b == -1.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] - y[i];
+	}
+    }
+    else if (b == 1.0) {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] + y[i];
+	}
+    }
+    else {
+#pragma omp for schedule (static)
+	for (i = rows - 1; i >= 0; i--) {
+	    z[i] = a * x[i] + b * y[i];
+	}
+    }
+
+    return;
+}
+
+/*!
+ * \brief Copy the vector x to y
+ *
+ * \f[ {\bf y} = {\bf x} \f]
+ *
+ * This function is not multi-threaded
+ *
+ * \param x      (int *)
+ * \param y      (int *)
+ * \param rows (int)
+ * 
+ * */
+void G_math_i_copy(int *x, int *y, int rows)
+{
+    y = memcpy(y, x, rows * sizeof(int));
+
+    return;
+}

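For illustration, a minimal sketch of the level 1 calling convention defined above (not part of the changeset): results are written through the value and z output pointers instead of being returned.

#include <stdio.h>
#include <grass/gmath.h>

static void blas1_sketch(void)
{
    double x[3] = { 1.0, 2.0, 2.0 };
    double y[3] = { 1.0, 1.0, 1.0 };
    double z[3];
    double dot, norm;

    G_math_d_x_dot_y(x, y, &dot, 3);		/* dot = x^T y = 5        */
    G_math_d_euclid_norm(x, &norm, 3);		/* norm = ||x||_2 = 3     */
    G_math_d_ax_by(x, y, z, 2.0, -1.0, 3);	/* z = 2x - y = {1, 3, 3} */
    G_math_d_copy(z, y, 3);			/* y = z                  */

    printf("dot = %g, norm = %g, y[0] = %g\n", dot, norm, y[0]);
}
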
Added: grass/branches/develbranch_6/lib/gmath/blas_level_2.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/blas_level_2.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/blas_level_2.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,420 @@
+
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2007
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      blas level 2 like functions
+ * 		part of the gmath library
+ *               
+ * COPYRIGHT:    (C) 2007 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <grass/gmath.h>
+#include <grass/gis.h>
+#include <grass/gisdefs.h>
+
+#define EPSILON 0.00000000000000001
+
+/*!
+ * \brief Compute the matrix - vector product  
+ * of sparse matrix **Asp and vector x.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * y = A * x
+ *
+ *
+ * \param Asp (G_math_spvector **) 
+ * \param x (double *)
+ * \param y (double * )
+ * \return (void)
+ *
+ * */
+void G_math_Ax_sparse(G_math_spvector ** Asp, double *x, double *y, int rows)
+{
+    int i, j;
+
+    double tmp;
+
+#pragma omp for schedule (static) private(i, j, tmp)
+    for (i = 0; i < rows; i++) {
+	tmp = 0;
+	for (j = 0; j < Asp[i]->cols; j++) {
+	    tmp += Asp[i]->values[j] * x[Asp[i]->index[j]];
+	}
+	y[i] = tmp;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the matrix - vector product  
+ * of matrix A and vector x.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * y = A * x
+ *
+ *
+ * \param A (double ** )
+ * \param x (double *)
+ * \param y (double *) 
+ * \param rows (int)
+ * \param cols (int)
+ * \return (void)
+ *
+ * */
+void G_math_d_Ax(double **A, double *x, double *y, int rows, int cols)
+{
+    int i, j;
+
+    double tmp;
+
+#pragma omp for schedule (static) private(i, j, tmp)
+    for (i = 0; i < rows; i++) {
+	tmp = 0;
+	for (j = cols - 1; j >= 0; j--) {
+	    tmp += A[i][j] * x[j];
+	}
+	y[i] = tmp;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the matrix - vector product  
+ * of matrix A and vector x.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * y = A * x
+ *
+ *
+ * \param A (float ** )
+ * \param x (float *)
+ * \param y (float *) 
+ * \param rows (int)
+ * \param cols (int)
+ * \return (void)
+ *
+ * */
+void G_math_f_Ax(float **A, float *x, float *y, int rows, int cols)
+{
+    int i, j;
+
+    float tmp;
+
+#pragma omp for schedule (static) private(i, j, tmp)
+    for (i = 0; i < rows; i++) {
+	tmp = 0;
+	for (j = cols - 1; j >= 0; j--) {
+	    tmp += A[i][j] * x[j];
+	}
+	y[i] = tmp;
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the dyadic product of two vectors. 
+ * The result is stored in the matrix A.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * A = x * y^T
+ *
+ *
+ * \param x (double *)
+ * \param y (double *) 
+ * \param A (double **)  -- matrix of size rows*cols
+ * \param rows (int) -- length of vector x
+ * \param cols (int) -- length of vector y
+ * \return (void)
+ *
+ * */
+void G_math_d_x_dyad_y(double *x, double *y, double **A, int rows, int cols)
+{
+    int i, j;
+
+#pragma omp for schedule (static) private(i, j)
+    for (i = 0; i < rows; i++) {
+	for (j = cols - 1; j >= 0; j--) {
+	    A[i][j] = x[i] * y[j];
+	}
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the dyadic product of two vectors. 
+ * The result is stored in the matrix A.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * A = x * y^T
+ *
+ *
+ * \param x (float *)
+ * \param y (float *) 
+ * \param A (float **)  -- matrix of size rows*cols
+ * \param rows (int) -- length of vector x
+ * \param cols (int) -- length of vector y
+ * \return (void)
+ *
+ * */
+void G_math_f_x_dyad_y(float *x, float *y, float **A, int rows, int cols)
+{
+    int i, j;
+
+#pragma omp for schedule (static) private(i, j)
+    for (i = 0; i < rows; i++) {
+	for (j = cols - 1; j >= 0; j--) {
+	    A[i][j] = x[i] * y[j];
+	}
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the scaled matrix - vector product  
+ * of matrix double **A and vector x and y.
+ *
+ * z = a * A * x + b * y
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ *
+ * \param A (double **) 
+ * \param x (double *)
+ * \param y (double *) 
+ * \param a (double)
+ * \param b (double)
+ * \param z (double *) 
+ * \param rows (int)
+ * \param cols (int)
+ * \return (void)
+ *
+ * */
+
+void G_math_d_aAx_by(double **A, double *x, double *y, double a, double b,
+		     double *z, int rows, int cols)
+{
+    int i, j;
+
+    double tmp;
+
+    /*catch specific cases */
+    if (a == b) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += A[i][j] * x[j] + y[j];
+	    }
+	    z[i] = a * tmp;
+	}
+    }
+    else if (b == -1.0) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += a * A[i][j] * x[j] - y[j];
+	    }
+	    z[i] = tmp;
+	}
+    }
+    else if (b == 0.0) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += A[i][j] * x[j];
+	    }
+	    z[i] = a * tmp;
+	}
+    }
+    else if (a == -1.0) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += b * y[j] - A[i][j] * x[j];
+	    }
+	    z[i] = tmp;
+	}
+    }
+    else {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += a * A[i][j] * x[j] + b * y[j];
+	    }
+	    z[i] = tmp;
+	}
+    }
+    return;
+}
+
+/*!
+ * \brief Compute the scaled matrix - vector product  
+ * of matrix A and vectors x and y.
+ *
+ * z = a * A * x + b * y
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ *
+ * \param A (float **) 
+ * \param x (float *)
+ * \param y (float *) 
+ * \param a (float)
+ * \param b (float)
+ * \param z (float *) 
+ * \param rows (int)
+ * \param cols (int)
+ * \return (void)
+ *
+ * */
+
+void G_math_f_aAx_by(float **A, float *x, float *y, float a, float b,
+		     float *z, int rows, int cols)
+{
+    int i, j;
+
+    float tmp;
+
+    /*catch specific cases */
+    if (a == b) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += A[i][j] * x[j] + y[j];
+	    }
+	    z[i] = a * tmp;
+	}
+    }
+    else if (b == -1.0) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += a * A[i][j] * x[j] - y[j];
+	    }
+	    z[i] = tmp;
+	}
+    }
+    else if (b == 0.0) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += A[i][j] * x[j];
+	    }
+	    z[i] = a * tmp;
+	}
+    }
+    else if (a == -1.0) {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += b * y[j] - A[i][j] * x[j];
+	    }
+	    z[i] = tmp;
+	}
+    }
+    else {
+#pragma omp for schedule (static) private(i, j, tmp)
+	for (i = 0; i < rows; i++) {
+	    tmp = 0;
+	    for (j = cols - 1; j >= 0; j--) {
+		tmp += a * A[i][j] * x[j] + b * y[j];
+	    }
+	    z[i] = tmp;
+	}
+    }
+    return;
+}
+
+
+
+/*!
+ * \fn int G_math_d_A_T(double **A, int rows)
+ *
+ * \brief Compute the transposition of matrix A.
+ * Matrix A will be overwritten.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * Returns 0.
+ *
+ * \param A (double **)
+ * \param rows (int)
+ * \return int
+ */
+
+int G_math_d_A_T(double **A, int rows)
+{
+    int i, j;
+
+    double tmp;
+
+#pragma omp for schedule (static) private(i, j, tmp)
+    for (i = 0; i < rows; i++)
+	for (j = 0; j < i; j++) {
+	    tmp = A[i][j];
+
+	    A[i][j] = A[j][i];
+	    A[j][i] = tmp;
+	}
+
+    return 0;
+}
+
+/*!
+ * \fn int G_math_f_A_T(float **A, int rows)
+ *
+ * \brief Compute the transposition of matrix A.
+ * Matrix A will be overwritten.
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * Returns 0.
+ *
+ * \param A (float **)
+ * \param rows (int)
+ * \return int
+ */
+
+int G_math_f_A_T(float **A, int rows)
+{
+    int i, j;
+
+    float tmp;
+
+#pragma omp for schedule (static) private(i, j, tmp)
+    for (i = 0; i < rows; i++)
+	for (j = 0; j < i; j++) {
+	    tmp = A[i][j];
+
+	    A[i][j] = A[j][i];
+	    A[j][i] = tmp;
+	}
+
+    return 0;
+}

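An illustrative sketch of the level 2 routines above, not part of the changeset. They rely on orphaned '#pragma omp for' work sharing, so they can be embedded in a parallel region opened by the caller; outside a parallel region they simply execute serially.

#include <stdio.h>
#include <grass/gis.h>
#include <grass/gmath.h>

static void blas2_sketch(void)
{
    int rows = 2, cols = 2;
    double **A = G_alloc_matrix(rows, cols);
    double x[2] = { 1.0, 1.0 };
    double y[2];

    A[0][0] = 1.0; A[0][1] = 2.0;
    A[1][0] = 3.0; A[1][1] = 4.0;

#pragma omp parallel
    {
	/* the omp for inside G_math_d_Ax() binds to this parallel region */
	G_math_d_Ax(A, x, y, rows, cols);	/* y = A x = {3, 7} */
    }

    printf("y = {%g, %g}\n", y[0], y[1]);
    G_free_matrix(A);
}
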
Added: grass/branches/develbranch_6/lib/gmath/blas_level_3.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/blas_level_3.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/blas_level_3.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,231 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2007
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      blas level 3 like functions
+* 		part of the gmath library
+*               
+* COPYRIGHT:    (C) 2007 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <grass/gmath.h>
+#include <grass/gis.h>
+
+
+/*!
+ * \brief Add two matrices and scale matrix A with the scalar a
+ *
+ * \f[ {\bf C} = a {\bf A} + {\bf B} \f]
+ *
+ * In case B == NULL, matrix A will be scaled by scalar a. \n
+ * In case a == 1.0, a simple matrix addition is performed. \n
+ * In case a == -1.0, matrix A is subtracted from matrix B. \n
+ * The result is written into matrix C. 
+ *
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param A (double **)
+ * \param B (double **) if NULL, matrix A is scaled by scalar a only
+ * \param a (double)
+ * \param C (double **)
+ * \param rows (int)
+ * \param cols (int)
+ * \return (void) 
+ *
+ * */
+void G_math_d_aA_B(double **A, double **B, double a, double **C, int rows,
+		   int cols)
+{
+    int i, j;
+
+
+    /*If B is NULL, scale the matrix A with the scalar a */
+    if (B == NULL) {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = a * A[i][j];
+
+	return;
+    }
+
+    /*select special cases */
+    if (a == 1.0) {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = A[i][j] + B[i][j];
+    }
+    else if (a == -1.0) {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = B[i][j] - A[i][j];
+    }
+    else {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = a * A[i][j] + B[i][j];
+    }
+
+    return;
+}
+
+/*!
+ * \brief Add two matrices and scale matrix A with the scalar a
+ *
+ * \f[ {\bf C} = a {\bf A} + {\bf B} \f]
+ *
+ * In case B == NULL, matrix A will be scaled by scalar a. \n
+ * In case a == 1.0, a simple matrix addition is performed. \n
+ * In case a == -1.0, matrix A is subtracted from matrix B. \n
+ * The result is written into matrix C. 
+ *
+ *
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param A (float **)
+ * \param B (float **) if NULL, matrix A is scaled by scalar a only
+ * \param a (float)
+ * \param C (float **) 
+ * \param rows (int)
+ * \param cols (int)
+
+ * \return  (void) 
+ *
+ * */
+void G_math_f_aA_B(float **A, float **B, float a, float **C, int rows,
+		   int cols)
+{
+    int i, j;
+
+    /*If B is NULL, scale the matrix A with the scalar a */
+    if (B == NULL) {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = a * A[i][j];
+	return;
+    }
+
+    /*select special cases */
+    if (a == 1.0) {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = A[i][j] + B[i][j];
+    }
+    else if (a == -1.0) {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = B[i][j] - A[i][j];
+    }
+    else {
+#pragma omp for schedule (static) private(i, j)
+	for (i = rows - 1; i >= 0; i--)
+	    for (j = cols - 1; j >= 0; j--)
+		C[i][j] = a * A[i][j] + B[i][j];
+    }
+
+    return;
+}
+
+
+/*!
+ * \brief Matrix multiplication
+ *
+ * \f[ {\bf C} = {\bf A}{\bf B} \f]
+ *
+ * The result is written into matrix C. 
+ *
+ * A must be of size rows_A * cols_A
+ * B must be of size cols_A * rows_B (the third size argument rows_B
+ * is used as the number of columns of B and C)
+ * C must be of size rows_A * rows_B
+ *
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param A (double **)
+ * \param B (double **)
+ * \param C (double **)
+ * \param rows_A (int)
+ * \param cols_A (int)
+ * \param rows_B (int)
+ * \return (void)
+ *
+ * */
+void G_math_d_AB(double **A, double **B, double **C, int rows_A,
+		 int cols_A, int rows_B)
+{
+    int i, j, k;
+
+#pragma omp for schedule (static) private(i, j, k)
+    for (i = 0; i < rows_A; i++) {
+	for (j = 0; j < rows_B; j++) {
+	    C[i][j] = 0.0;
+	    for (k = cols_A - 1; k >= 0; k--) {
+		C[i][j] += A[i][k] * B[k][j];
+	    }
+	}
+    }
+
+    return;
+}
+
+/*!
+ * \brief Matrix multiplication
+ *
+ * \f[ {\bf C} = {\bf A}{\bf B} \f]
+ *
+ * The result is written into matrix C. 
+ *
+ * A must be of size rows_A * cols_A
+ * B must be of size cols_A * rows_B (the third size argument rows_B
+ * is used as the number of columns of B and C)
+ * C must be of size rows_A * rows_B
+ *
+ *
+ * This function is multi-threaded with OpenMP and can be called within a parallel OpenMP region.
+ *
+ * \param A (float **)
+ * \param B (float **) 
+ * \param C (float **) 
+ * \param rows_A (int)
+ * \param cols_A (int)
+ * \param rows_B (int)
+ * \return (void)
+ *
+ * */
+void G_math_f_AB(float **A, float **B, float **C, int rows_A,
+		 int cols_A, int rows_B)
+{
+    int i, j, k;
+
+#pragma omp for schedule (static) private(i, j, k)
+    for (i = 0; i < rows_A; i++) {
+	for (j = 0; j < rows_B; j++) {
+	    C[i][j] = 0.0;
+	    for (k = cols_A - 1; k >= 0; k--) {
+		C[i][j] += A[i][k] * B[k][j];
+	    }
+	}
+    }
+
+    return;
+}

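A small sketch of the level 3 routines above, restricted to square matrices so that all three size arguments coincide; illustration only, not part of the changeset.

#include <stdio.h>
#include <grass/gis.h>
#include <grass/gmath.h>

static void blas3_sketch(void)
{
    int n = 2;
    double **A = G_alloc_matrix(n, n);
    double **B = G_alloc_matrix(n, n);
    double **C = G_alloc_matrix(n, n);

    A[0][0] = 1.0; A[0][1] = 2.0;
    A[1][0] = 3.0; A[1][1] = 4.0;

    B[0][0] = 0.0; B[0][1] = 1.0;
    B[1][0] = 1.0; B[1][1] = 0.0;

    G_math_d_aA_B(A, NULL, 0.5, C, n, n);	/* C = 0.5 * A                  */
    G_math_d_AB(A, B, C, n, n, n);		/* C = A * B = {{2, 1}, {4, 3}} */

    printf("C[0][0] = %g, C[0][1] = %g\n", C[0][0], C[0][1]);

    G_free_matrix(A);
    G_free_matrix(B);
    G_free_matrix(C);
}
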
Added: grass/branches/develbranch_6/lib/gmath/ccmath_grass_wrapper.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/ccmath_grass_wrapper.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/ccmath_grass_wrapper.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,441 @@
+#if defined(HAVE_CCMATH)
+#include <ccmath.h>
+#else
+#include <grass/ccmath_grass.h>
+#endif
+/**
+                                Chapter 1
+
+                              LINEAR ALGEBRA
+
+                                 Summary
+
+               The matrix algebra library contains functions that
+               perform the standard computations of linear algebra.
+               General areas covered are:
+
+                         o Solution of Linear Systems
+                         o Matrix Inversion
+                         o Eigensystem Analysis
+                         o Matrix Utility Operations
+                         o Singular Value Decomposition
+
+               The operations covered here are fundamental to many
+               areas of mathematics and statistics. Thus, functions
+               in this library segment are called by other library
+               functions. Both real and complex valued matrices
+               are covered by functions in the first four of these
+               categories.
+
+
+ Notes on Contents
+
+     Functions in this library segment provide the basic operations of
+ numerical linear algebra and some useful utility functions for operations on
+ vectors and matrices. The following list describes the functions available for
+ operations with real-valued matrices.
+
+
+ o  Solving and Inverting Linear Systems:
+
+    solv  --------- solve a general system of real linear equations.
+    solvps  ------- solve a real symmetric linear system.
+    solvru  ------- solve a real right upper triangular linear system.
+    solvtd  ------- solve a tridiagonal real linear system.
+
+    minv  --------- invert a general real square matrix.
+    psinv  -------- invert a real symmetric matrix.
+    ruinv  -------- invert a right upper triangular matrix.
+
+
+     The solution of a general linear system and efficient algorithms for
+ solving special systems with symmetric and tridiagonal matrices are provided
+ by these functions. The general solution function employs a LU factorization
+ with partial pivoting and it is very robust. It will work efficiently on any
+ problem that is not ill-conditioned. The symmetric matrix solution is based
+ on a modified Cholesky factorization. It is best used on positive definite
+ matrices that do not require pivoting for numeric stability. Tridiagonal
+ solvers require order-N operations (N = dimension). Thus, they are highly
+ recommended for this important class of sparse systems. Two matrix inversion
+ routines are provided. The general inversion function is again LU based. It
+ is suitable for use on any stable (i.e. well-conditioned) problem. The
+ Cholesky based symmetric matrix inversion is efficient and safe for use on
+ matrices known to be positive definite, such as the variance matrices
+ encountered in statistical computations. Both the solver and the inverse
+ functions are designed to enhance data locality. They are very effective
+ on modern microprocessors.
+
+
+ o  Eigensystem Analysis:
+
+    eigen  ------ extract all eigen values and vectors of a real
+                  symmetric matrix.
+    eigval  ----- extract the eigen values of a real symmetric matrix.
+    evmax  ------ compute the eigen value of maximum absolute magnitude
+                  and its corresponding vector for a symmetric matrix.
+
+
+     Eigensystem functions operate on real symmetric matrices. Two forms of
+ the general eigen routine are provided because the computation of eigen values
+ only is much faster when vectors are not required. The basic algorithms use
+ a Householder reduction to tridiagonal form followed by QR iterations with
+ shifts to enhance convergence. This has become the accepted standard for
+ symmetric eigensystem computation. The evmax function uses an efficient
+ iterative power method algorithm to extract the eigen value of maximum
+ absolute size and the corresponding eigenvector.
+
+
+ o Singular Value Decomposition:
+
+    svdval  ----- compute the singular values of a m by n real matrix.
+    sv2val  ----- compute the singular values of a real matrix
+                  efficiently for m >> n.
+    svduv  ------ compute the singular values and the transformation
+                  matrices u and v for a real m by n matrix.
+    sv2uv  ------ compute the singular values and transformation
+                  matrices efficiently for m >> n.
+    svdu1v  ----- compute the singular values and transformation
+                  matrices u1 and v, where u1 overloads the input
+                  with the first n column vectors of u.
+    sv2u1v  ----- compute the singular values and the transformation
+                  matrices u1 and v efficiently for m >> n.
+
+
+     Singular value decomposition is extremely useful when dealing with linear
+ systems that may be singular. Singular values with values near zero are flags
+ of a potential rank deficiency in the system matrix. They can be used to
+ identify the presence of an ill-conditioned problem and, in some cases, to
+ deal with the potential instability. They are applied to the linear least
+ squares problem in this library. Singular values also define some important
+ matrix norm parameters such as the 2-norm and the condition value. A complete
+ decomposition provides both singular values and an orthogonal decomposition of
+ vector spaces related to the matrix identifying the range and null-space.
+ Fortunately, a highly stable algorithm based on Householder reduction to
+ bidiagonal form and QR rotations can be used to implement the decomposition.
+ The library provides two forms with one more efficient when the dimensions
+ satisfy m > (3/2)n.
+
+ General Technical Comments
+
+     Efficient computation with matrices on modern processors must be
+ adapted to the storage scheme employed for matrix elements. The functions
+ of this library segment do not employ the multidimensional array intrinsic
+ of the C language. Access to elements employs the simple row-major scheme
+ described here.
+
+     Matrices are modeled by the library functions as arrays with elements
+ stored in row order. Thus, the element in the jth row and kth column of
+ the n by n matrix M, stored in the array mat[], is addressed by
+
+           M[j,k] = mat[n*j+k]  , with   0 <= j,k <= n-1 .
+
+ (Remember that C employs zero as the starting index.) The storage order has
+ important implications for data locality.
+
+     The algorithms employed here all have excellent numerical stability, and
+ the default double precision arithmetic of C enhances this. Thus, any
+ problems encountered in using the matrix algebra functions will almost
+ certainly be due to an ill-conditioned matrix. (The Hilbert matrices,
+
+                 H[i,j] = 1/(1+i+j)  for i,j < n
+
+ form a good example of such ill-conditioned systems.) We remind the reader
+ that the appropriate response to such ill-conditioning is to seek an
+ alternative approach to the problem. The option of increasing precision has
+ already been exploited. Modification of the linear algebra algorithm code is
+ not normally effective in an ill-conditioned problem.
+
+------------------------------------------------------------------------------
+                      FUNCTION SYNOPSES
+------------------------------------------------------------------------------
+
+ Linear System Solutions:
+-----------------------------------------------------------------------------
+*/
+/**
+     \brief Solve a general linear system  A*x = b.
+
+     \param  a = array containing system matrix A in row order (altered to L-U factored form by computation)
+     \param  b = array containing system vector b at entry and solution vector x at exit
+     \param  n = dimension of system
+     \return 0 -> normal exit; -1 -> singular input
+ */
+int G_math_solv(double **a,double *b,int n)
+{
+    return solv(a[0],b, n);
+}
+/**
+     \brief Solve a symmetric positive definite linear system S*x = b.
+
+     \param  a = array containing system matrix S (altered to Cholesky upper right factor by computation)
+     \param  b = array containing system vector b as input and solution vector x as output
+     \param  n = dimension of system
+     \return 0 -> normal exit; 1 -> input matrix not positive definite
+ */
+int G_math_solvps(double **a,double *b,int n)
+{
+    return solvps(a[0], b, n);
+}
+/**
+     \brief Solve a tridiagonal linear system M*x = y.
+
+     \param a = array containing m+1 diagonal elements of M
+     \param  b = array of m elements below the main diagonal of M
+     \param  c = array of m elements above the main diagonal
+     \param  x = array containing the system vector y initially, and the solution vector at exit (m+1 elements)
+     \param  m = dimension parameter ( M is (m+1)x(m+1) )
+
+*/
+void G_math_solvtd(double *a,double *b,double *c,double *x,int m)
+{
+    solvtd(a, b, c, x, m);
+    return;
+}
+/**
+     \brief Solve an upper right triangular linear system T*x = b.
+
+     \param  a = pointer to array of upper right triangular matrix T
+     \param  b = pointer to array of the system vector b; the computation overloads this with the solution vector x
+     \param  n = dimension (dim(a)=n*n,dim(b)=n)
+     \return value: f = status flag, with 0 -> normal exit, -1 -> system singular
+*/
+
+int G_math_solvru(double **a,double *b,int n)
+{
+    return solvru(a[0], b, n);
+}
+/**
+     \brief Invert (in place) a general real matrix A -> Inv(A).
+
+
+     \param  a = array containing the input matrix A. This is converted to the inverse matrix.
+     \param  n = dimension of the system (i.e. A is n x n )
+     \return: 0 -> normal exit, 1 -> singular input matrix
+*/
+int G_math_minv(double **a,int n)
+{
+    return minv(a[0], n);
+}
+/**
+     \brief Invert (in place) a symmetric real matrix, V -> Inv(V).
+
+     The input matrix V is symmetric (V[i,j] = V[j,i]).
+     \param  a = array containing the symmetric input matrix. This is converted to the inverse matrix.
+     \param  n = dimension of the system (dim(a)=n*n)
+     \return 0 -> normal exit; 1 -> input matrix not positive definite
+*/
+int G_math_psinv(double **a,int n)
+{
+    return psinv( a[0], n);
+}
+
+/**
+
+     \brief Invert an upper right triangular matrix T -> Inv(T).
+
+
+     \param  a = pointer to array of the upper right triangular matrix. This is replaced by the inverse matrix.
+     \param  n = dimension (dim(a)=n*n)
+     \return value: status flag, with 0 -> matrix inverted -1 -> matrix singular
+*/
+int G_math_ruinv(double **a,int n)
+{
+    return ruinv(a[0], n);
+}
+/*
+-----------------------------------------------------------------------------
+
+     Symmetric Eigensystem Analysis:
+-----------------------------------------------------------------------------
+*/
+/**
+
+     \brief Compute the eigenvalues of a real symmetric matrix A.
+
+
+     \param  a = pointer to array of symmetric n by n input matrix A. The computation alters these values.
+     \param  ev = pointer to array of the output eigenvalues
+     \param  n = dimension parameter (dim(a)= n*n, dim(ev)= n)
+*/
+void G_math_eigval(double **a,double *ev,int n)
+{
+    eigval(a[0], ev, n);
+    return;
+}
+/**
+     \brief Compute the eigenvalues and eigenvectors of a real symmetric matrix A.
+
+      The input and output matrices are related by
+
+          A = E*D*E~ where D is the diagonal matrix of eigenvalues
+          D[i,j] = ev[i] if i=j and 0 otherwise.
+
+     The columns of E are the eigenvectors.
+
+     \param  a = pointer to store for symmetric n by n input matrix A. The computation overloads this with an orthogonal matrix of eigenvectors E.
+     \param  ev = pointer to the array of the output eigenvalues
+     \param  n = dimension parameter (dim(a)= n*n, dim(ev)= n)
+*/
+void G_math_eigen(double **a,double *ev,int n)
+{
+    eigen(a[0], ev, n);
+    return;
+}
+/**
+     \brief Compute the maximum (absolute) eigenvalue and corresponding eigenvector of a real symmetric matrix A.
+
+
+     \param  a = array containing symmetric input matrix A
+     \param  u = array containing the n components of the eigenvector at exit (vector normalized to 1)
+     \param  n = dimension of system
+     \return ev = eigenvalue of A with maximum absolute value; HUGE -> convergence failure
+*/
+double G_math_evmax(double **a,double *u,int n)
+{
+    return evmax(a[0], u, n);
+}
+
+
+/* 
+------------------------------------------------------------------------------
+
+ Singular Value Decomposition:
+------------------------------------------------------------------------------
+
+     A number of versions of the Singular Value Decomposition (SVD)
+     are implemented in the library. They support the efficient
+     computation of this important factorization for a real m by n
+     matrix A. The general form of the SVD is
+
+          A = U*S*V~     with S = | D |
+                                  | 0 |
+
+     where U is an m by m orthogonal matrix, V is an n by n orthogonal matrix,
+     D is the n by n diagonal matrix of singular values, and S is the singular
+     m by n matrix produced by the transformation.
+
+     The singular values computed by these functions provide important
+     information on the rank of the matrix A, and on several matrix
+     norms of A. The number of non-zero singular values d[i] in D
+     is equal to the rank of A. The two-norm of A is
+
+          ||A|| = max(d[i]) , and the condition number is
+
+          k(A) = max(d[i])/min(d[i]) .
+
+     The Frobenius norm of the matrix A is
+
+          Fn(A) = sqrt( Sum(i=0 to n-1) d[i]^2 ) .
+
+     Singular values consistent with zero are easily recognized, since
+     the decomposition algorithms have excellent numerical stability.
+     The value of a 'zero' d[i] is no larger than a few times the
+     computational rounding error e.
+     
+     The matrix U1 is formed from the first n orthonormal column vectors
+     of U.  U1[i,j] = U[i,j] for i = 1 to m and j = 1 to n. A singular
+     value decomposition of A can also be expressed in terms of the m by
+     n matrix U1, with
+
+                       A = U1*D*V~ .
+
+     SVD functions with three forms of output are provided. The first
+     form computes only the singular values, while the second computes
+     the singular values and the U and V orthogonal transformation
+     matrices. The third form of output computes singular values, the
+     V matrix, and saves space by overloading the input array with
+     the U1 matrix.
+
+     Two forms of decomposition algorithm are available for each of the
+     three output types. One is computationally efficient when m ~ n.
+     The second, distinguished by the prefix 'sv2' in the function name,
+     employs a two stage Householder reduction to accelerate computation
+     when m substantially exceeds n. Use of functions of the second form
+     is recommended for m > 2n.
+
+     Singular value output from each of the six SVD functions satisfies
+
+          d[i] >= 0 for i = 0 to n-1.
+-------------------------------------------------------------------------------
+*/
+/**
+     \brief Compute the singular values of a real m by n matrix A.
+
+
+     \param  d = pointer to double array of dimension n (output = singular values of A)
+     \param  a = pointer to store of the m by n input matrix A (A is altered by the computation)
+     \param  m = number of rows in A
+     \param  n = number of columns in A (m>=n required)
+     \return value: status flag with: 0 -> success -1 -> input error m < n
+
+*/
+int G_math_svdval(double *d,double **a,int m,int n)
+{
+    return svdval(d, a[0], m, n);
+}
+/**
+
+     \brief Compute singular values when m >> n.
+
+     
+     \param  d = pointer to double array of dimension n (output = singular values of A)
+     \param  a = pointer to store of the m by n input matrix A (A is altered by the computation)
+     \param  m = number of rows in A
+     \param  n = number of columns in A (m>=n required)
+     \return value: status flag with: 0 -> success -1 -> input error m < n
+*/
+int G_math_sv2val(double *d,double **a,int m,int n)
+{
+    return sv2val(d, a[0], m, n);
+}
+/**
+     \brief Compute the singular value transformation S = U~*A*V.
+
+     
+     \param  d = pointer to double array of dimension n (output = singular values of A)
+     \param  a = pointer to store of the m by n input matrix A (A is altered by the computation)
+     \param  u = pointer to store for m by m orthogonal matrix U
+     \param  v = pointer to store for n by n orthogonal matrix V
+     \param  m = number of rows in A
+     \param  n = number of columns in A (m>=n required)
+     \return value: status flag with: 0 -> success -1 -> input error m < n
+*/
+int G_math_svduv(double *d,double **a,double **u,int m,double **v,int n)
+{
+    return svduv(d, a[0], u[0], m, v[0], n);
+}
+
+/**
+     \brief Compute the singular value transformation when m >> n.
+
+     
+     \param  d = pointer to double array of dimension n (output = singular values of A)
+     \param  a = pointer to store of the m by n input matrix A (A is altered by the computation)
+     \param  u = pointer to store for m by m orthogonal matrix U
+     \param  v = pointer to store for n by n orthogonal matrix V
+     \param  m = number of rows in A
+     \param  n = number of columns in A (m>=n required)
+     \return value: status flag with: 0 -> success -1 -> input error m < n
+*/
+int G_math_sv2uv(double *d,double **a,double **u,int m,double **v,int n)
+{
+    return sv2uv(d, a[0], u[0], m, v[0], n);
+}
+/**
+
+     \brief Compute the singular value transformation with A overloaded by the partial U-matrix.
+
+     
+     \param  d = pointer to double array of dimension n
+           (output = singular values of A)
+     \param   a = pointer to store of the m by n input matrix A (At output a is overloaded by the matrix U1 whose n columns are orthogonal vectors equal to the first n columns of U.)
+     \param   v = pointer to store for n by n orthogonal matrix V
+     \param   m = number of rows in A
+     \param   n = number of columns in A (m>=n required)
+     \return value: status flag with: 0 -> success -1 -> input error m < n
+
+*/
+int G_math_svdu1v(double *d,double **a,int m,double **v,int n)
+{
+    return svdu1v(d, a[0], m, v[0], n);
+}

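A hedged usage sketch for the new ccmath wrappers (not part of this changeset). The
wrappers hand a[0] to ccmath, so the matrix is assumed to be stored as one contiguous
row-major block, which G_alloc_matrix() is assumed to provide:

#include <grass/gis.h>
#include <grass/gmath.h>

void example_solv(void)
{
    int n = 2;
    double **A = G_alloc_matrix(n, n);
    double *b = G_alloc_vector(n);

    A[0][0] = 4.0; A[0][1] = 1.0;
    A[1][0] = 1.0; A[1][1] = 3.0;
    b[0] = 1.0;    b[1] = 2.0;

    if (G_math_solv(A, b, n) == 0)
        G_message("x = (%g, %g)", b[0], b[1]);  /* b now holds x, A the L-U factors */
    else
        G_warning("singular system matrix");

    G_free_matrix(A);
    G_free_vector(b);
}

The SVD and eigensystem wrappers follow the same calling convention: two-dimensional
arrays are passed in, and the underlying ccmath routine operates on the contiguous
block behind a[0].
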
Modified: grass/branches/develbranch_6/lib/gmath/del2g.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/del2g.c	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/del2g.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -61,7 +61,7 @@
 
     /* multiply the complex vectors img and g, each of length size*size */
     G_message(_("    multiplying transforms..."));
-    mult(img, size * size, g, size * size, img, size * size);
+    G_math_complex_mult(img, size * size, g, size * size, img, size * size);
 
     G_message(_("    taking inverse FFT..."));
     fft(INVERSE, img, size * size, size, size);

Deleted: grass/branches/develbranch_6/lib/gmath/eigen.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/eigen.c	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/eigen.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,147 +0,0 @@
-/* taken from i.pca */
-
-#include <stdlib.h>
-#include <grass/gmath.h>
-#include <grass/gis.h>
-
-
-static int egcmp(const void *pa, const void *pb);
-
-
-/*!
- * \fn int eigen (double **M, double **Vectors, double *lambda, int n)
- *
- * \brief Computes eigenvalues (and eigen vectors if desired) for
- * symmetric matices.
- *
- * Computes eigenvalues (and eigen vectors if desired) for symmetric matices.
- *
- * \param M Input matrix
- * \param Vectors eigen output vector matrix
- * \param lambda Output eigenvalues
- * \param n Input matrix dimension
- * \return int
- */
-
-int eigen(double **M,		/* Input matrix */
-	  double **Vectors,	/* eigen vector matrix -output */
-	  double *lambda,	/* Output eigenvalues */
-	  int n			/* Input matrix dimension */
-    )
-{
-    int i, j;
-    double **a, *e;
-
-    a = G_alloc_matrix(n, n);
-    e = G_alloc_vector(n);
-
-    for (i = 0; i < n; i++)
-	for (j = 0; j < n; j++)
-	    a[i][j] = M[i][j];
-
-    G_tred2(a, n, lambda, e);
-    G_tqli(lambda, e, n, a);
-
-    /* Returns eigenvectors */
-    if (Vectors)
-	for (i = 0; i < n; i++)
-	    for (j = 0; j < n; j++)
-		Vectors[i][j] = a[i][j];
-
-    G_free_matrix(a);
-    G_free_vector(e);
-
-    return 0;
-}
-
-
-/*!
- * \fn int egvorder2 (double *d, double **z, long bands)
- *
- * \brief
- *
- * Returns 0.
- *
- * \param d
- * \param z
- * \param bands
- * \return int
- */
-
-int egvorder2(double *d, double **z, long bands)
-{
-    double *buff;
-    double **tmp;
-    int i, j;
-
-    /* allocate temporary matrix */
-    buff = (double *)G_malloc(bands * (bands + 1) * sizeof(double));
-    tmp = (double **)G_malloc(bands * sizeof(double *));
-    for (i = 0; i < bands; i++)
-	tmp[i] = &buff[i * (bands + 1)];
-
-    /* concatenate (vertically) z and d into tmp */
-    for (i = 0; i < bands; i++) {
-	for (j = 0; j < bands; j++)
-	    tmp[i][j + 1] = z[j][i];
-	tmp[i][0] = d[i];
-    }
-
-    /* sort the combined matrix */
-    qsort(tmp, bands, sizeof(double *), egcmp);
-
-    /* split tmp into z and d */
-    for (i = 0; i < bands; i++) {
-	for (j = 0; j < bands; j++)
-	    z[j][i] = tmp[i][j + 1];
-	d[i] = tmp[i][0];
-    }
-
-    /* free temporary matrix */
-    G_free(tmp);
-    G_free(buff);
-
-    return 0;
-}
-
-
-/*!
- * \fn int transpose2 (double **eigmat, long bands)
- *
- * \brief
- *
- * Returns 0.
- *
- * \param eigmat
- * \param bands
- * \return int
- */
-
-int transpose2(double **eigmat, long bands)
-{
-    int i, j;
-
-    for (i = 0; i < bands; i++)
-	for (j = 0; j < i; j++) {
-	    double tmp = eigmat[i][j];
-
-	    eigmat[i][j] = eigmat[j][i];
-	    eigmat[j][i] = tmp;
-	}
-
-    return 0;
-}
-
-
-static int egcmp(const void *pa, const void *pb)
-{
-    const double *a = *(const double *const *)pa;
-    const double *b = *(const double *const *)pb;
-
-    if (*a > *b)
-	return -1;
-    if (*a < *b)
-	return 1;
-
-    return 0;
-}

Modified: grass/branches/develbranch_6/lib/gmath/eigen_tools.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/eigen_tools.c	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/eigen_tools.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,156 +1,59 @@
-#include <grass/gis.h>
+#include <stdlib.h>
 #include <math.h>
+#include <grass/gis.h>
+#include <grass/gmath.h>
 
+static int egcmp(const void *pa, const void *pb);
 
-#define MAX_ITERS 30
-#define SIGN(a,b) ((b)<0 ? -fabs(a) : fabs(a))
 
-
-int G_tqli(double d[], double e[], int n, double **z)
+int G_math_egvorder(double *d, double **z, long bands)
 {
-    int m, l, iter, i, k;
-    double s, r, p, g, f, dd, c, b;
+    double *buff;
+    double **tmp;
+    int i, j;
 
-    for (i = 1; i < n; i++)
-	e[i - 1] = e[i];
-    e[n - 1] = 0.0;
-    for (l = 0; l < n; l++) {
-	iter = 0;
+    /* allocate temporary matrix */
+    buff = (double *)G_malloc(bands * (bands + 1) * sizeof(double));
+    tmp = (double **)G_malloc(bands * sizeof(double *));
+    for (i = 0; i < bands; i++)
+	tmp[i] = &buff[i * (bands + 1)];
 
-	do {
-	    for (m = l; m < n - 1; m++) {
-		dd = fabs(d[m]) + fabs(d[m + 1]);
-		if (fabs(e[m]) + dd == dd)
-		    break;
-	    }
+    /* concatenate (vertically) z and d into tmp */
+    for (i = 0; i < bands; i++) {
+	for (j = 0; j < bands; j++)
+	    tmp[i][j + 1] = z[j][i];
+	tmp[i][0] = d[i];
+    }
 
-	    if (m != l) {
-		if (iter++ == MAX_ITERS)
-		    return 0;	/* Too many iterations in TQLI */
-		g = (d[l + 1] - d[l]) / (2.0 * e[l]);
-		r = sqrt((g * g) + 1.0);
-		g = d[m] - d[l] + e[l] / (g + SIGN(r, g));
-		s = c = 1.0;
-		p = 0.0;
+    /* sort the combined matrix */
+    qsort(tmp, bands, sizeof(double *), egcmp);
 
-		for (i = m - 1; i >= l; i--) {
-		    f = s * e[i];
-		    b = c * e[i];
-
-		    if (fabs(f) >= fabs(g)) {
-			c = g / f;
-			r = sqrt((c * c) + 1.0);
-			e[i + 1] = f * r;
-			c *= (s = 1.0 / r);
-		    }
-		    else {
-			s = f / g;
-			r = sqrt((s * s) + 1.0);
-			e[i + 1] = g * r;
-			s *= (c = 1.0 / r);
-		    }
-
-		    g = d[i + 1] - p;
-		    r = (d[i] - g) * s + 2.0 * c * b;
-		    p = s * r;
-		    d[i + 1] = g + p;
-		    g = c * r - b;
-
-		    /* Next loop can be omitted if eigenvectors not wanted */
-		    for (k = 0; k < n; k++) {
-			f = z[k][i + 1];
-			z[k][i + 1] = s * z[k][i] + c * f;
-			z[k][i] = c * z[k][i] - s * f;
-		    }
-		}
-		d[l] = d[l] - p;
-		e[l] = g;
-		e[m] = 0.0;
-	    }
-	} while (m != l);
+    /* split tmp into z and d */
+    for (i = 0; i < bands; i++) {
+	for (j = 0; j < bands; j++)
+	    z[j][i] = tmp[i][j + 1];
+	d[i] = tmp[i][0];
     }
 
-    return 1;
+    /* free temporary matrix */
+    G_free(tmp);
+    G_free(buff);
+
+    return 0;
 }
 
+/***************************************************************************/
 
-void G_tred2(double **a, int n, double d[], double e[])
+static int egcmp(const void *pa, const void *pb)
 {
-    int l, k, j, i;
-    double scale, hh, h, g, f;
+    const double *a = *(const double *const *)pa;
+    const double *b = *(const double *const *)pb;
 
-    for (i = n - 1; i >= 1; i--) {
-	l = i - 1;
-	h = scale = 0.0;
+    if (*a > *b)
+	return -1;
+    if (*a < *b)
+	return 1;
 
-	if (l > 0) {
-	    for (k = 0; k <= l; k++)
-		scale += fabs(a[i][k]);
-
-	    if (scale == 0.0)
-		e[i] = a[i][l];
-	    else {
-		for (k = 0; k <= l; k++) {
-		    a[i][k] /= scale;
-		    h += a[i][k] * a[i][k];
-		}
-
-		f = a[i][l];
-		g = f > 0 ? -sqrt(h) : sqrt(h);
-		e[i] = scale * g;
-		h -= f * g;
-		a[i][l] = f - g;
-		f = 0.0;
-
-		for (j = 0; j <= l; j++) {
-		    /* Next statement can be omitted if eigenvectors not wanted */
-		    a[j][i] = a[i][j] / h;
-		    g = 0.0;
-		    for (k = 0; k <= j; k++)
-			g += a[j][k] * a[i][k];
-		    for (k = j + 1; k <= l; k++)
-			g += a[k][j] * a[i][k];
-		    e[j] = g / h;
-		    f += e[j] * a[i][j];
-		}
-
-		hh = f / (h + h);
-		for (j = 0; j <= l; j++) {
-		    f = a[i][j];
-		    e[j] = g = e[j] - hh * f;
-
-		    for (k = 0; k <= j; k++)
-			a[j][k] -= (f * e[k] + g * a[i][k]);
-		}
-	    }
-	}
-	else
-	    e[i] = a[i][l];
-	d[i] = h;
-    }
-
-    /* Next statement can be omitted if eigenvectors not wanted */
-    d[0] = 0.0;
-    e[0] = 0.0;
-
-    /* Contents of this loop can be omitted if eigenvectors not
-       wanted except for statement d[i]=a[i][i]; */
-    for (i = 0; i < n; i++) {
-	l = i - 1;
-
-	if (d[i]) {
-	    for (j = 0; j <= l; j++) {
-		g = 0.0;
-		for (k = 0; k <= l; k++)
-		    g += a[i][k] * a[k][j];
-		for (k = 0; k <= l; k++)
-		    a[k][j] -= g * a[k][i];
-	    }
-	}
-
-	d[i] = a[i][i];
-	a[i][i] = 1.0;
-	for (j = 0; j <= l; j++)
-	    a[j][i] = a[i][j] = 0.0;
-    }
+    return 0;
 }
+/***************************************************************************/

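A hedged sketch (not part of this changeset) combining the ccmath wrapper
G_math_eigen() with the relocated G_math_egvorder(): the eigenvectors replace the
columns of A and are reordered together with the descending eigenvalues:

#include <grass/gis.h>
#include <grass/gmath.h>

void example_eigen(void)
{
    int i, n = 2;
    double **A = G_alloc_matrix(n, n);
    double *ev = G_alloc_vector(n);

    A[0][0] = 2.0; A[0][1] = 1.0;
    A[1][0] = 1.0; A[1][1] = 2.0;

    G_math_eigen(A, ev, n);       /* columns of A now hold the eigenvectors */
    G_math_egvorder(ev, A, n);    /* sort eigenvalues descending, reorder the columns */

    for (i = 0; i < n; i++)
        G_message("lambda[%d] = %g", i, ev[i]);

    G_free_matrix(A);
    G_free_vector(ev);
}
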
Deleted: grass/branches/develbranch_6/lib/gmath/jacobi.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/jacobi.c	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/jacobi.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,99 +0,0 @@
-#include <stdlib.h>
-#include <math.h>
-#include <grass/gis.h>
-#include <grass/gmath.h>
-
-
-/***************************************************************************/
-
-/* this does not use the Jacobi method, but it should give the same result */
-
-int jacobi(double a[MX][MX], long n, double d[MX], double v[MX][MX])
-{
-    double *aa[MX], *vv[MX], *dd;
-    int i;
-
-    for (i = 0; i < n; i++) {
-	aa[i] = &a[i + 1][1];
-	vv[i] = &v[i + 1][1];
-    }
-    dd = &d[1];
-    eigen(aa, vv, dd, n);
-
-    return 0;
-}
-
-/***************************************************************************/
-
-static int egcmp(const void *pa, const void *pb)
-{
-    const double *a = *(const double *const *)pa;
-    const double *b = *(const double *const *)pb;
-
-    if (*a > *b)
-	return -1;
-    if (*a < *b)
-	return 1;
-
-    return 0;
-}
-
-int egvorder(double d[MX], double z[MX][MX], long bands)
-{
-    double *buff;
-    double **tmp;
-    int i, j;
-
-    /* allocate temporary matrix */
-
-    buff = (double *)G_malloc(bands * (bands + 1) * sizeof(double));
-    tmp = (double **)G_malloc(bands * sizeof(double *));
-    for (i = 0; i < bands; i++)
-	tmp[i] = &buff[i * (bands + 1)];
-
-    /* concatenate (vertically) z and d into tmp */
-
-    for (i = 0; i < bands; i++) {
-	for (j = 0; j < bands; j++)
-	    tmp[i][j + 1] = z[j + 1][i + 1];
-	tmp[i][0] = d[i + 1];
-    }
-
-    /* sort the combined matrix */
-
-    qsort(tmp, bands, sizeof(double *), egcmp);
-
-    /* split tmp into z and d */
-
-    for (i = 0; i < bands; i++) {
-	for (j = 0; j < bands; j++)
-	    z[j + 1][i + 1] = tmp[i][j + 1];
-	d[i + 1] = tmp[i][0];
-    }
-
-    /* free temporary matrix */
-
-    G_free(tmp);
-    G_free(buff);
-
-    return 0;
-}
-
-/***************************************************************************/
-
-int transpose(double eigmat[MX][MX], long bands)
-{
-    int i, j;
-
-    for (i = 1; i <= bands; i++)
-	for (j = 1; j < i; j++) {
-	    double tmp = eigmat[i][j];
-
-	    eigmat[i][j] = eigmat[j][i];
-	    eigmat[j][i] = tmp;
-	}
-
-    return 0;
-}
-
-/***************************************************************************/

Modified: grass/branches/develbranch_6/lib/gmath/mult.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/mult.c	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/mult.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,7 +1,7 @@
 /* Author: Bill Hoff,2-114C,8645,3563478 (hoff) at uicsl */
 
 /*!
- * \fn int mult (double *v1[2], int size1, double *v2[2], int size2, double *v3[2], int size3)
+ * \fn int G_math_complex_mult (double *v1[2], int size1, double *v2[2], int size2, double *v3[2], int size3)
  *
  * \brief Multiply two complex vectors, point by point
  *
@@ -20,7 +20,7 @@
  */
 
 int
-mult(double *v1[2], int size1, double *v2[2], int size2, double *v3[2],
+G_math_complex_mult(double *v1[2], int size1, double *v2[2], int size2, double *v3[2],
      int size3)
 {
     int i, n;

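A hedged sketch of the renamed function (not part of this changeset). It assumes the
split-array layout used by the GRASS FFT code, i.e. v[0] holds the real parts and
v[1] the imaginary parts of each complex vector:

#include <grass/gis.h>
#include <grass/gmath.h>

void example_complex_mult(int size)
{
    double *a[2], *b[2], *c[2];
    int k;

    for (k = 0; k < 2; k++) {
        a[k] = G_alloc_vector(size);
        b[k] = G_alloc_vector(size);
        c[k] = G_alloc_vector(size);
    }

    /* ... fill a and b, e.g. with FFT output ... */

    G_math_complex_mult(a, size, b, size, c, size);   /* c = a .* b, point by point */

    for (k = 0; k < 2; k++) {
        G_free_vector(a[k]);
        G_free_vector(b[k]);
        G_free_vector(c[k]);
    }
}
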
Added: grass/branches/develbranch_6/lib/gmath/solvers_classic_iter.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/solvers_classic_iter.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/solvers_classic_iter.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,281 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      linear equation system solvers
+* 		part of the gpde library
+*               
+* COPYRIGHT:    (C) 2007 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+
+
+/*!
+ * \brief The iterative Jacobi solver for sparse matrices
+ *
+ * The Jacobi solver solves the linear equation system Ax = b.
+ * The result is written to the vector x.
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>error</i> defines the error break criterion for the solver.
+ *
+ * \param Asp G_math_spvector ** -- the sparse matrix
+ * \param x double * -- the vector of unknowns
+ * \param b double * -- the right hand side vector
+ * \param rows int -- number of rows
+ * \param maxit int -- the maximum number of iterations
+ * \param sor double -- defines the successive overrelaxation parameter [0:1]
+ * \param error double -- defines the error break criterion
+ * \return int -- 1=success, 0=maximum number of iterations reached without convergence
+ *
+ * */
+int G_math_solver_sparse_jacobi(G_math_spvector ** Asp, double *x, double *b,
+				int rows, int maxit, double sor, double error)
+{
+    int i, j, k, center, finished = 0;
+
+    double *Enew;
+
+    double E, err = 0;
+
+    Enew = G_alloc_vector(rows);
+
+    for (k = 0; k < maxit; k++) {
+	err = 0;
+	{
+	    if (k == 0) {
+		for (j = 0; j < rows; j++) {
+		    Enew[j] = x[j];
+		}
+	    }
+	    for (i = 0; i < rows; i++) {
+		E = 0;
+		center = 0;
+		for (j = 0; j < Asp[i]->cols; j++) {
+		    E += Asp[i]->values[j] * x[Asp[i]->index[j]];
+		    if (Asp[i]->index[j] == i)
+			center = j;
+		}
+		Enew[i] = x[i] - sor * (E - b[i]) / Asp[i]->values[center];
+	    }
+	    for (j = 0; j < rows; j++) {
+		err += (x[j] - Enew[j]) * (x[j] - Enew[j]);
+
+		x[j] = Enew[j];
+	    }
+	}
+
+	G_message(_("sparse Jacobi -- iteration %5i error %g\n"), k, err);
+
+	if (err < error) {
+	    finished = 1;
+	    break;
+	}
+    }
+
+    G_free(Enew);
+
+    return finished;
+}
+
+
+/*!
+ * \brief The iterative Gauss-Seidel (SOR) solver for sparse matrices
+ *
+ * The Gauss-Seidel solver solves the linear equation system Ax = b.
+ * The result is written to the vector x.
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>error</i> defines the error break criterion for the solver.
+ *
+ * \param Asp G_math_spvector ** -- the sparse matrix
+ * \param x double * -- the vector of unknowns
+ * \param b double * -- the right hand side vector
+ * \param rows int -- number of rows
+ * \param maxit int -- the maximum number of iterations
+ * \param sor double -- defines the successive overrelaxation parameter [0:2]
+ * \param error double -- defines the error break criterion
+ * \return int -- 1=success, 0=maximum number of iterations reached without convergence
+ *
+ * */
+int G_math_solver_sparse_gs(G_math_spvector ** Asp, double *x, double *b,
+			    int rows, int maxit, double sor, double error)
+{
+    int i, j, k, finished = 0;
+
+    double *Enew;
+
+    double E, err = 0;
+
+    int center;
+
+    Enew = G_alloc_vector(rows);
+
+    for (k = 0; k < maxit; k++) {
+	err = 0;
+	{
+	    if (k == 0) {
+		for (j = 0; j < rows; j++) {
+		    Enew[j] = x[j];
+		}
+	    }
+	    for (i = 0; i < rows; i++) {
+		E = 0;
+		center = 0;
+		for (j = 0; j < Asp[i]->cols; j++) {
+		    E += Asp[i]->values[j] * Enew[Asp[i]->index[j]];
+		    if (Asp[i]->index[j] == i)
+			center = j;
+		}
+		Enew[i] = x[i] - sor * (E - b[i]) / Asp[i]->values[center];
+	    }
+	    for (j = 0; j < rows; j++) {
+		err += (x[j] - Enew[j]) * (x[j] - Enew[j]);
+
+		x[j] = Enew[j];
+	    }
+	}
+
+	G_message(_("sparse SOR -- iteration %5i error %g\n"), k, err);
+
+	if (err < error) {
+	    finished = 1;
+	    break;
+	}
+    }
+
+    G_free(Enew);
+
+    return finished;
+}
+
+
+/*!
+ * \brief The iterative Jacobi solver for quadratic matrices
+ *
+ * The Jacobi solver solves the linear equation system Ax = b.
+ * The result is written to the vector x.
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>error</i> defines the error break criterion for the solver.
+ *
+ * \param A double ** -- the quadratic matrix
+ * \param x double * -- the vector of unknowns
+ * \param b double * -- the right hand side vector
+ * \param rows int -- number of rows
+ * \param maxit int -- the maximum number of iterations
+ * \param sor double -- defines the successive overrelaxation parameter [0:1]
+ * \param error double -- defines the error break criterion
+ * \return int -- 1=success
+ *
+ * */
+int G_math_solver_jacobi(double **A, double *x, double *b, int rows,
+			 int maxit, double sor, double error)
+{
+    int i, j, k;
+
+    double *Enew;
+
+    double E, err = 0;
+
+    Enew = G_alloc_vector(rows);
+
+    for (j = 0; j < rows; j++) {
+	Enew[j] = x[j];
+    }
+
+    for (k = 0; k < maxit; k++) {
+	for (i = 0; i < rows; i++) {
+	    E = 0;
+	    for (j = 0; j < rows; j++) {
+		E += A[i][j] * x[j];
+	    }
+	    Enew[i] = x[i] - sor * (E - b[i]) / A[i][i];
+	}
+	err = 0;
+	for (j = 0; j < rows; j++) {
+	    err += (x[j] - Enew[j]) * (x[j] - Enew[j]);
+	    x[j] = Enew[j];
+	}
+	G_message(_("Jacobi -- iteration %5i error %g\n"), k, err);
+	if (err < error)
+	    break;
+    }
+
+    return 1;
+}
+
+
+/*!
+ * \brief The iterative Gauss-Seidel (SOR) solver for quadratic matrices
+ *
+ * The Gauss-Seidel solver solves the linear equation system Ax = b.
+ * The result is written to the vector x.
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>error</i> defines the error break criterion for the solver.
+ *
+ * \param A double ** -- the quadratic matrix
+ * \param x double * -- the vector of unknowns
+ * \param b double * -- the right hand side vector
+ * \param rows int -- number of rows
+ * \param maxit int -- the maximum number of iterations
+ * \param sor double -- defines the successive overrelaxation parameter [0:2]
+ * \param error double -- defines the error break criterion
+ * \return int -- 1=success
+ *
+ * */
+int G_math_solver_gs(double **A, double *x, double *b, int rows, int maxit,
+		     double sor, double error)
+{
+    int i, j, k;
+
+    double *Enew;
+
+    double E, err = 0;
+
+    Enew = G_alloc_vector(rows);
+
+    for (j = 0; j < rows; j++) {
+	Enew[j] = x[j];
+    }
+
+    for (k = 0; k < maxit; k++) {
+	for (i = 0; i < rows; i++) {
+	    E = 0;
+	    for (j = 0; j < rows; j++) {
+		E += A[i][j] * Enew[j];
+	    }
+	    Enew[i] = x[i] - sor * (E - b[i]) / A[i][i];
+	}
+	err = 0;
+	for (j = 0; j < rows; j++) {
+	    err += (x[j] - Enew[j]) * (x[j] - Enew[j]);
+	    x[j] = Enew[j];
+	}
+	G_message(_("SOR -- iteration %5i error %g\n"), k, err);
+	if (err < error)
+	    break;
+    }
+
+    return 1;
+}

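A hedged usage sketch for the dense classic iterative solvers (not part of this
changeset). With sor = 1.0 the plain Jacobi or Gauss-Seidel iteration is performed;
the solvers stop once the squared norm of the update falls below the given error
bound or maxit is reached:

#include <grass/gis.h>
#include <grass/gmath.h>

void example_classic_iter(void)
{
    int n = 2;
    double **A = G_alloc_matrix(n, n);
    double *x = G_alloc_vector(n);
    double *b = G_alloc_vector(n);

    A[0][0] = 4.0; A[0][1] = 1.0;     /* diagonally dominant system */
    A[1][0] = 1.0; A[1][1] = 3.0;
    b[0] = 1.0;    b[1] = 2.0;
    x[0] = x[1] = 0.0;                /* initial guess */

    G_math_solver_jacobi(A, x, b, n, 100, 1.0, 1e-10);
    /* or: G_math_solver_gs(A, x, b, n, 100, 1.0, 1e-10); */

    G_free_matrix(A);
    G_free_vector(x);
    G_free_vector(b);
}
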
Added: grass/branches/develbranch_6/lib/gmath/solvers_direct.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/solvers_direct.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/solvers_direct.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,416 @@
+
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      direct linear equation system solvers
+ * 		part of the gpde library
+ *               
+ * COPYRIGHT:    (C) 2007 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include "grass/gis.h"
+#include "grass/glocale.h"
+#include "grass/gmath.h"
+
+#define TINY 1.0e-20
+#define COMP_PIVOT 100
+
+/*!
+ * \brief The Gauss elimination solver for quadratic matrices
+ *
+ * This solver does not support sparse matrices.
+ * The matrix A will be overwritten.
+ * The result is written to the vector x.
+ *
+ * \param A double **
+ * \param x double *
+ * \param b double *
+ * \param rows int
+ * \return int -- 1 success
+ * */
+int G_math_solver_gauss(double **A, double *x, double *b, int rows)
+{
+    G_message(_("Starting direct gauss elimination solver"));
+
+    G_math_gauss_elimination(A, b, rows);
+    G_math_backward_solving(A, x, b, rows);
+
+    return 1;
+}
+
+/*!
+ * \brief The LU decomposition solver for quadratic matrices
+ *
+ * This solver does not support sparse matrices.
+ * The matrix A will be overwritten.
+ * The result is written to the vector x.
+ *
+ *
+ * \param A double **
+ * \param x double *
+ * \param b double *
+ * \param rows int
+ * \return int -- 1 success
+ * */
+int G_math_solver_lu(double **A, double *x, double *b, int rows)
+{
+    int i;
+
+    double *c, *tmpv;
+
+    G_message(_("Starting direct lu decomposition solver"));
+
+    tmpv = G_alloc_vector(rows);
+    c = G_alloc_vector(rows);
+
+    G_math_lu_decomposition(A, b, rows);
+
+#pragma omp parallel
+    {
+
+#pragma omp for  schedule (static) private(i)
+	for (i = 0; i < rows; i++) {
+	    tmpv[i] = A[i][i];
+	    A[i][i] = 1;
+	}
+
+#pragma omp single
+	{
+	    G_math_forward_solving(A, b, b, rows);
+	}
+
+#pragma omp for  schedule (static) private(i)
+	for (i = 0; i < rows; i++) {
+	    A[i][i] = tmpv[i];
+	}
+
+#pragma omp single
+	{
+	    G_math_backward_solving(A, x, b, rows);
+	}
+    }
+
+    G_free(c);
+    G_free(tmpv);
+
+
+    return 1;
+}
+
+/*!
+ * \brief The Cholesky decomposition solver for quadratic, symmetric
+ * positive definite matrices
+ *
+ * This solver does not support sparse matrices.
+ * The matrix A will be overwritten.
+ * The result is written to the vector x.
+ *
+ * \param A double **
+ * \param x double *
+ * \param b double *
+ * \param bandwith int -- the bandwidth of the band matrix, a value <= 0 selects the full bandwidth
+ * \param rows int
+ * \return int -- 1 success, -2 if the matrix is not positive definite
+ * */
+int G_math_solver_cholesky(double **A, double *x, double *b, int bandwith,
+			   int rows)
+{
+
+    G_message(_("Starting cholesky decomposition solver"));
+
+    if (G_math_cholesky_decomposition(A, rows, bandwith) != 1) {
+	G_warning(_("Unable to solve the linear equation system"));
+	return -2;
+    }
+
+    G_math_forward_solving(A, b, b, rows);
+    G_math_backward_solving(A, x, b, rows);
+
+    return 1;
+}
+
+/*!
+ * \brief Gauss elimination
+ *
+ * To keep this solver efficient,
+ * no pivoting is performed.
+ * The matrix will be overwritten with the decomposed form.
+ * \param A double **
+ * \param b double * 
+ * \param rows int
+ * \return void
+ *
+ * */
+void G_math_gauss_elimination(double **A, double *b, int rows)
+{
+    int i, j, k;
+
+    double tmpval = 0.0;
+
+    /*compute the pivot -- commented out, because it is of little use
+       to compute it only once here. */
+    /*G_math_pivot_create(A, b, rows, 0); */
+
+    for (k = 0; k < rows - 1; k++) {
+#pragma omp parallel for schedule (static) private(i, j, tmpval) shared(k, A, b, rows)
+	for (i = k + 1; i < rows; i++) {
+	    tmpval = A[i][k] / A[k][k];
+	    b[i] = b[i] - tmpval * b[k];
+	    for (j = k + 1; j < rows; j++) {
+		A[i][j] = A[i][j] - tmpval * A[k][j];
+	    }
+	}
+    }
+
+    return;
+}
+
+/*!
+ * \brief lu decomposition
+ *
+ * To keep this solver efficient,
+ * no pivoting is performed.
+ * The matrix will be overwritten with the decomposed form.
+ *
+ * \param A double **
+ * \param b double * -- this vector is needed if it is part of the linear equation system, otherwise set it to NULL
+ * \param rows int
+ * \return void
+ *
+ * */
+void G_math_lu_decomposition(double **A, double *b, int rows)
+{
+
+    int i, j, k;
+
+    /*compute the pivot -- commented out, because it is of little use
+       to compute it only once here. */
+    /*G_math_pivot_create(A, b, rows, 0); */
+
+    for (k = 0; k < rows - 1; k++) {
+#pragma omp parallel for schedule (static) private(i, j) shared(k, A, rows)
+	for (i = k + 1; i < rows; i++) {
+	    A[i][k] = A[i][k] / A[k][k];
+	    for (j = k + 1; j < rows; j++) {
+		A[i][j] = A[i][j] - A[i][k] * A[k][j];
+	    }
+	}
+    }
+
+    return;
+}
+
+/*!
+ * \brief Cholesky decomposition for symmetric, positive definite matrices
+ *        with bandwidth optimization
+ *
+ * The provided matrix will be overwritten with the lower and
+ * upper triangle of the factorization A = LL^T
+ *
+ * \param A double **
+ * \param rows int
+ * \param bandwith int -- the bandwidth of the matrix (0 < bandwith <= rows, a value <= 0 selects the full bandwidth)
+ * \return int -- 1 on success, -1 if the matrix is not positive definite
+ *
+ * */
+int G_math_cholesky_decomposition(double **A, int rows, int bandwith)
+{
+
+    int i = 0, j = 0, k = 0;
+
+    double sum_1 = 0.0;
+
+    double sum_2 = 0.0;
+
+    int colsize;
+
+    if (bandwith <= 0)
+	bandwith = rows;
+
+    colsize = bandwith;
+
+    for (k = 0; k < rows; k++) {
+#pragma omp parallel for schedule (static) private(i, j, sum_2) shared(A, k) reduction(+:sum_1)
+	for (j = 0; j < k; j++) {
+	    sum_1 += A[k][j] * A[k][j];
+	}
+
+	if (0 > (A[k][k] - sum_1)) {
+	    G_warning("Matrix is not positive definite. break.");
+	    return -1;
+	}
+	A[k][k] = sqrt(A[k][k] - sum_1);
+	sum_1 = 0.0;
+
+	if ((k + bandwith) > rows) {
+	    colsize = rows;
+	}
+	else {
+	    colsize = k + bandwith;
+	}
+
+#pragma omp parallel for schedule (static) private(i, j, sum_2) shared(A, k, sum_1, colsize)
+
+	for (i = k + 1; i < colsize; i++) {
+	    sum_2 = 0.0;
+	    for (j = 0; j < k; j++) {
+		sum_2 += A[i][j] * A[k][j];
+	    }
+	    A[i][k] = (A[i][k] - sum_2) / A[k][k];
+	}
+
+    }
+    /*we need to copy the lower triangle matrix to the upper triangle */
+#pragma omp parallel for schedule (static) private(i, k) shared(A, rows)
+    for (k = 0; k < rows; k++) {
+	for (i = k + 1; i < rows; i++) {
+	    A[k][i] = A[i][k];
+	}
+    }
+
+
+    return 1;
+}
+
+/*!
+ * \brief backward solving
+ *
+ * \param A double **
+ * \param x double *
+ * \param b double *
+ * \param rows int
+ * \return void
+ *
+ * */
+void G_math_backward_solving(double **A, double *x, double *b, int rows)
+{
+    int i, j;
+
+    for (i = rows - 1; i >= 0; i--) {
+	for (j = i + 1; j < rows; j++) {
+	    b[i] = b[i] - A[i][j] * x[j];
+	}
+	x[i] = (b[i]) / A[i][i];
+    }
+
+    return;
+}
+
+/*!
+ * \brief forward solving
+ *
+ * \param A double **
+ * \param x double *
+ * \param b double *
+ * \param rows int
+ * \return void
+ *
+ * */
+void G_math_forward_solving(double **A, double *x, double *b, int rows)
+{
+    int i, j;
+
+    double tmpval = 0.0;
+
+    for (i = 0; i < rows; i++) {
+	tmpval = 0;
+	for (j = 0; j < i; j++) {
+	    tmpval += A[i][j] * x[j];
+	}
+	x[i] = (b[i] - tmpval) / A[i][i];
+    }
+
+    return;
+}
+
+
+/*!
+ * \brief Optimize the structure of the linear equation system with a common pivoting strategy
+ *
+ * Create an optimized linear equation system for
+ * direct solvers: Gauss elimination and LU decomposition.
+ *
+ * The rows are permuted based on the pivot elements.
+ *
+ * This algorithm will modify the provided linear equation system
+ * and should only be used with the Gauss elimination and LU decomposition solvers.
+ *
+ * \param A double ** - a quadratic matrix
+ * \param b double *  - the right hand side vector, if not available set it to NULL
+ * \param rows int
+ * \param start int -- the row at which the pivoting starts
+ * \return int - the number of swapped rows
+ *
+ *
+ * */
+int G_math_pivot_create(double **A, double *b, int rows, int start)
+{
+    int num = 0;		/*number of changed rows */
+
+    int i, j, k;
+
+    double max;
+
+    int number = 0;
+
+    double tmpval = 0.0, s = 0.0;
+
+    double *link = NULL;
+
+    link = G_alloc_vector(rows);
+
+    G_debug(2, "G_math_pivot_create: swap rows if needed");
+    for (i = start; i < rows; i++) {
+	s = 0.0;
+	for (k = i + 1; k < rows; k++) {
+	    s += fabs(A[i][k]);
+	}
+	max = fabs(A[i][i]) / s;
+	number = i;
+	for (j = i + 1; j < rows; j++) {
+	    s = 0.0;
+	    for (k = j; k < rows; k++) {
+		s += fabs(A[j][k]);
+	    }
+	    /*search for the pivot element */
+	    if (max < fabs(A[j][i]) / s) {
+		max = fabs(A[j][i] / s);
+		number = j;
+	    }
+	}
+	if (max == 0) {
+	    max = TINY;
+	    G_warning("Matrix is singular");
+	}
+	/*if a pivot element was found, swap the les entries */
+	if (number != i) {
+
+	    G_debug(4, "swap row %i with row %i", i, number);
+
+	    if (b != NULL) {
+		tmpval = b[number];
+		b[number] = b[i];
+		b[i] = tmpval;
+	    }
+	    G_math_d_copy(A[number], link, rows);
+	    G_math_d_copy(A[i], A[number], rows);
+	    G_math_d_copy(link, A[i], rows);
+	    num++;
+	}
+    }
+
+    G_free_vector(link);
+
+    return num;
+}

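A hedged usage sketch for the direct solvers (not part of this changeset). Both A and
b are modified by the computation; a bandwidth value <= 0 makes
G_math_cholesky_decomposition() fall back to the full bandwidth:

#include <grass/gis.h>
#include <grass/gmath.h>

void example_direct(void)
{
    int n = 2;
    double **A = G_alloc_matrix(n, n);
    double *x = G_alloc_vector(n);
    double *b = G_alloc_vector(n);

    A[0][0] = 4.0; A[0][1] = 1.0;     /* symmetric positive definite */
    A[1][0] = 1.0; A[1][1] = 3.0;
    b[0] = 1.0;    b[1] = 2.0;

    if (G_math_solver_cholesky(A, x, b, 0, n) != 1)
        G_warning("Cholesky solver failed");
    /* alternatives for general matrices:
       G_math_solver_gauss(A, x, b, n); or G_math_solver_lu(A, x, b, n); */

    G_free_matrix(A);
    G_free_vector(x);
    G_free_vector(b);
}
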
Added: grass/branches/develbranch_6/lib/gmath/solvers_krylov.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/solvers_krylov.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/solvers_krylov.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,737 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      linear equation system solvers
+* 		part of the gpde library
+*               
+* COPYRIGHT:    (C) 2000 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <math.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <grass/gis.h>
+#include <grass/gmath.h>
+#include <grass/glocale.h>
+
+#define		G_MATH_ROWSCALE_L2_NORM_PRECONDITION 1
+#define		G_MATH_ROWSCALE_L1_NORM_PRECONDITION 2
+#define		G_MATH_DIAGONAL_PRECONDITION 3
+
+static G_math_spvector **create_diag_precond_matrix(double **A,
+						    G_math_spvector ** Asp,
+						    int rows, int prec);
+static int solver_pcg(double **A, G_math_spvector ** Asp, double *x,
+		      double *b, int rows, int maxit, double err, int prec);
+static int solver_cg(double **A, G_math_spvector ** Asp, double *x, double *b,
+		     int rows, int maxit, double err);
+static int solver_bicgstab(double **A, G_math_spvector ** Asp, double *x,
+			   double *b, int rows, int maxit, double err);
+
+
+/*!
+ * \brief The iterative preconditioned conjugate gradients solver for symmetric positive definite matrices
+ *
+ * This iterative solver works with symmetric positive definite  regular quadratic matrices.
+ *
+ * This solver solves the linear equation system:
+ *  A x = b
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>err</i> defines the error break criterion for the solver.
+ *
+ * \param A (double **) -- the matrix
+ * \param x (double *) -- the value vector
+ * \param b (double *) -- the right hand side
+ * \param rows (int)
+ * \param maxit (int) -- the maximum number of iterations
+ * \param err (double) -- defines the error break criterion
+ * \param prec (int) -- the preconditioner which should be used (1, 2 or 3)
+ * \return (int) -- 1 - success, 2 - not finished but success, 0 - matrix singular, -1 - could not solve the les
+ * 
+ * */
+int G_math_solver_pcg(double **A, double *x, double *b, int rows, int maxit,
+		      double err, int prec)
+{
+
+    return solver_pcg(A, NULL, x, b, rows, maxit, err, prec);
+}
+
+/*!
+ * \brief The iterative preconditioned conjugate gradients solver for sparse symmetric positive definite matrices
+ *
+ * This iterative solver works with symmetric positive definite sparse matrices.
+ *
+ * This solver solves the linear equation system:
+ *  A x = b
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>err</i> defines the error break criterion for the solver.
+ *
+ * \param Asp (G_math_spvector **) -- the sparse matrix
+ * \param x (double *) -- the value vector
+ * \param b (double *) -- the right hand side
+ * \param rows (int)
+ * \param maxit (int) -- the maximum number of iterations
+ * \param err (double) -- defines the error break criterion
+ * \param prec (int) -- the preconditioner which should be used (1, 2 or 3)
+ * \return (int) -- 1 - success, 2 - not finished but success, 0 - matrix singular, -1 - could not solve the les
+ * 
+ * */
+int G_math_solver_sparse_pcg(G_math_spvector ** Asp, double *x, double *b,
+			     int rows, int maxit, double err, int prec)
+{
+
+    return solver_pcg(NULL, Asp, x, b, rows, maxit, err, prec);
+}
+
+int solver_pcg(double **A, G_math_spvector ** Asp, double *x, double *b,
+	       int rows, int maxit, double err, int prec)
+{
+    double *r, *z;
+
+    double *p;
+
+    double *v;
+
+    double s = 0.0;
+
+    double a0 = 0, a1 = 0, mygamma, tmp = 0;
+
+    int m, i;
+
+    int finished = 2;
+
+    int error_break;
+
+    G_math_spvector **M;
+
+    r = G_alloc_vector(rows);
+    p = G_alloc_vector(rows);
+    v = G_alloc_vector(rows);
+    z = G_alloc_vector(rows);
+
+    error_break = 0;
+
+    /*compute the preconditioning matrix, this is a sparse matrix */
+    M = create_diag_precond_matrix(A, Asp, rows, prec);
+
+    /*
+     * residual calculation 
+     */
+#pragma omp parallel
+    {
+	if (Asp)
+	    G_math_Ax_sparse(Asp, x, v, rows);
+	else
+	    G_math_d_Ax(A, x, v, rows, rows);
+
+	G_math_d_ax_by(b, v, r, 1.0, -1.0, rows);
+	/*perform the preconditioning */
+	G_math_Ax_sparse(M, r, p, rows);
+
+	/* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s)
+	for (i = 0; i < rows; i++) {
+	    s += p[i] * r[i];
+	}
+    }
+
+    a0 = s;
+    s = 0.0;
+
+    /* ******************* */
+    /* start the iteration */
+    /* ******************* */
+    for (m = 0; m < maxit; m++) {
+#pragma omp parallel default(shared)
+	{
+	    if (Asp)
+		G_math_Ax_sparse(Asp, p, v, rows);
+	    else
+		G_math_d_Ax(A, p, v, rows, rows);
+
+
+
+	    /* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s)
+	    for (i = 0; i < rows; i++) {
+		s += v[i] * p[i];
+	    }
+
+	    /* barrier */
+#pragma omp single
+	    {
+		tmp = s;
+		mygamma = a0 / tmp;
+		s = 0.0;
+	    }
+
+	    G_math_d_ax_by(p, x, x, mygamma, 1.0, rows);
+
+	    if (m % 50 == 1) {
+		if (Asp)
+		    G_math_Ax_sparse(Asp, x, v, rows);
+		else
+		    G_math_d_Ax(A, x, v, rows, rows);
+
+		G_math_d_ax_by(b, v, r, 1.0, -1.0, rows);
+	    }
+	    else {
+		G_math_d_ax_by(r, v, r, 1.0, -1.0 * mygamma, rows);
+	    }
+
+	    /*perform the preconditioning */
+	    G_math_Ax_sparse(M, r, z, rows);
+
+
+	    /* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s)
+	    for (i = 0; i < rows; i++) {
+		s += z[i] * r[i];
+	    }
+
+	    /* barrier */
+#pragma omp single
+	    {
+		a1 = s;
+		tmp = a1 / a0;
+		a0 = a1;
+		s = 0.0;
+
+		if (a1 < 0 || a1 == 0 || a1 > 0) {
+		    ;
+		}
+		else {
+		    G_warning(_
+			      ("Unable to solve the linear equation system"));
+		    error_break = 1;
+		}
+	    }
+	    G_math_d_ax_by(p, z, p, tmp, 1.0, rows);
+	}
+
+	if (Asp != NULL)
+	    G_message(_("Sparse PCG -- iteration %i error  %g\n"), m, a0);
+	else
+	    G_message(_("PCG -- iteration %i error  %g\n"), m, a0);
+
+	if (error_break == 1) {
+	    finished = -1;
+	    break;
+	}
+
+
+	if (a0 < err) {
+	    finished = 1;
+	    break;
+	}
+    }
+
+    G_free(r);
+    G_free(p);
+    G_free(v);
+    G_free(z);
+    G_math_free_spmatrix(M, rows);
+
+    return finished;
+}
+
+
+/*!
+ * \brief The iterative conjugate gradients solver for symmetric positive definite matrices
+ *
+ * This iterative solver works with symmetric positive definite  regular quadratic matrices.
+ *
+ * This solver solves the linear equation system:
+ *  A x = b
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>err</i> defines the error break criterion for the solver.
+ *
+ * \param A (double **) -- the matrix
+ * \param x (double *) -- the value vector
+ * \param b (double *) -- the right hand side
+ * \param rows (int)
+ * \param maxit (int) -- the maximum number of iterations
+ * \param err (double) -- defines the error break criterion
+ * \return (int) -- 1 - success, 2 - not finished but success, 0 - matrix singular, -1 - could not solve the les
+ * 
+ * */
+int G_math_solver_cg(double **A, double *x, double *b, int rows, int maxit,
+		     double err)
+{
+    return solver_cg(A, NULL, x, b, rows, maxit, err);
+}
+
+/*!
+ * \brief The iterative conjugate gradients solver for sparse symmetric positive definite matrices
+ *
+ * This iterative solver works with symmetric positive definite sparse matrices.
+ *
+ * This solver solves the linear equation system:
+ *  A x = b
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and writes the current result into the vector x.
+ * The parameter <i>err</i> defines the error break criteria for the solver.
+ *
+ * \param Asp (G_math_spvector **) -- the sparse matrix
+ * \param x (double *) -- the value vector
+ * \param b (double *) -- the right hand side
+ * \param rows (int)
+ * \param maxit (int) -- the maximum number of iterations
+ * \param err (double) -- defines the error break criteria
+ * \return (int) -- 1 - success, 2 - not finished but success, 0 - matrix singular, -1 - could not solve the les
+ * 
+ * */
+int G_math_solver_sparse_cg(G_math_spvector ** Asp, double *x, double *b,
+			    int rows, int maxit, double err)
+{
+    return solver_cg(NULL, Asp, x, b, rows, maxit, err);
+}
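+
+/*
+ * A minimal sketch of the sparse variant, assuming a hypothetical
+ * diagonal 3x3 system. Every matrix row is stored as one
+ * G_math_spvector that holds only its non-zero entries.
+ *
+ * \code
+ * int i, rows = 3;
+ * G_math_spvector **Asp = G_math_alloc_spmatrix(rows);
+ * double *x = G_alloc_vector(rows);
+ * double *b = G_alloc_vector(rows);
+ *
+ * for (i = 0; i < rows; i++) {
+ *     G_math_spvector *spv = G_math_alloc_spvector(1);
+ *
+ *     spv->index[0] = i;         // column of the single non-zero entry
+ *     spv->values[0] = 2.0 + i;  // diagonal value
+ *     G_math_add_spvector(Asp, spv, i);
+ *     b[i] = 1.0;
+ *     x[i] = 0.0;
+ * }
+ *
+ * G_math_solver_sparse_cg(Asp, x, b, rows, 100, 1.0e-10);
+ *
+ * G_math_free_spmatrix(Asp, rows);
+ * G_free_vector(x);
+ * G_free_vector(b);
+ * \endcode
+ */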
+
+
+int solver_cg(double **A, G_math_spvector ** Asp, double *x, double *b,
+	      int rows, int maxit, double err)
+{
+    double *r;
+
+    double *p;
+
+    double *v;
+
+    double s = 0.0;
+
+    double a0 = 0, a1 = 0, mygamma, tmp = 0;
+
+    int m, i;
+
+    int finished = 2;
+
+    int error_break;
+
+    r = G_alloc_vector(rows);
+    p = G_alloc_vector(rows);
+    v = G_alloc_vector(rows);
+
+    error_break = 0;
+    /*
+     * residual calculation 
+     */
+#pragma omp parallel
+    {
+	if (Asp)
+	    G_math_Ax_sparse(Asp, x, v, rows);
+	else
+	    G_math_d_Ax(A, x, v, rows, rows);
+
+	G_math_d_ax_by(b, v, r, 1.0, -1.0, rows);
+	G_math_d_copy(r, p, rows);
+
+	/* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s)
+	for (i = 0; i < rows; i++) {
+	    s += r[i] * r[i];
+	}
+    }
+
+    a0 = s;
+    s = 0.0;
+
+    /* ******************* */
+    /* start the iteration */
+    /* ******************* */
+    for (m = 0; m < maxit; m++) {
+#pragma omp parallel default(shared)
+	{
+	    if (Asp)
+		G_math_Ax_sparse(Asp, p, v, rows);
+	    else
+		G_math_d_Ax(A, p, v, rows, rows);
+
+	    /* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s)
+	    for (i = 0; i < rows; i++) {
+		s += v[i] * p[i];
+	    }
+
+	    /* barrier */
+#pragma omp single
+	    {
+		tmp = s;
+		mygamma = a0 / tmp;
+		s = 0.0;
+	    }
+
+	    G_math_d_ax_by(p, x, x, mygamma, 1.0, rows);
+
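+	    /* recompute the residual r = b - Ax directly every 50 iterations
+	     * instead of using the recurrence update */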
+	    if (m % 50 == 1) {
+		if (Asp)
+		    G_math_Ax_sparse(Asp, x, v, rows);
+		else
+		    G_math_d_Ax(A, x, v, rows, rows);
+
+		G_math_d_ax_by(b, v, r, 1.0, -1.0, rows);
+	    }
+	    else {
+		G_math_d_ax_by(r, v, r, 1.0, -1.0 * mygamma, rows);
+	    }
+
+	    /* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s)
+	    for (i = 0; i < rows; i++) {
+		s += r[i] * r[i];
+	    }
+
+	    /* barrier */
+#pragma omp single
+	    {
+		a1 = s;
+		tmp = a1 / a0;
+		a0 = a1;
+		s = 0.0;
+
+		/* a NaN result fails all three comparisons, so the else branch reports the breakdown */
+		if (a1 < 0 || a1 == 0 || a1 > 0) {
+		    ;
+		}
+		else {
+		    G_warning(_
+			      ("Unable to solve the linear equation system"));
+		    error_break = 1;
+		}
+	    }
+	    G_math_d_ax_by(p, r, p, tmp, 1.0, rows);
+	}
+
+	if (Asp != NULL)
+	    G_message(_("Sparse CG -- iteration %i error  %g\n"), m, a0);
+	else
+	    G_message(_("CG -- iteration %i error  %g\n"), m, a0);
+
+	if (error_break == 1) {
+	    finished = -1;
+	    break;
+	}
+
+	if (a0 < err) {
+	    finished = 1;
+	    break;
+	}
+    }
+
+    G_free(r);
+    G_free(p);
+    G_free(v);
+
+    return finished;
+}
+
+
+
+/*!
+ * \brief The iterative biconjugate gradients solver with stabilization for unsymmetric non-definite matrices
+ *
+ * This iterative solver works with regular (non-singular) square matrices.
+ *
+ * This solver solves the linear equation system:
+ *  A x = b
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>err</i> defines the error break criteria for the solver.
+ *
+ * \param A (double **) -- the matrix
+ * \param x (double *) -- the value vector
+ * \param b (double *) -- the right hand side
+ * \param rows (int)
+ * \param maxit (int) -- the maximum number of iterations
+ * \param err (double) -- defines the error break criteria
+ * \return (int) -- 1 - success, 2 - not finished but success, 0 - matrix singular, -1 - could not solve the les
+ * 
+ * */
+int G_math_solver_bicgstab(double **A, double *x, double *b, int rows,
+			   int maxit, double err)
+{
+    return solver_bicgstab(A, NULL, x, b, rows, maxit, err);
+}
+
+/*!
+ * \brief The iterative biconjugate gradients solver with stabilization for unsymmetric non-definite matrices
+ *
+ * This iterative solver works with sparse matrices.
+ *
+ * This solver solves the linear equation system:
+ *  A x = b
+ *
+ * The parameter <i>maxit</i> specifies the maximum number of iterations. If the maximum is reached, the
+ * solver will abort the calculation and write the current result into the vector x.
+ * The parameter <i>err</i> defines the error break criteria for the solver.
+ *
+ * \param Asp (G_math_spvector **) -- the sparse matrix
+ * \param x (double *) -- the value vector
+ * \param b (double *) -- the right hand side
+ * \param rows (int)
+ * \param maxit (int) -- the maximum number of iterations
+ * \param err (double) -- defines the error break criteria
+ * \return (int) -- 1 - success, 2 - not finished but success, 0 - matrix singular, -1 - could not solve the les
+ * 
+ * */
+int G_math_solver_sparse_bicgstab(G_math_spvector ** Asp, double *x,
+				  double *b, int rows, int maxit, double err)
+{
+    return solver_bicgstab(NULL, Asp, x, b, rows, maxit, err);
+}
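+
+/*
+ * A minimal sketch for the BiCGStab solver, assuming a hypothetical
+ * non-symmetric 2x2 system. Apart from the missing symmetry requirement
+ * the call pattern is the same as for the conjugate gradients solvers.
+ *
+ * \code
+ * double **A = G_alloc_matrix(2, 2);
+ * double *x = G_alloc_vector(2);
+ * double *b = G_alloc_vector(2);
+ *
+ * A[0][0] = 3.0; A[0][1] = 2.0;  // non-symmetric matrix
+ * A[1][0] = 1.0; A[1][1] = 4.0;
+ * b[0] = 5.0; b[1] = 6.0;
+ * x[0] = x[1] = 0.0;
+ *
+ * G_math_solver_bicgstab(A, x, b, 2, 100, 1.0e-10);
+ * \endcode
+ */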
+
+
+int solver_bicgstab(double **A, G_math_spvector ** Asp, double *x, double *b,
+		    int rows, int maxit, double err)
+{
+    double *r;
+
+    double *r0;
+
+    double *p;
+
+    double *v;
+
+    double *s;
+
+    double *t;
+
+    double s1 = 0.0, s2 = 0.0, s3 = 0.0;
+
+    double alpha = 0, beta = 0, omega, rr0 = 0, error;
+
+    int m, i;
+
+    int finished = 2;
+
+    int error_break;
+
+    r = G_alloc_vector(rows);
+    r0 = G_alloc_vector(rows);
+    p = G_alloc_vector(rows);
+    v = G_alloc_vector(rows);
+    s = G_alloc_vector(rows);
+    t = G_alloc_vector(rows);
+
+    error_break = 0;
+
+#pragma omp parallel
+    {
+	if (Asp)
+	    G_math_Ax_sparse(Asp, x, v, rows);
+	else
+	    G_math_d_Ax(A, x, v, rows, rows);
+
+	G_math_d_ax_by(b, v, r, 1.0, -1.0, rows);
+	G_math_d_copy(r, r0, rows);
+	G_math_d_copy(r, p, rows);
+    }
+
+    s1 = s2 = s3 = 0.0;
+
+    /* ******************* */
+    /* start the iteration */
+    /* ******************* */
+    for (m = 0; m < maxit; m++) {
+
+#pragma omp parallel default(shared)
+	{
+	    if (Asp)
+		G_math_Ax_sparse(Asp, p, v, rows);
+	    else
+		G_math_d_Ax(A, p, v, rows, rows);
+
+	    /* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s1, s2, s3)
+	    for (i = 0; i < rows; i++) {
+		s1 += r[i] * r[i];
+		s2 += r[i] * r0[i];
+		s3 += v[i] * r0[i];
+	    }
+
+#pragma omp single
+	    {
+		error = s1;
+
+		/* a NaN result fails all three comparisons, so the else branch reports the breakdown */
+		if (error < 0 || error == 0 || error > 0) {
+		    ;
+		}
+		else {
+		    G_warning(_
+			      ("Unable to solve the linear equation system"));
+		    error_break = 1;
+		}
+
+		rr0 = s2;
+		alpha = rr0 / s3;
+		s1 = s2 = s3 = 0.0;
+	    }
+
+	    G_math_d_ax_by(r, v, s, 1.0, -1.0 * alpha, rows);
+	    if (Asp)
+		G_math_Ax_sparse(Asp, s, t, rows);
+	    else
+		G_math_d_Ax(A, s, t, rows, rows);
+
+	    /* scalar product */
+#pragma omp for schedule (static) private(i) reduction(+:s1, s2)
+	    for (i = 0; i < rows; i++) {
+		s1 += t[i] * s[i];
+		s2 += t[i] * t[i];
+	    }
+
+#pragma omp single
+	    {
+		omega = s1 / s2;
+		s1 = s2 = 0.0;
+	    }
+
+	    G_math_d_ax_by(p, s, r, alpha, omega, rows);
+	    G_math_d_ax_by(x, r, x, 1.0, 1.0, rows);
+	    G_math_d_ax_by(s, t, r, 1.0, -1.0 * omega, rows);
+
+#pragma omp for schedule (static) private(i) reduction(+:s1)
+	    for (i = 0; i < rows; i++) {
+		s1 += r[i] * r0[i];
+	    }
+
+#pragma omp single
+	    {
+		beta = alpha / omega * s1 / rr0;
+		s1 = s2 = s3 = 0.0;
+	    }
+
+	    G_math_d_ax_by(p, v, p, 1.0, -1.0 * omega, rows);
+	    G_math_d_ax_by(p, r, p, beta, 1.0, rows);
+	}
+
+
+	if (Asp != NULL)
+	    G_message(_("Sparse BiCGStab -- iteration %i error  %g\n"), m,
+		      error);
+	else
+	    G_message(_("BiCGStab -- iteration %i error  %g\n"), m, error);
+
+	if (error_break == 1) {
+	    finished = -1;
+	    break;
+	}
+
+	if (error < err) {
+	    finished = 1;
+	    break;
+	}
+    }
+
+    G_free(r);
+    G_free(r0);
+    G_free(p);
+    G_free(v);
+    G_free(s);
+    G_free(t);
+
+    return finished;
+}
+
+
+/*!
+ * \brief Compute a diagonal preconditioning matrix for the krylov subspace solvers
+ *
+ * \param A (double **) -- the matrix for which the preconditioner should be computed (set it to NULL if the sparse matrix is used)
+ * \param Asp (G_math_spvector **) -- the sparse matrix for which the preconditioner should be computed
+ * \param rows (int)
+ * \param prec (int) -- the preconditioner type: G_MATH_DIAGONAL_PRECONDITION (1/A[i][i]), G_MATH_ROWSCALE_L1_NORM_PRECONDITION (inverse row L1 norm) or G_MATH_ROWSCALE_L2_NORM_PRECONDITION (inverse row L2 norm)
+ *
+ * */
+G_math_spvector **create_diag_precond_matrix(double **A,
+					     G_math_spvector ** Asp, int rows,
+					     int prec)
+{
+    G_math_spvector **Msp;
+
+    int i, j, cols = rows;
+
+    double sum;
+
+    Msp = G_math_alloc_spmatrix(rows);
+
+    if (A != NULL) {
+#pragma omp parallel for schedule (static) private(i, j, sum) shared(A, Msp, rows, cols, prec)
+	for (i = 0; i < rows; i++) {
+	    G_math_spvector *spvect = G_math_alloc_spvector(1);
+
+	    switch (prec) {
+	    case G_MATH_ROWSCALE_L2_NORM_PRECONDITION:
+		sum = 0;
+		for (j = 0; j < cols; j++)
+		    sum += A[i][j] * A[i][j];
+		spvect->values[0] = 1.0 / sqrt(sum);
+		break;
+	    case G_MATH_ROWSCALE_L1_NORM_PRECONDITION:
+		sum = 0;
+		for (j = 0; j < cols; j++)
+		    sum += fabs(A[i][j]);
+		spvect->values[0] = 1.0 / (sum);
+		break;
+	    case G_MATH_DIAGONAL_PRECONDITION:
+	    default:
+		spvect->values[0] = 1.0 / A[i][i];
+		break;
+	    }
+
+
+	    spvect->index[0] = i;
+	    spvect->cols = 1;
+	    G_math_add_spvector(Msp, spvect, i);
+
+	}
+    }
+    else {
+#pragma omp parallel for schedule (static) private(i, j, sum) shared(Asp, Msp, rows, cols, prec)
+	for (i = 0; i < rows; i++) {
+	    G_math_spvector *spvect = G_math_alloc_spvector(1);
+
+	    switch (prec) {
+	    case G_MATH_ROWSCALE_L2_NORM_PRECONDITION:
+		sum = 0;
+		for (j = 0; j < Asp[i]->cols; j++)
+		    sum += Asp[i]->values[j] * Asp[i]->values[j];
+		spvect->values[0] = 1.0 / sqrt(sum);
+		break;
+	    case G_MATH_ROWSCALE_L1_NORM_PRECONDITION:
+		sum = 0;
+		for (j = 0; j < Asp[i]->cols; j++)
+		    sum += fabs(Asp[i]->values[j]);
+		spvect->values[0] = 1.0 / (sum);
+		break;
+	    case G_MATH_DIAGONAL_PRECONDITION:
+	    default:
+		for (j = 0; j < Asp[i]->cols; j++)
+		    if (i == Asp[i]->index[j])
+			spvect->values[0] = 1.0 / Asp[i]->values[j];
+		break;
+	    }
+
+	    spvect->index[0] = i;
+	    spvect->cols = 1;
+	    G_math_add_spvector(Msp, spvect, i);
+	}
+    }
+    return Msp;
+}

Added: grass/branches/develbranch_6/lib/gmath/sparse_matrix.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/sparse_matrix.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/sparse_matrix.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,240 @@
+
+/*****************************************************************************
+ *
+ * MODULE:       Grass Gmath Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      functions to manage linear equation systems
+ * 		part of the gmath library
+ *               
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <stdlib.h>
+#include <math.h>
+#include <grass/gmath.h>
+#include <grass/gis.h>
+
+/*!
+ * \brief Adds a sparse vector to a sparse linear equation system at position row
+ *
+ * Return 1 for success and -1 for failure
+ *
+ * \param spmatrix G_math_spvector ** 
+ * \param spvector G_math_spvector * 
+ * \param row int
+ * \return int 1 success, -1 failure
+ *
+ * */
+int G_math_add_spvector(G_math_spvector ** Asp, G_math_spvector * spvector,
+			int row)
+{
+    if (Asp != NULL) {
+	G_debug(5,
+		"Add sparse vector %p to the sparse linear equation system at row %i\n",
+		spvector, row);
+	Asp[row] = spvector;
+    }
+    else {
+	return -1;
+    }
+
+    return 1;
+}
+
+/*!
+ * \brief Allocate memory for a sparse matrix
+ *
+ * \param rows int
+ * \return G_math_spvector **
+ *
+ * */
+G_math_spvector **G_math_alloc_spmatrix(int rows)
+{
+    G_math_spvector **spmatrix;
+
+    G_debug(4, "Allocate memory for a sparse matrix with %i rows\n", rows);
+
+    spmatrix = (G_math_spvector **) G_calloc(rows, sizeof(G_math_spvector *));
+
+    return spmatrix;
+}
+
+/*!
+ * \brief Allocate memory for a sparse vector
+ *
+ * \param cols int
+ * \return G_math_spvector *
+ *
+ * */
+G_math_spvector *G_math_alloc_spvector(int cols)
+{
+    G_math_spvector *spvector;
+
+    G_debug(4, "Allocate memory for a sparse vector with %i cols\n", cols);
+
+    spvector = (G_math_spvector *) G_calloc(1, sizeof(G_math_spvector));
+
+    spvector->cols = cols;
+    spvector->index = (unsigned int *)G_calloc(cols, sizeof(unsigned int));
+    spvector->values = (double *)G_calloc(cols, sizeof(double));
+
+    return spvector;
+}
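+
+/*
+ * A short illustration of the storage scheme, assuming the hypothetical
+ * matrix row (0.0, 4.0, 0.0, -1.0): only the two non-zero entries are
+ * kept, together with their column indices.
+ *
+ * \code
+ * G_math_spvector *row = G_math_alloc_spvector(2);
+ *
+ * row->index[0] = 1;  row->values[0] = 4.0;   // column 1
+ * row->index[1] = 3;  row->values[1] = -1.0;  // column 3
+ * \endcode
+ */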
+
+/*!
+ * \brief Release the memory of the sparse vector
+ *
+ * \param spvector G_math_spvector *
+ * \return void
+ *
+ * */
+void G_math_free_spvector(G_math_spvector * spvector)
+{
+    if (spvector) {
+	if (spvector->values)
+	    G_free(spvector->values);
+	if (spvector->index)
+	    G_free(spvector->index);
+	G_free(spvector);
+
+	spvector = NULL;
+    }
+
+    return;
+}
+
+/*!
+ * \brief Release the memory of the sparse matrix
+ *
+ * \param spvector G_math_spvector **
+ * \param rows int
+ * \return void
+ *
+ * */
+void G_math_free_spmatrix(G_math_spvector ** spmatrix, int rows)
+{
+    int i;
+
+    if (spmatrix) {
+	for (i = 0; i < rows; i++)
+	    G_math_free_spvector(spmatrix[i]);
+
+	G_free(spmatrix);
+	spmatrix = NULL;
+    }
+
+    return;
+}
+
+/*!
+ *
+ * \brief print the sparse matrix Asp to stdout
+ *
+ *
+ * \param Asp (G_math_spvector **)
+ * \param rows (int)
+ * \return void
+ *  
+ * */
+void G_math_print_spmatrix(G_math_spvector ** Asp, int rows)
+{
+    int i, j, k, out;
+
+    for (i = 0; i < rows; i++) {
+	for (j = 0; j < rows; j++) {
+	    out = 0;
+	    for (k = 0; k < Asp[i]->cols; k++) {
+		if (Asp[i]->index[k] == j) {
+		    fprintf(stdout, "%4.5f ", Asp[i]->values[k]);
+		    out = 1;
+		}
+	    }
+	    if (!out)
+		fprintf(stdout, "%4.5f ", 0.0);
+	}
+	fprintf(stdout, "\n");
+    }
+
+    return;
+}
+
+
+/*!
+ * \brief Convert a sparse matrix into a dense square matrix
+ *
+ * This function is multi-threaded with OpenMP. It creates its own parallel OpenMP region.
+ *
+ * \param Asp (G_math_spvector **) 
+ * \param rows (int)
+ * \return (double **)
+ *
+ * */
+double **G_math_Asp_to_A(G_math_spvector ** Asp, int rows)
+{
+    int i, j;
+
+    double **A = NULL;
+
+    A = G_alloc_matrix(rows, rows);
+
+#pragma omp parallel for schedule (static) private(i, j)
+    for (i = 0; i < rows; i++) {
+	for (j = 0; j < Asp[i]->cols; j++) {
+	    A[i][Asp[i]->index[j]] = Asp[i]->values[j];
+	}
+    }
+    return A;
+}
+
+/*!
+ * \brief Convert a dense square matrix into a sparse matrix
+ *
+ * This function is multi-threaded with OpenMP. It creates its own parallel OpenMP region.
+ *
+ * \param A (double **) 
+ * \param rows (int)
+ * \param epsilon (double) -- entries greater than epsilon are treated as non-zero
+ * \return (G_math_spvector **)
+ *
+ * */
+G_math_spvector **G_math_A_to_Asp(double **A, int rows, double epsilon)
+{
+    int i, j;
+
+    int nonull, count = 0;
+
+    G_math_spvector **Asp = NULL;
+
+    Asp = G_math_alloc_spmatrix(rows);
+
+#pragma omp parallel for schedule (static) private(i, j, nonull, count)
+    for (i = 0; i < rows; i++) {
+	nonull = 0;
+	/*Count the number of non zero entries */
+	for (j = 0; j < rows; j++) {
+	    if (A[i][j] > epsilon)
+		nonull++;
+	}
+	/*Allocate the sparse vector and insert values */
+	G_math_spvector *v = G_math_alloc_spvector(nonull);
+
+	count = 0;
+	for (j = 0; j < rows; j++) {
+	    if (A[i][j] > epsilon) {
+		v->index[count] = j;
+		v->values[count] = A[i][j];
+		count++;
+	    }
+	}
+	/*Add vector to sparse matrix */
+	G_math_add_spvector(Asp, v, i);
+    }
+    return Asp;
+}
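+
+/*
+ * A minimal round-trip sketch, assuming a hypothetical dense matrix A
+ * (n rows) whose relevant entries are positive: entries greater than
+ * epsilon are copied into the sparse representation and can be expanded
+ * back into a dense matrix.
+ *
+ * \code
+ * G_math_spvector **Asp = G_math_A_to_Asp(A, n, 0.0);
+ * double **B = G_math_Asp_to_A(Asp, n);
+ *
+ * G_math_free_spmatrix(Asp, n);
+ * G_free_matrix(B);
+ * \endcode
+ */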

Deleted: grass/branches/develbranch_6/lib/gmath/svd.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/svd.c	2009-08-27 20:52:10 UTC (rev 38889)
+++ grass/branches/develbranch_6/lib/gmath/svd.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -1,283 +0,0 @@
-#include <math.h>
-#include <grass/gis.h>
-#include <grass/gmath.h>
-
-static double at, bt, ct;
-
-#define PYTHAG(a,b) ((at=fabs(a)) > (bt=fabs(b)) ? \
-    (ct=bt/at,at*sqrt(1.0+ct*ct)) : (bt ? (ct=at/bt,bt*sqrt(1.0+ct*ct)): 0.0))
-
-static double maxarg1, maxarg2;
-
-#define MAX(a,b) (maxarg1=(a),maxarg2=(b),(maxarg1) > (maxarg2) ?\
-	(maxarg1) : (maxarg2))
-#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
-
-int G_svdcmp(double **a, int m, int n, double *w, double **v)
-{
-    int flag, i, its, j, jj, k, ii = 0, nm = 0;
-    double c, f, h, s, x, y, z;
-    double anorm = 0.0, g = 0.0, scale = 0.0;
-    double *rv1, *G_alloc_vector();
-
-
-    if (m < n)
-	return -1;		/* must augment A with extra zero rows */
-    rv1 = G_alloc_vector(n);
-
-    n--;
-    m--;
-
-    for (i = 0; i <= n; i++) {
-	ii = i + 1;
-	rv1[i] = scale * g;
-	g = s = scale = 0.0;
-	if (i <= m) {
-	    for (k = i; k <= m; k++)
-		scale += fabs(a[k][i]);
-	    if (scale) {
-		for (k = i; k <= m; k++) {
-		    a[k][i] /= scale;
-		    s += a[k][i] * a[k][i];
-		}
-		f = a[i][i];
-		g = -SIGN(sqrt(s), f);
-		h = f * g - s;
-		a[i][i] = f - g;
-		if (i != n) {
-		    for (j = ii; j <= n; j++) {
-			for (s = 0.0, k = i; k <= m; k++)
-			    s += a[k][i] * a[k][j];
-			f = s / h;
-			for (k = i; k <= m; k++)
-			    a[k][j] += f * a[k][i];
-		    }
-		}
-		for (k = i; k <= m; k++)
-		    a[k][i] *= scale;
-	    }
-	}
-	w[i] = scale * g;
-	g = s = scale = 0.0;
-	if (i <= m && i != n) {
-	    for (k = ii; k <= n; k++)
-		scale += fabs(a[i][k]);
-	    if (scale) {
-		for (k = ii; k <= n; k++) {
-		    a[i][k] /= scale;
-		    s += a[i][k] * a[i][k];
-		}
-		f = a[i][ii];
-		g = -SIGN(sqrt(s), f);
-		h = f * g - s;
-		a[i][ii] = f - g;
-		for (k = ii; k <= n; k++)
-		    rv1[k] = a[i][k] / h;
-		if (i != m) {
-		    for (j = ii; j <= m; j++) {
-			for (s = 0.0, k = ii; k <= n; k++)
-			    s += a[j][k] * a[i][k];
-			for (k = ii; k <= n; k++)
-			    a[j][k] += s * rv1[k];
-		    }
-		}
-		for (k = ii; k <= n; k++)
-		    a[i][k] *= scale;
-	    }
-	}
-	anorm = MAX(anorm, (fabs(w[i]) + fabs(rv1[i])));
-    }
-    for (i = n; i >= 0; i--) {
-	if (i < n) {
-	    if (g) {
-		for (j = ii; j <= n; j++)
-		    v[j][i] = (a[i][j] / a[i][ii]) / g;
-		for (j = ii; j <= n; j++) {
-		    for (s = 0.0, k = ii; k <= n; k++)
-			s += a[i][k] * v[k][j];
-		    for (k = ii; k <= n; k++)
-			v[k][j] += s * v[k][i];
-		}
-	    }
-	    for (j = ii; j <= n; j++)
-		v[i][j] = v[j][i] = 0.0;
-	}
-	v[i][i] = 1.0;
-	g = rv1[i];
-	ii = i;
-    }
-    for (i = n; i >= 0; i--) {
-	ii = i + 1;
-	g = w[i];
-	if (i < n)
-	    for (j = ii; j <= n; j++)
-		a[i][j] = 0.0;
-	if (g) {
-	    g = 1.0 / g;
-	    if (i != n) {
-		for (j = ii; j <= n; j++) {
-		    for (s = 0.0, k = ii; k <= m; k++)
-			s += a[k][i] * a[k][j];
-		    f = (s / a[i][i]) * g;
-		    for (k = i; k <= m; k++)
-			a[k][j] += f * a[k][i];
-		}
-	    }
-	    for (j = i; j <= m; j++)
-		a[j][i] *= g;
-	}
-	else {
-	    for (j = i; j <= m; j++)
-		a[j][i] = 0.0;
-	}
-	++a[i][i];
-    }
-    for (k = n; k >= 0; k--) {
-	for (its = 1; its <= 30; its++) {
-	    flag = 1;
-	    for (ii = k; ii >= 0; ii--) {
-		nm = ii - 1;
-		if (fabs(rv1[ii]) + anorm == anorm) {
-		    flag = 0;
-		    break;
-		}
-		if (fabs(w[nm]) + anorm == anorm)
-		    break;
-	    }
-	    if (flag) {
-		c = 0.0;
-		s = 1.0;
-		for (i = ii; i <= k; i++) {
-		    f = s * rv1[i];
-		    if (fabs(f) + anorm != anorm) {
-			g = w[i];
-			h = PYTHAG(f, g);
-			w[i] = h;
-			h = 1.0 / h;
-			c = g * h;
-			s = (-f * h);
-			for (j = 0; j <= m; j++) {
-			    y = a[j][nm];
-			    z = a[j][i];
-			    a[j][nm] = y * c + z * s;
-			    a[j][i] = z * c - y * s;
-			}
-		    }
-		}
-	    }
-	    z = w[k];
-	    if (ii == k) {
-		if (z < 0.0) {
-		    w[k] = -z;
-		    for (j = 0; j <= n; j++)
-			v[j][k] = (-v[j][k]);
-		}
-		break;
-	    }
-	    if (its == 30)
-		return -2;	/*No convergence in 30 SVDCMP iterations */
-	    x = w[ii];
-	    nm = k - 1;
-	    y = w[nm];
-	    g = rv1[nm];
-	    h = rv1[k];
-	    f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2.0 * h * y);
-	    g = PYTHAG(f, 1.0);
-	    f = ((x - z) * (x + z) + h * ((y / (f + SIGN(g, f))) - h)) / x;
-	    c = s = 1.0;
-	    for (j = ii; j <= nm; j++) {
-		i = j + 1;
-		g = rv1[i];
-		y = w[i];
-		h = s * g;
-		g = c * g;
-		z = PYTHAG(f, h);
-		rv1[j] = z;
-		c = f / z;
-		s = h / z;
-		f = x * c + g * s;
-		g = g * c - x * s;
-		h = y * s;
-		y = y * c;
-		for (jj = 0; jj <= n; jj++) {
-		    x = v[jj][j];
-		    z = v[jj][i];
-		    v[jj][j] = x * c + z * s;
-		    v[jj][i] = z * c - x * s;
-		}
-		z = PYTHAG(f, h);
-		w[j] = z;
-		if (z) {
-		    z = 1.0 / z;
-		    c = f * z;
-		    s = h * z;
-		}
-		f = (c * g) + (s * y);
-		x = (c * y) - (s * g);
-		for (jj = 0; jj <= m; jj++) {
-		    y = a[jj][j];
-		    z = a[jj][i];
-		    a[jj][j] = y * c + z * s;
-		    a[jj][i] = z * c - y * s;
-		}
-	    }
-	    rv1[ii] = 0.0;
-	    rv1[k] = f;
-	    w[k] = x;
-	}
-    }
-    G_free_vector(rv1);
-    return 0;
-}
-
-#undef SIGN
-#undef MAX
-#undef PYTHAG
-
-int G_svbksb(double **u, double w[], double **v,
-	     int m, int n, double b[], double x[])
-{
-    int j, i;
-    double s, *tmp, *G_alloc_vector();
-
-    tmp = G_alloc_vector(n);
-    for (j = 0; j < n; j++) {
-	s = 0.0;
-	if (w[j]) {
-	    for (i = 0; i < m; i++)
-		s += u[i][j] * b[i];
-	    s /= w[j];
-	}
-	tmp[j] = s;
-    }
-    for (j = 0; j < n; j++) {
-	s = 0.0;
-	for (i = 0; i < n; i++)
-	    s += v[j][i] * tmp[i];
-	x[j] = s;
-    }
-    G_free_vector(tmp);
-
-    return 0;
-}
-
-#define TOL 1e-8
-
-int G_svelim(double *w, int n)
-{
-    int i;
-    double thresh;
-
-    thresh = 0.0;		/* remove singularity */
-    for (i = 0; i < n; i++)
-	if (w[i] > thresh)
-	    thresh = w[i];
-    thresh *= TOL;
-    for (i = 0; i < n; i++)
-	if (w[i] < thresh)
-	    w[i] = 0.0;
-
-    return 0;
-}
-
-#undef TOL

Added: grass/branches/develbranch_6/lib/gmath/test/Makefile
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/Makefile	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/Makefile	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,9 @@
+MODULE_TOPDIR = ../../..
+
+PGM=test.gmath.lib
+
+LIBES = $(GISLIB) $(GMATHLIB) 
+DEPENDENCIES = $(GISDEP) $(GMATHDEP)
+include $(MODULE_TOPDIR)/include/Make/Module.make
+
+default: cmd

Added: grass/branches/develbranch_6/lib/gmath/test/bench_blas2.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/bench_blas2.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/bench_blas2.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,111 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      Benchmarks for the blas level 2 functions
+*
+* COPYRIGHT:    (C) 2000 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include <math.h>
+#include "test_gmath_lib.h"
+#include <sys/time.h>
+#define EPSILON 0.0000001
+
+/* prototypes */
+static void bench_blas_level_2_double(int rows);
+
+
+/* *************************************************************** */
+/* Perform the blas level 2 benchmarks *************************** */
+/* *************************************************************** */
+int bench_blas_level_2(int rows)
+{
+    G_message(_("\n++ Running blas level 2 benchmark ++"));
+
+    bench_blas_level_2_double(rows);
+
+    return 1;
+}
+
+/* *************************************************************** */
+/* ************** D O U B L E ************************************ */
+/* *************************************************************** */
+void bench_blas_level_2_double(int rows)
+{
+
+    double **A, *x, *y, *z;
+    struct timeval tstart;
+    struct timeval tend;
+
+    G_math_les *les;
+    les = create_normal_unsymmetric_les(rows);
+    G_math_les *sples;
+    sples = create_sparse_unsymmetric_les(rows);
+
+    x = G_alloc_vector(rows);
+    y = G_alloc_vector(rows);
+    z = G_alloc_vector(rows);
+
+    A = G_alloc_matrix(rows, rows);
+
+    fill_d_vector_range_1(x, 1, rows);
+
+    gettimeofday(&tstart, NULL);
+#pragma omp parallel default(shared)
+{
+    G_math_Ax_sparse(sples->Asp, x, z, rows);
+}
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time G_math_Ax_sparse: %g\n", compute_time_difference(tstart, tend));
+    gettimeofday(&tstart, NULL);
+#pragma omp parallel default(shared)
+{
+    G_math_d_Ax(les->A, x, z, rows, rows);
+}
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time G_math_d_Ax: %g\n", compute_time_difference(tstart, tend));
+    gettimeofday(&tstart, NULL);
+#pragma omp parallel default(shared)
+{
+    G_math_d_aAx_by(les->A, x, y, 3.0, 4.0, z, rows, rows);
+}
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time G_math_d_aAx_by: %g\n", compute_time_difference(tstart, tend));
+    gettimeofday(&tstart, NULL);
+#pragma omp parallel default(shared)
+{
+    G_math_d_x_dyad_y(x, x, A, rows, rows);
+}
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time G_math_d_x_dyad_y: %g\n", compute_time_difference(tstart, tend));
+    gettimeofday(&tstart, NULL);
+
+
+    if(x)
+      G_free_vector(x);
+    if(y)
+      G_free_vector(y);
+    if(z)
+      G_free_vector(z);
+
+    G_math_free_les(les);
+    G_math_free_les(sples);
+
+    if(A)
+      G_free_matrix(A);
+
+    return;
+}
+

Added: grass/branches/develbranch_6/lib/gmath/test/bench_blas3.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/bench_blas3.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/bench_blas3.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,93 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2007
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      Benchmarks for the blas level 3 functions
+*
+* COPYRIGHT:    (C) 2007 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include <math.h>
+#include "test_gmath_lib.h"
+#include <sys/time.h>
+
+/* prototypes */
+static void bench_blas_level_3_double(int rows);
+
+
+/* *************************************************************** */
+/* Perform the blas level 3 benchmarks *************************** */
+/* *************************************************************** */
+int bench_blas_level_3(int rows)
+{
+    G_message(_("\n++ Running blas level 3 benchmark ++"));
+
+    bench_blas_level_3_double(rows);
+
+    return 1;
+}
+
+/* *************************************************************** */
+/* ************** D O U B L E ************************************ */
+/* *************************************************************** */
+void bench_blas_level_3_double(int rows)
+{
+    struct timeval tstart;
+    struct timeval tend;
+    double **A, **B, **C, *x, *y;
+
+    x = G_alloc_vector(rows);
+    y = G_alloc_vector(rows);
+
+    A = G_alloc_matrix(rows, rows);
+    B = G_alloc_matrix(rows, rows);
+    C = G_alloc_matrix(rows, rows);
+
+    fill_d_vector_range_1(x, 1, rows);
+    fill_d_vector_range_1(y, 1, rows);
+
+    fill_d_vector_range_1(A[0], 1, rows*rows);
+    fill_d_vector_range_1(B[0], 1, rows*rows);
+
+
+    gettimeofday(&tstart, NULL);
+#pragma omp parallel default(shared)
+{
+    G_math_d_aA_B(A, B, 4.0 , C, rows , rows);
+}
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time G_math_d_aA_B: %g\n", compute_time_difference(tstart, tend));
+    gettimeofday(&tstart, NULL);
+#pragma omp parallel default(shared)
+{
+    G_math_d_AB(A, B, C, rows , rows , rows);
+}
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time G_math_d_AB: %g\n", compute_time_difference(tstart, tend));
+
+
+    if(x)
+      G_free_vector(x);
+    if(y)
+      G_free_vector(y);
+
+    if(A)
+      G_free_matrix(A);
+    if(B)
+      G_free_matrix(B);
+    if(C)
+      G_free_matrix(C);
+
+    return;
+}

Added: grass/branches/develbranch_6/lib/gmath/test/bench_solver_direct.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/bench_solver_direct.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/bench_solver_direct.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,99 @@
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      benchmarking the direct solvers
+ *
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include "test_gmath_lib.h"
+#include <sys/time.h>
+
+/* prototypes */
+static int bench_solvers(int rows);
+
+
+/* ************************************************************************* */
+/* Perform the direct solver benchmarks ************************************ */
+
+/* ************************************************************************* */
+int bench_solvers_direct(int rows) {
+    G_message(_("\n++ Running direct solver benchmark ++"));
+
+    bench_solvers(rows);
+
+    return 1;
+}
+
+
+/* *************************************************************** */
+/* Benchmark all implemented direct solvers with a normal matrix ** */
+
+/* *************************************************************** */
+int bench_solvers(int rows) {
+    G_math_les *les;
+    struct timeval tstart;
+    struct timeval tend;
+
+    G_message("\t * benchmarking gmath lu decomposition solver with unsymmetric matrix\n");
+
+    les = create_normal_unsymmetric_les(rows);
+    gettimeofday(&tstart, NULL);
+    G_math_solver_lu(les->A, les->x, les->b, les->rows);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time gmath lu decomposition: %g\n", compute_time_difference(tstart, tend));
+    G_math_free_les(les);
+
+    G_message("\t * benchmarking ccmath lu decomposition solver with unsymmetric matrix\n");
+
+    les = create_normal_unsymmetric_les(rows);
+    gettimeofday(&tstart, NULL);
+    G_math_solv(les->A, les->b, les->rows);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time ccmath lu decomposition: %g\n", compute_time_difference(tstart, tend));
+    G_math_free_les(les);
+
+
+    G_message("\t * benchmarking gauss elimination solver with unsymmetric matrix\n");
+
+    les = create_normal_unsymmetric_les(rows);
+    gettimeofday(&tstart, NULL);
+    G_math_solver_gauss(les->A, les->x, les->b, les->rows);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time gauss elimination: %g\n", compute_time_difference(tstart, tend));
+    G_math_free_les(les);
+
+    G_message("\t * benchmarking gmath cholesky decomposition solver with symmetric matrix\n");
+
+    les = create_normal_symmetric_les(rows);
+    gettimeofday(&tstart, NULL);
+    G_math_solver_cholesky(les->A, les->x, les->b, les->rows, les->rows);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time gmath cholesky decomposition: %g\n", compute_time_difference(tstart, tend));
+    G_math_free_les(les);
+
+    G_message("\t * benchmarking ccmath cholesky decomposition solver with symmetric matrix\n");
+
+    les = create_normal_symmetric_les(rows);
+    gettimeofday(&tstart, NULL);
+    G_math_solvps(les->A, les->b, les->rows);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time ccmath cholesky decomposition: %g\n", compute_time_difference(tstart, tend));
+    G_math_free_les(les);
+
+    return 1;
+}
+

Added: grass/branches/develbranch_6/lib/gmath/test/bench_solver_krylov.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/bench_solver_krylov.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/bench_solver_krylov.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,111 @@
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      benchmarking the krylov subspace solvers
+ *
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include "test_gmath_lib.h"
+#include <sys/time.h>
+
+/* prototypes */
+static int bench_solvers(int rows);
+
+
+/* ************************************************************************* */
+/* Perform the krylov solver benchmarks ************************************ */
+
+/* ************************************************************************* */
+int bench_solvers_krylov(int rows) {
+    G_message(_("\n++ Running krylov solver benchmark ++"));
+
+    bench_solvers(rows);
+
+    return 1;
+}
+
+
+/* *************************************************************** */
+/* Benchmark all implemented krylov solvers with sparse and normal matrices */
+
+/* *************************************************************** */
+int bench_solvers(int rows) {
+    G_math_les *les;
+    G_math_les *sples;
+    struct timeval tstart;
+    struct timeval tend;
+
+    G_message("\t * benchmarking pcg solver with symmetric matrix and preconditioner 1\n");
+
+    les = create_normal_symmetric_les(rows);
+    sples = create_sparse_symmetric_les(rows);
+
+    gettimeofday(&tstart, NULL);
+    G_math_solver_pcg(les->A, les->x, les->b, les->rows, 250, 0.1e-9, 1);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time pcg normal matrix: %g\n", compute_time_difference(tstart, tend));
+
+    gettimeofday(&tstart, NULL);
+    G_math_solver_sparse_pcg(sples->Asp, sples->x, sples->b, les->rows, 250,
+            0.1e-9, 1);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time pcg sparse matrix: %g\n", compute_time_difference(tstart, tend));
+
+    G_math_free_les(les);
+    G_math_free_les(sples);
+
+    G_message("\t * benchmark cg solver with symmetric matrix\n");
+
+    les = create_normal_symmetric_les(rows);
+    sples = create_sparse_symmetric_les(rows);
+    
+    gettimeofday(&tstart, NULL);
+    G_math_solver_cg(les->A, les->x, les->b, les->rows, 250, 0.1e-9);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time cg normal matrix: %g\n", compute_time_difference(tstart, tend));
+    
+    gettimeofday(&tstart, NULL);
+    G_math_solver_sparse_cg(sples->Asp, sples->x, sples->b, les->rows, 250,
+            0.1e-9);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time cg sparse matrix: %g\n", compute_time_difference(tstart, tend));
+    
+    G_math_free_les(les);
+    G_math_free_les(sples);
+
+    G_message("\t * benchmark bicgstab solver with unsymmetric matrix\n");
+
+    les = create_normal_unsymmetric_les(rows);
+    sples = create_sparse_unsymmetric_les(rows);
+    
+    gettimeofday(&tstart, NULL);
+    G_math_solver_bicgstab(les->A, les->x, les->b, les->rows, 250, 0.1e-9);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time bicgstab normal matrix: %g\n", compute_time_difference(tstart, tend));
+    
+    gettimeofday(&tstart, NULL);
+    G_math_solver_sparse_bicgstab(sples->Asp, sples->x, sples->b, les->rows,
+            250, 0.1e-9);
+    gettimeofday(&tend, NULL);
+    G_important_message("Computation time bicgstab sparse matrix: %g\n", compute_time_difference(tstart, tend));
+
+    G_math_free_les(les);
+    G_math_free_les(sples);
+
+    return 1;
+}
+

Added: grass/branches/develbranch_6/lib/gmath/test/test.gmath.lib.html
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test.gmath.lib.html	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test.gmath.lib.html	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,3 @@
+
+
+Take a look at the module command line help for more information.

Added: grass/branches/develbranch_6/lib/gmath/test/test_blas1.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_blas1.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_blas1.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,517 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      Unit tests for the blas level 1 functions
+*
+* COPYRIGHT:    (C) 2000 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include <math.h>
+#include "test_gmath_lib.h"
+
+#define EPSILON 0.000001
+
+
+/* prototypes */
+static int test_blas_level_1_double(void);
+static int test_blas_level_1_float(void);
+static int test_blas_level_1_int(void);
+
+
+/* *************************************************************** */
+/* Perform the blas level 1 unit tests *************************** */
+/* *************************************************************** */
+int unit_test_blas_level_1(void)
+{
+    int sum = 0;
+
+    G_message(_("\n++ Running blas level 1 unit tests ++"));
+
+    sum += test_blas_level_1_double();
+    sum += test_blas_level_1_float();
+    sum += test_blas_level_1_int();
+
+    if (sum > 0)
+	G_warning(_("\n-- blas level 1 unit tests failure --"));
+    else
+	G_message(_("\n-- blas level 1 unit tests finished successfully --"));
+
+    return sum;
+}
+
+
+/* *************************************************************** */
+/* ************** D O U B L E ************************************ */
+/* *************************************************************** */
+int test_blas_level_1_double(void)
+{
+
+    int sum = 0;
+    int rows = 10000;
+    double *x, *y, *z, a = 0.0, b = 0.0, c = 0.0, d = 0.0, e = 0.0;
+
+    x = G_alloc_vector(rows);
+    y = G_alloc_vector(rows);
+    z = G_alloc_vector(rows);
+
+    fill_d_vector_scalar(x, 1, rows);
+    fill_d_vector_scalar(y, 2, rows);
+
+/*test the grass implementation*/
+    G_math_d_x_dot_y(x, y, &a, rows);
+    G_math_d_asum_norm(x, &b, rows);
+    G_math_d_euclid_norm(x, &c, rows);
+
+
+    if(a != 2.0*rows) {
+    	G_message("Error in G_math_d_x_dot_y %f != %f", 2.0*rows, a);
+	sum++;
+    }
+
+    if(b != rows) {
+    	G_message("Error in G_math_d_asum_norm");
+	sum++;
+    }
+
+    if(c != sqrt(rows)) {
+    	G_message("Error in G_math_d_euclid_norm");
+	sum++;
+    }
+    
+    /*test the ATLAS implementation*/
+    a = G_math_dnrm2(x, rows);
+    b = G_math_dasum(x, rows);
+    c = G_math_ddot(x, y, rows);
+
+    if(a != sqrt(rows)) {
+    	G_message("Error in G_math_dnrm2 %f != %f", sqrt(rows), a);
+	sum++;
+    }
+
+    if(b != rows) {
+    	G_message("Error in G_math_dasum %f != %f", (double)rows, b);
+	sum++;
+    }
+
+    if(c != 2.0*rows) {
+    	G_message("Error in G_math_ddot %f != %f", 2.0*rows, c);
+	sum++;
+    }
+
+    fill_d_vector_range_1(x, 1.0, rows);
+    fill_d_vector_range_2(y, 1.0, rows);
+
+    /*grass function*/
+    G_math_d_max_norm(x, &a, rows);
+    /*atlas function*/
+    b = G_math_idamax(x, rows);
+
+    if(a != 1.0*(rows - 1)) {
+    	G_message("Error in G_math_d_max_norm: %f != %f", (double)1.0*(rows - 1), a);
+	sum++;
+    }
+
+    if(b != 1.0*(rows - 1)) {
+    	G_message("Error in G_math_idamax: %f != %f", (double)1.0*(rows - 1), b);
+	sum++;
+    }
+
+#pragma omp parallel default(shared)
+{
+    G_math_d_ax_by(x, y, z, 1.0, 1.0, rows);
+}
+    G_math_d_asum_norm(z, &a, rows);
+
+#pragma omp parallel default(shared)
+{
+    G_math_d_ax_by(x, y, z, 1.0, -1.0, rows);
+}
+    G_math_d_asum_norm(z, &b, rows);
+
+#pragma omp parallel default(shared)
+{
+    G_math_d_ax_by(x, y, z, 2.0, 1.0, rows);
+}
+    G_math_d_asum_norm(z, &c, rows);
+
+
+    if(a != 1.0*(rows - 1)* rows) {
+    	G_message("Error in G_math_d_ax_by: %f != %f", (double)1.0*(rows - 1)* rows, a);
+	sum++;
+    }
+
+    if(b != 5.0*(rows)*(rows/10)) {
+    	G_message("Error in G_math_d_ax_by: %f != %f", (double)5.0*(rows)*(rows/10), b);
+	sum++;
+    }
+
+    if(c != 149985000) {
+    	G_message("Error in G_math_d_ax_by: 149985000 != %f", c);
+	sum++;
+    }
+
+
+#pragma omp parallel  default(shared)
+{
+    /*scale x with 1*/
+    G_math_d_ax_by(x, z, z, 1.0, 0.0, rows);
+}
+    G_math_d_asum_norm(x, &a, rows);
+    G_math_d_asum_norm(z, &b, rows);
+    /*scale x with -1*/
+#pragma omp parallel  default(shared)
+{
+    G_math_d_ax_by(x, z, z, -1.0, 0.0, rows);
+}
+    G_math_d_asum_norm(z, &c, rows);
+
+    /*ATLAS implementation*/
+    G_math_dscal(x, 1.0, rows);
+    G_math_d_asum_norm(x, &d, rows);
+
+    /*ATLAS implementation*/
+    fill_d_vector_range_1(x, 1.0, rows);
+    fill_d_vector_scalar(z, 0.0, rows);
+    G_math_daxpy(x, z, 1.0, rows);
+    G_math_d_asum_norm(z, &e, rows);
+
+
+    if(a != 49995000 || a != b || b != c) {
+    	G_message("Error in G_math_d_ax_by: 49995000 != %f", a);
+	sum++;
+    }
+
+    if(49995000 != d) {
+    	G_message("Error in G_math_dscal: 49995000 != %f", d);
+	sum++;
+    }
+
+    if(49995000 != e) {
+    	G_message("Error in G_math_daxpy: 49995000 != %f", e);
+	sum++;
+    }
+
+    fill_d_vector_scalar(z, 0, rows);
+
+    G_math_d_copy(x, z, rows);
+    G_math_d_asum_norm(x, &a, rows);
+    G_math_dcopy(x, z, rows);
+    G_math_d_asum_norm(x, &b, rows);
+
+    if(a != 49995000) {
+    	G_message("Error in G_math_d_copy: 49995000 != %f", a);
+	sum++;
+    }
+
+    if(b != 49995000) {
+    	G_message("Error in G_math_dcopy: 49995000 != %f", b);
+	sum++;
+    }
+
+    G_free_vector(x);
+    G_free_vector(y);
+    G_free_vector(z);
+
+    return sum;
+}
+
+
+/* *************************************************************** */
+/* ************** F L O A T ************************************** */
+/* *************************************************************** */
+int test_blas_level_1_float(void)
+{
+
+    int sum = 0;
+    int rows = 1000;
+    float *x, *y, *z, a = 0.0, b = 0.0, c = 0.0, d = 0.0, e = 0.0;
+
+    x = G_alloc_fvector(rows);
+    y = G_alloc_fvector(rows);
+    z = G_alloc_fvector(rows);
+
+    fill_f_vector_scalar(x, 1, rows);
+    fill_f_vector_scalar(y, 2, rows);
+
+/*test the grass implementation*/
+    G_math_f_x_dot_y(x, y, &a, rows);
+    G_math_f_asum_norm(x, &b, rows);
+    G_math_f_euclid_norm(x, &c, rows);
+
+
+    if(a != 2.0*rows) {
+    	G_message("Error in G_math_f_x_dot_y %f != %f", 2.0*rows, a);
+	sum++;
+    }
+
+    if(b != rows) {
+    	G_message("Error in G_math_f_asum_norm");
+	sum++;
+    }
+
+    if(fabs(c - (float)sqrt(rows)) > EPSILON) {
+    	G_message("Error in G_math_f_euclid_norm");
+	sum++;
+    }
+    
+    /*test the ATLAS implementation*/
+    a = G_math_snrm2(x, rows);
+    b = G_math_sasum(x, rows);
+    c = G_math_sdot(x, y, rows);
+
+    if(fabs(a - sqrt(rows)) > EPSILON) {
+    	G_message("Error in G_math_snrm2 %f != %f", sqrt(rows), a);
+	sum++;
+    }
+
+    if(b != rows) {
+    	G_message("Error in G_math_sasum %f != %f", (double)rows, b);
+	sum++;
+    }
+
+    if(c != 2.0*rows) {
+    	G_message("Error in G_math_sdot %f != %f", 2.0*rows, c);
+	sum++;
+    }
+
+    fill_f_vector_range_1(x, 1.0, rows);
+    fill_f_vector_range_2(y, 1.0, rows);
+
+    /*grass function*/
+    G_math_f_max_norm(x, &a, rows);
+    /*atlas function*/
+    b = G_math_isamax(x, rows);
+
+    if(a != 1.0*(rows - 1)) {
+    	G_message("Error in G_math_f_max_norm: %f != %f", (float)1.0*(rows - 1), a);
+	sum++;
+    }
+
+    if(b != 1.0*(rows - 1)) {
+    	G_message("Error in G_math_isamax: %f != %f", (float)1.0*(rows - 1), b);
+	sum++;
+    }
+
+#pragma omp parallel  default(shared)
+{
+    G_math_f_ax_by(x, y, z, 1.0, 1.0, rows);
+}
+    G_math_f_asum_norm(z, &a, rows);
+#pragma omp parallel  default(shared)
+{
+    G_math_f_ax_by(x, y, z, 1.0, -1.0, rows);
+}
+G_math_f_asum_norm(z, &b, rows);
+#pragma omp parallel  default(shared)
+{
+    G_math_f_ax_by(x, y, z, 2.0, 1.0, rows);
+}
+    G_math_f_asum_norm(z, &c, rows);
+
+
+    if(fabs(a - 1.0*(rows - 1)* rows) > EPSILON) {
+    	G_message("Error in G_math_f_ax_by 1: %f != %f", (float)1.0*(rows - 1)* rows, a);
+	sum++;
+    }
+
+    if(fabs(b - 5.0*(rows)*(rows/10)) > EPSILON) {
+    	G_message("Error in G_math_f_ax_by 2: %f != %f", (float)5.0*(rows)*(rows/10), b);
+	sum++;
+    }
+
+    if(fabs(c - 1498500) > EPSILON) {
+    	G_message("Error in G_math_f_ax_by 3: 1498500 != %f", c);
+	sum++;
+    }
+
+
+#pragma omp parallel default(shared)
+{
+    /*scale x with 1*/
+    G_math_f_ax_by(x, z, z, 1.0, 0.0, rows);
+}
+    G_math_f_asum_norm(x, &a, rows);
+    G_math_f_asum_norm(z, &b, rows);
+    /*scale x with -1*/
+#pragma omp parallel default(shared)
+{
+    G_math_f_ax_by(x, z, z, -1.0, 0.0, rows);
+}
+    G_math_f_asum_norm(z, &c, rows);
+
+    /*ATLAS implementation*/
+    G_math_sscal(x, 1.0, rows);
+    G_math_f_asum_norm(x, &d, rows);
+
+    /*ATLAS implementation*/
+    fill_f_vector_range_1(x, 1.0, rows);
+    fill_f_vector_scalar(z, 0.0, rows);
+    G_math_saxpy(x, z, 1.0, rows);
+    G_math_f_asum_norm(z, &e, rows);
+
+
+    if(fabs(a - 499500) > EPSILON) {
+    	G_message("Error in G_math_f_ax_by 4: 499500 != %f", a);
+	sum++;
+    }
+    if(fabs(b - 499500) > EPSILON) {
+    	G_message("Error in G_math_f_ax_by 4: 499500 != %f", b);
+	sum++;
+    }
+    if(fabs(c - 499500) > EPSILON) {
+    	G_message("Error in G_math_f_ax_by 4: 499500 != %f", c);
+	sum++;
+    }
+    if(fabs(d - 499500) > EPSILON) {
+    	G_message("Error in G_math_sscal: 499500 != %f", d);
+	sum++;
+    }
+
+    if(fabs(e - 499500) > EPSILON) {
+    	G_message("Error in G_math_saxpy: 499500 != %f", e);
+	sum++;
+    }
+    
+    fill_f_vector_range_1(x, 1.0, rows);
+    fill_f_vector_scalar(z, 0, rows);
+
+    G_math_f_copy(x, z, rows);
+    G_math_f_asum_norm(x, &a, rows);
+    G_math_scopy(x, z, rows);
+    G_math_f_asum_norm(x, &b, rows);
+
+    if(fabs(a - 499500) > EPSILON) {
+    	G_message("Error in G_math_f_copy: 499500 != %f", a);
+	sum++;
+    }
+
+    if(fabs(b - 499500) > EPSILON) {
+    	G_message("Error in G_math_scopy: 499500 != %f", b);
+	sum++;
+    }
+
+    G_free_fvector(x);
+    G_free_fvector(y);
+    G_free_fvector(z);
+
+    return sum;
+}
+
+
+/* *************************************************************** */
+/* ************** I N T E G E R ********************************** */
+/* *************************************************************** */
+int test_blas_level_1_int(void)
+{
+
+    int sum = 0;
+    int rows = 10000;
+    int *x, *y, *z, max;
+    double a, b, c;
+
+    x = G_alloc_ivector(rows);
+    y = G_alloc_ivector(rows);
+    z = G_alloc_ivector(rows);
+
+    fill_i_vector_scalar(x, 1, rows);
+    fill_i_vector_scalar(y, 2, rows);
+
+
+    G_math_i_x_dot_y(x, y, &a, rows);
+    G_math_i_asum_norm(x, &b, rows);
+    G_math_i_euclid_norm(x, &c, rows);
+
+
+    if(a != 2*rows) {
+    	G_message("Error in G_math_i_x_dot_y");
+	sum++;
+    }
+
+    if(b != rows) {
+    	G_message("Error in G_math_i_asum_norm");
+	sum++;
+    }
+
+    if(c != sqrt((double)rows)) {
+    	G_message("Error in G_math_i_euclid_norm");
+	sum++;
+    }
+
+    fill_i_vector_range_1(x, 1, rows);
+    fill_i_vector_range_2(y, 1, rows);
+
+    G_math_i_max_norm(x, &max, rows);
+
+    if(max != (rows - 1)) {
+    	G_message("Error in G_math_i_max_norm: %i != %i", (rows - 1), max);
+	sum++;
+    }
+
+#pragma omp parallel default(shared)
+{
+    G_math_i_ax_by(x, y, z, 1, 1, rows);
+}
+    G_math_i_asum_norm(z, &a, rows);
+#pragma omp parallel default(shared)
+{
+    G_math_i_ax_by(x, y, z, 1, -1, rows);
+}
+    G_math_i_asum_norm(z, &b, rows);
+#pragma omp parallel default(shared)
+{
+    G_math_i_ax_by(x, y, z, 2, 1, rows);
+}
+    G_math_i_asum_norm(z, &c, rows);
+
+
+    if(a != 1.0*(rows - 1)* rows) {
+    	G_message("Error in G_math_i_ax_by: %f != %f", 1.0*(rows - 1)* rows, a);
+	sum++;
+    }
+
+    if(b != 5.0*(rows)*(rows/10)) {
+    	G_message("Error in G_math_i_ax_by: %f != %f", 5.0*(rows)*(rows/10), b);
+	sum++;
+    }
+
+    if(c != 149985000) {
+    	G_message("Error in G_math_i_ax_by: 149985000 != %f", c);
+	sum++;
+    }
+
+
+#pragma omp parallel default(shared)
+{
+    /*scale x with 1*/
+    G_math_i_ax_by(x, z, z, 1, 0, rows);
+}
+    G_math_i_asum_norm(x, &a, rows);
+    G_math_i_asum_norm(z, &b, rows);
+    
+    /*scale a with -1*/
+#pragma omp parallel default(shared)
+{
+    G_math_i_ax_by(x, z, z, -1, 0, rows);
+}
+    G_math_i_asum_norm(z, &c, rows);
+
+
+    if(a != 49995000 || a != b || b != c) {
+    	G_message("Error in G_math_i_ax_by: 49995000 != %f", a);
+	sum++;
+    }
+
+    return sum;
+}

Added: grass/branches/develbranch_6/lib/gmath/test/test_blas2.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_blas2.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_blas2.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,285 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      Unit tests for the blas level 2 functions
+*
+* COPYRIGHT:    (C) 2000 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include <math.h>
+#include "test_gmath_lib.h"
+
+#define EPSILON 0.0000001
+
+/* prototypes */
+static int test_blas_level_2_double(void);
+static int test_blas_level_2_float(void);
+
+
+/* *************************************************************** */
+/* Perform the blas level 2 unit tests *************************** */
+/* *************************************************************** */
+int unit_test_blas_level_2(void)
+{
+    int sum = 0;
+
+    G_message(_("\n++ Running blas level 2 unit tests ++"));
+
+    sum += test_blas_level_2_double();
+    sum += test_blas_level_2_float();
+
+    if (sum > 0)
+	G_warning(_("\n-- blas level 2 unit tests failure --"));
+    else
+	G_message(_("\n-- blas level 2 unit tests finished successfully --"));
+
+    return sum;
+}
+/*
+extern double *G_math_Ax_sparse(G_math_spvector **, double *, double *, int );
+extern double *G_math_d_Ax(double **, double *, double *, int , int );
+extern float *G_math_f_Ax(float **, float *, float *, int , int );
+extern double **G_math_d_x_dyad_y(double *, double *, double **, int, int );
+extern float **G_math_f_x_dyad_y(float *, float *, float **, int, int );
+extern double *G_math_d_aAx_by(double **, double *, double *, double , double , double *, int , int );
+extern float *G_math_f_aAx_by(float **, float *, float *, float , float , float *, int , int );
+*/
+
+/* *************************************************************** */
+/* ************** D O U B L E ************************************ */
+/* *************************************************************** */
+int test_blas_level_2_double(void)
+{
+
+    int sum = 0;
+    int rows = TEST_NUM_ROWS;
+    double **A, **B, **C, *x, *y, *z, a = 0.0, b = 0.0, c = 0.0, d = 0.0, e = 0.0, f = 0.0, g = 0.0, h = 0.0, i = 0.0;
+
+    G_math_les *les;
+    les = create_normal_unsymmetric_les(rows);
+    G_math_les *sples;
+    sples = create_sparse_unsymmetric_les(rows);
+
+    x = G_alloc_vector(rows);
+    y = G_alloc_vector(rows);
+    z = G_alloc_vector(rows);
+
+    A = G_alloc_matrix(rows, rows);
+    B = G_alloc_matrix(rows, rows);
+    C = G_alloc_matrix(rows, rows);
+
+    fill_d_vector_scalar(x, 1, rows);
+    fill_d_vector_scalar(y, 0, rows);
+
+
+#pragma omp parallel default(shared)
+{
+    G_math_Ax_sparse(sples->Asp, x, z, rows);
+    G_math_d_asum_norm(z, &a, rows);
+
+    G_math_d_Ax(les->A, x, z, rows, rows);
+    G_math_d_asum_norm(z, &b, rows);
+
+    G_math_d_aAx_by(les->A, x, y, 1.0, 1.0, z, rows, rows);
+    G_math_d_asum_norm(z, &c, rows);
+
+    G_math_d_aAx_by(les->A, x, y, -1.0, 1.0, z, rows, rows);
+    G_math_d_asum_norm(z, &d, rows);
+
+    G_math_d_aAx_by(les->A, x, y, 1.0, 0.0, z, rows, rows);
+    G_math_d_asum_norm(z, &e, rows);
+
+    G_math_d_aAx_by(les->A, x, y, -1.0, -1.0, z, rows, rows);
+    G_math_d_asum_norm(z, &f, rows);
+
+    G_math_d_x_dyad_y(x, x, A, rows, rows);
+    G_math_d_Ax(A, x, z, rows, rows);
+    G_math_d_asum_norm(z, &g, rows);
+
+    G_math_d_x_dyad_y(x, x, C, rows, rows);
+    G_math_d_Ax(A, x, z, rows, rows);
+    G_math_d_asum_norm(z, &h, rows);
+}
+
+    G_math_d_asum_norm(les->b, &i, rows);
+
+    if(a - i > EPSILON) {
+    	G_message("Error in G_math_Ax_sparse: %f != %f", i, a);
+	sum++;
+    }
+
+    if(b - i > EPSILON) {
+    	G_message("Error in G_math_d_Ax: %f != %f", i, b);
+	sum++;
+    }
+
+    if(c - i > EPSILON) {
+    	G_message("Error in G_math_aAx_by: %f != %f", i, c);
+	sum++;
+    }
+
+    if(d - i > EPSILON) {
+    	G_message("Error in G_math_aAx_by: %f != %f", i, d);
+	sum++;
+    }
+
+    if(e - i > EPSILON) {
+    	G_message("Error in G_math_aAx_by: %f != %f", i, e);
+	sum++;
+    }
+
+    if(f - i > EPSILON) {
+    	G_message("Error in G_math_aAx_by: %f != %f", i, f);
+	sum++;
+    }
+
+    if(g - (double)rows*rows > EPSILON) {
+    	G_message("Error in G_math_d_x_dyad_y: %f != %f", (double)rows*rows, g);
+	sum++;
+    }
+
+    if(h - (double)rows*rows > EPSILON) {
+    	G_message("Error in G_math_d_x_dyad_y: %f != %f", (double)rows*rows, h);
+	sum++;
+    }
+
+    if(x)
+      G_free_vector(x);
+    if(y)
+      G_free_vector(y);
+    if(z)
+      G_free_vector(z);
+
+    G_math_free_les(les);
+    G_math_free_les(sples);
+
+    if(A)
+      G_free_matrix(A);
+    if(B)
+      G_free_matrix(B);
+    if(C)
+      G_free_matrix(C);
+
+    return sum;
+}
+
+
+/* *************************************************************** */
+/* ************** F L O A T ************************************** */
+/* *************************************************************** */
+int test_blas_level_2_float(void)
+{
+
+    int sum = 0;
+    int rows = TEST_NUM_ROWS;
+    float **A, **B, **C, *x, *y, *z, b = 0.0, c = 0.0, d = 0.0, e = 0.0, f = 0.0, g = 0.0, h = 0.0, i = 0.0;
+
+    G_math_f_les *les;
+    les = create_normal_unsymmetric_f_les(rows);
+
+    x = G_alloc_fvector(rows);
+    y = G_alloc_fvector(rows);
+    z = G_alloc_fvector(rows);
+
+    A = G_alloc_fmatrix(rows, rows);
+    B = G_alloc_fmatrix(rows, rows);
+    C = G_alloc_fmatrix(rows, rows);
+
+    fill_f_vector_scalar(x, 1, rows);
+    fill_f_vector_scalar(y, 0, rows);
+
+
+#pragma omp parallel default(shared)
+{
+    G_math_f_Ax(les->A, x, z, rows, rows);
+    G_math_f_asum_norm(z, &b, rows);
+
+    G_math_f_aAx_by(les->A, x, y, 1.0, 1.0, z, rows, rows);
+    G_math_f_asum_norm(z, &c, rows);
+
+    G_math_f_aAx_by(les->A, x, y, -1.0, 1.0, z, rows, rows);
+    G_math_f_asum_norm(z, &d, rows);
+
+    G_math_f_aAx_by(les->A, x, y, 1.0, 0.0, z, rows, rows);
+    G_math_f_asum_norm(z, &e, rows);
+
+    G_math_f_aAx_by(les->A, x, y, -1.0, -1.0, z, rows, rows);
+    G_math_f_asum_norm(z, &f, rows);
+
+    G_math_f_x_dyad_y(x, x, A, rows, rows);
+    G_math_f_Ax(A, x, z, rows, rows);
+    G_math_f_asum_norm(z, &g, rows);
+
+    G_math_f_x_dyad_y(x, x, C, rows, rows);
+    G_math_f_Ax(C, x, z, rows, rows);
+    G_math_f_asum_norm(z, &h, rows);
+}
+
+
+    G_math_f_asum_norm(les->b, &i, rows);
+
+    if(b - i > EPSILON) {
+    	G_message("Error in G_math_f_Ax: %f != %f", i, b);
+	sum++;
+    }
+
+    if(c - i > EPSILON) {
+    	G_message("Error in G_math_f_aAx_by: %f != %f", i, c);
+	sum++;
+    }
+
+    if(d - i > EPSILON) {
+    	G_message("Error in G_math_f_aAx_by: %f != %f", i, d);
+	sum++;
+    }
+
+    if(e - i > EPSILON) {
+    	G_message("Error in G_math_f_aAx_by: %f != %f", i, e);
+	sum++;
+    }
+
+    if(f - i > EPSILON) {
+    	G_message("Error in G_math_f_aAx_by: %f != %f", i, f);
+	sum++;
+    }
+
+    if(g - (float)rows*rows > EPSILON) {
+    	G_message("Error in G_math_f_x_dyad_y: %f != %f", (float)rows*rows, g);
+	sum++;
+    }
+
+    if(h - (float)rows*rows > EPSILON) {
+    	G_message("Error in G_math_f_x_dyad_y: %f != %f", (float)rows*rows, h);
+	sum++;
+    }
+
+    if(x)
+      G_free_fvector(x);
+    if(y)
+      G_free_fvector(y);
+    if(z)
+      G_free_fvector(z);
+
+    G_math_free_f_les(les);
+
+    if(A)
+      G_free_fmatrix(A);
+    if(B)
+      G_free_fmatrix(B);
+    if(C)
+      G_free_fmatrix(C);
+
+    return sum;
+}
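
A note on why these checks work: the LES factories in test_tools.c set each right
hand side entry b[i] to the row sum of A, so the all-ones vector is the exact
solution of A*x = b and the absolute-sum norm of A*x must match the norm of b.
A minimal sketch of that check pattern, using only functions already exercised
above (the helper name check_row_sums is made up for illustration and is not
part of the library):

    #include <grass/gis.h>
    #include <grass/gmath.h>

    /* Hypothetical helper: verify that A*(1,...,1) reproduces b in the asum norm */
    static int check_row_sums(double **A, double *b, int rows)
    {
        double *x = G_alloc_vector(rows);
        double *z = G_alloc_vector(rows);
        double norm_z = 0.0, norm_b = 0.0;
        int i, failed;

        for (i = 0; i < rows; i++)
            x[i] = 1.0;

        G_math_d_Ax(A, x, z, rows, rows);       /* z = A * x */
        G_math_d_asum_norm(z, &norm_z, rows);   /* sum of |z[i]| */
        G_math_d_asum_norm(b, &norm_b, rows);   /* sum of |b[i]| */
        failed = (norm_z - norm_b > 0.0000001); /* same EPSILON as above */

        G_free_vector(x);
        G_free_vector(z);
        return failed;
    }
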

Added: grass/branches/develbranch_6/lib/gmath/test/test_blas3.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_blas3.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_blas3.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,259 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass PDE Numerical Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2007
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:      Unit tests for blas level 3 functions
+*
+* COPYRIGHT:    (C) 2007 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include <math.h>
+#include "test_gmath_lib.h"
+
+#define EPSILON 0.0000001
+
+/* prototypes */
+static int test_blas_level_3_double(void);
+static int test_blas_level_3_float(void);
+
+
+/* *************************************************************** */
+/* Perform the blas level 3 unit tests *************************** */
+/* *************************************************************** */
+int unit_test_blas_level_3(void)
+{
+    int sum = 0;
+
+    G_message(_("\n++ Running blas level 3 unit tests ++"));
+
+    sum += test_blas_level_3_double();
+    sum += test_blas_level_3_float();
+
+    if (sum > 0)
+	G_warning(_("\n-- blas level 3 unit tests failure --"));
+    else
+	G_message(_("\n-- blas level 3 unit tests finished successfully --"));
+
+    return sum;
+}
+
+/* *************************************************************** */
+/* ************** D O U B L E ************************************ */
+/* *************************************************************** */
+int test_blas_level_3_double(void)
+{
+
+    int sum = 0;
+    int rows = TEST_NUM_ROWS;
+    int cols = TEST_NUM_COLS;
+    double **A, **B, **C, *x, *y, a = 0.0, b = 0.0, c = 0.0, d = 0.0;
+
+    x = G_alloc_vector(cols);
+    y = G_alloc_vector(rows);
+
+    A = G_alloc_matrix(rows, cols);
+    B = G_alloc_matrix(rows, cols);
+    C = G_alloc_matrix(rows, cols);
+
+    fill_d_vector_scalar(x, 1, cols);
+    fill_d_vector_scalar(y, 0, rows);
+
+    fill_d_vector_scalar(A[0], 1, rows*cols);
+    fill_d_vector_scalar(B[0], 2, rows*cols);
+
+#pragma omp parallel default(shared)
+{
+    G_math_d_aA_B(A, B, 1.0 , C, rows , cols );
+    G_math_d_Ax(C, x, y, rows, cols);
+}
+    G_math_d_asum_norm(y, &a, rows);
+
+
+    if(a != 3*rows*cols) {
+    	G_message("Error in G_math_d_aA_B: %f != %f", a, (double)3*rows*cols);
+	sum++;
+    }
+#pragma omp parallel default(shared)
+{
+    G_math_d_aA_B(A, B, -1.0 , C, rows , cols );
+    G_math_d_Ax(C, x, y, rows, cols);
+}
+    G_math_d_asum_norm(y, &b, rows);
+
+
+    if(b != rows*cols) {
+    	G_message("Error in G_math_d_aA_B: %f != %f", b, (double)rows*cols);
+	sum++;
+    }
+#pragma omp parallel default(shared)
+{
+    G_math_d_aA_B(A, B, 2.0 , C, rows , cols );
+    G_math_d_Ax(C, x, y, rows, cols);
+}
+    G_math_d_asum_norm(y, &c, rows);
+
+
+    if(c != 4*rows*cols) {
+    	G_message("Error in G_math_d_aA_B: %f != %f", c, (double)4*rows*cols);
+	sum++;
+    }
+
+    G_free_matrix(A);
+    G_free_matrix(B);
+    G_free_matrix(C);
+    A = G_alloc_matrix(rows, cols);
+    B = G_alloc_matrix(cols, rows);
+    C = G_alloc_matrix(rows, rows);
+
+    G_free_vector(x);
+    G_free_vector(y);
+    x = G_alloc_vector(rows);
+    y = G_alloc_vector(rows);
+
+    fill_d_vector_scalar(x, 1, rows);
+    fill_d_vector_scalar(y, 0, rows);
+    fill_d_vector_scalar(A[0], 1, rows*cols);
+    fill_d_vector_scalar(B[0], 2, rows*cols);
+
+#pragma omp parallel default(shared)
+{
+    G_math_d_AB(A, B, C, rows , cols , cols );
+    G_math_d_Ax(C, x, y, rows, cols);
+}
+    G_math_d_asum_norm(y, &d, rows);
+
+
+    if(d != 2*rows*cols*cols) {
+    	G_message("Error in G_math_d_AB: %f != %f", d, (double)2*rows*cols*cols);
+	sum++;
+    }
+
+    if(x)
+      G_free_vector(x);
+    if(y)
+      G_free_vector(y);
+
+    if(A)
+      G_free_matrix(A);
+    if(B)
+      G_free_matrix(B);
+    if(C)
+      G_free_matrix(C);
+
+    return sum;
+}
+
+
+/* *************************************************************** */
+/* ************** F L O A T ************************************** */
+/* *************************************************************** */
+int test_blas_level_3_float(void)
+{
+
+    int sum = 0;
+    int rows = TEST_NUM_ROWS;
+    int cols = TEST_NUM_COLS;
+    float **A, **B, **C, *x, *y, a = 0.0, b = 0.0, c = 0.0, d = 0.0;
+
+    x = G_alloc_fvector(cols);
+    y = G_alloc_fvector(rows);
+
+    A = G_alloc_fmatrix(rows, cols);
+    B = G_alloc_fmatrix(rows, cols);
+    C = G_alloc_fmatrix(rows, cols);
+
+    fill_f_vector_scalar(x, 1, cols);
+    fill_f_vector_scalar(y, 0, rows);
+
+    fill_f_vector_scalar(A[0], 1, rows*cols);
+    fill_f_vector_scalar(B[0], 2, rows*cols);
+
+#pragma omp parallel default(shared)
+{
+    G_math_f_aA_B(A, B, 1.0 , C, rows , cols );
+    G_math_f_Ax(C, x, y, rows, cols);
+}
+    G_math_f_asum_norm(y, &a, rows);
+
+    if(a != 3*rows*cols) {
+    	G_message("Error in G_math_f_aA_B: %f != %f", a, (double)3*rows*cols);
+	sum++;
+    }
+#pragma omp parallel default(shared)
+{
+    G_math_f_aA_B(A, B, -1.0 , C, rows , cols );
+    G_math_f_Ax(C, x, y, rows, cols);
+}
+    G_math_f_asum_norm(y, &b, rows);
+
+    if(b != rows*cols) {
+    	G_message("Error in G_math_f_aA_B: %f != %f", b, (double)rows*cols);
+	sum++;
+    }
+#pragma omp parallel default(shared)
+{
+    G_math_f_aA_B(A, B, 2.0 , C, rows , cols );
+    G_math_f_Ax(C, x, y, rows, cols);
+}
+    G_math_f_asum_norm(y, &c, rows);
+
+    if(c != 4*rows*cols) {
+    	G_message("Error in G_math_f_aA_B: %f != %f", c, (double)4*rows*cols);
+	sum++;
+    }
+
+    G_free_fmatrix(A);
+    G_free_fmatrix(B);
+    G_free_fmatrix(C);
+    A = G_alloc_fmatrix(rows, cols);
+    B = G_alloc_fmatrix(cols, rows);
+    C = G_alloc_fmatrix(rows, rows);
+
+    G_free_fvector(x);
+    G_free_fvector(y);
+    x = G_alloc_fvector(rows);
+    y = G_alloc_fvector(rows);
+
+    fill_f_vector_scalar(x, 1, rows);
+    fill_f_vector_scalar(y, 0, rows);
+    fill_f_vector_scalar(A[0], 1, rows*cols);
+    fill_f_vector_scalar(B[0], 2, rows*cols);
+
+#pragma omp parallel default(shared)
+{
+    G_math_f_AB(A, B, C, rows , cols , cols );
+    G_math_f_Ax(C, x, y, rows, cols);
+}
+    G_math_f_asum_norm(y, &d, rows);
+
+
+    if(d != 2*rows*cols*cols) {
+    	G_message("Error in G_math_f_AB: %f != %f", d, (double)2*rows*cols*cols);
+	sum++;
+    }
+
+    if(x)
+      G_free_fvector(x);
+    if(y)
+      G_free_fvector(y);
+
+    if(A)
+      G_free_fmatrix(A);
+    if(B)
+      G_free_fmatrix(B);
+    if(C)
+      G_free_fmatrix(C);
+
+    return sum;
+}
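
The expected values in these blas level 3 checks follow from simple arithmetic;
with every entry of A set to 1, every entry of B set to 2 and x the all-ones
vector:

    C[i][j] = a * A[i][j] + B[i][j] = a + 2
    y[i]    = sum_j C[i][j] * x[j]  = (a + 2) * cols
    asum(y) = (a + 2) * rows * cols

which gives 3*rows*cols for a = 1.0, rows*cols for a = -1.0 and 4*rows*cols for
a = 2.0. The matrix product check works the same way: every entry of A*B equals
2*cols, which leads to the expected value 2*rows*cols*cols.
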

Added: grass/branches/develbranch_6/lib/gmath/test/test_ccmath_wrapper.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_ccmath_wrapper.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_ccmath_wrapper.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,230 @@
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      Unit tests for the ccmath wrapper
+ *
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include "test_gmath_lib.h"
+
+#define EPSILON_DIRECT 1.0E-10
+#define EPSILON_ITER 1.0E-4
+
+/* prototypes */
+static int test_ccmath_wrapper(void);
+
+/* ************************************************************************* */
+/* Perform the ccmath wrapper unit tests *********************************** */
+/* ************************************************************************* */
+int unit_test_ccmath_wrapper(void)
+{
+	int sum = 0;
+
+	G_message(_("\n++ Running ccmath wrapper unit tests ++"));
+
+	sum += test_ccmath_wrapper();
+
+	if (sum > 0)
+		G_warning(_("\n-- ccmath wrapper unit tests failure --"));
+	else
+		G_message(_("\n-- ccmath wrapper unit tests finished successfully --"));
+
+	return sum;
+}
+
+/* *************************************************************** */
+/* Test all implemented ccmath wrapper functions ***************** */
+/* *************************************************************** */
+int test_ccmath_wrapper(void)
+{
+	G_math_les *les;
+	int sum = 0;
+	double val = 0.0, val2 = 0.0;
+
+	G_message("\t * testing ccmath lu solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+
+        G_math_d_copy(les->b, les->x, les->rows);
+	G_math_solv(les->A, les->x, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solv abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+	G_message("\t * testing ccmath lu solver with unsymmetric matrix\n");
+
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+
+        G_math_d_copy(les->b, les->x, les->rows);
+	G_math_solv(les->A, les->x, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solv abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+	G_message("\t * testing ccmath positive definite solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+
+        G_math_d_copy(les->b, les->x, les->rows);
+	G_math_solvps(les->A, les->x, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solvps abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+	G_message("\t * testing ccmath matrix inversion with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_minv(les->A, les->rows);
+        G_math_d_Ax(les->A, les->b, les->x, les->rows, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_minv abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+	G_message("\t * testing ccmath matrix inversion with unsymmetric matrix\n");
+
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+
+	G_math_minv(les->A, les->rows);
+        G_math_d_Ax(les->A, les->b, les->x, les->rows, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_minv abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+
+	G_message("\t * testing ccmath positive definite matrix inversion with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_psinv(les->A, les->rows);
+        G_math_d_Ax(les->A, les->b, les->x, les->rows, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_psinv abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+
+	G_message("\t * testing ccmath eigenvalue solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+        /* Reference eigenvalues computed with Octave */
+        les->b[9] =   0.043264;
+        les->b[8] =   0.049529;
+        les->b[7] =   0.057406;
+        les->b[6] =   0.067696;
+        les->b[5] =   0.081639;
+        les->b[4] =   0.101357;
+        les->b[3] =   0.130298;
+        les->b[2] =   0.174596;
+        les->b[1] =   0.256157;
+        les->b[0] =   0.502549;
+
+	G_math_eigval(les->A, les->x, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->b, &val, les->rows);
+	G_math_d_asum_norm(les->x, &val2, les->rows);
+	if ((val  - val2) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_eigval abs %2.20f != %f", val,
+				val2);
+		sum++;
+	}
+
+	G_math_free_les(les);
+
+
+	G_message("\t * testing ccmath eigenvector computation with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+        /* Reference eigenvalues computed with Octave */
+        les->b[9] =   0.043264;
+        les->b[8] =   0.049529;
+        les->b[7] =   0.057406;
+        les->b[6] =   0.067696;
+        les->b[5] =   0.081639;
+        les->b[4] =   0.101357;
+        les->b[3] =   0.130298;
+        les->b[2] =   0.174596;
+        les->b[1] =   0.256157;
+        les->b[0] =   0.502549;
+        
+        G_math_eigen(les->A, les->x, les->rows);
+        G_math_print_les(les);
+	G_math_d_asum_norm(les->b, &val, les->rows);
+	G_math_d_asum_norm(les->x, &val2, les->rows);
+	if ((val  - val2) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_eigen abs %2.20f != %f", val,
+				val2);
+		sum++;
+	}
+	G_math_free_les(les);
+
+	G_message("\t * testing ccmath singular value decomposition with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+
+        G_math_svdval(les->x, les->A, les->rows, les->rows);
+        G_math_print_les(les);
+
+
+	G_math_free_les(les);
+
+	return sum;
+}
+
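
All ccmath wrapper tests above follow the same pattern: the right hand side is
copied into x, the wrapper overwrites x with the solution in place, and the
absolute-sum norm of x is compared against the number of rows (the factories
build b as the row sums of A, so the exact solution is the all-ones vector).
A minimal sketch of that pattern, reusing only calls that already appear above
(the function name sketch_ccmath_solve is made up for illustration):

    #include <grass/gis.h>
    #include <grass/gmath.h>
    #include "test_gmath_lib.h"

    /* Solve A*x = b in place with the ccmath LU wrapper and check the result */
    static int sketch_ccmath_solve(void)
    {
        G_math_les *les = create_normal_symmetric_les(TEST_NUM_ROWS);
        double norm = 0.0;
        int failed;

        G_math_d_copy(les->b, les->x, les->rows); /* the wrappers solve in place */
        G_math_solv(les->A, les->x, les->rows);   /* x now holds the solution */
        G_math_d_asum_norm(les->x, &norm, les->rows);
        failed = (norm - (double)les->rows > 1.0E-4); /* EPSILON_ITER above */

        G_math_free_les(les);
        return failed;
    }
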

Added: grass/branches/develbranch_6/lib/gmath/test/test_gmath_lib.h
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_gmath_lib.h	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_gmath_lib.h	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,130 @@
+
+/*****************************************************************************
+*
+* MODULE:       Grass gmath Library
+* AUTHOR(S):    Soeren Gebbert, Berlin (GER) Oct 2007
+* 		soerengebbert <at> gmx <dot> de
+*               
+* PURPOSE:	Unit and Integration tests
+*
+* COPYRIGHT:    (C) 2000 by the GRASS Development Team
+*
+*               This program is free software under the GNU General Public
+*               License (>=v2). Read the file COPYING that comes with GRASS
+*               for details.
+*
+*****************************************************************************/
+
+#ifndef _TEST_GMATH_LIB_H_
+#define _TEST_GMATH_LIB_H_
+
+#include <grass/gmath.h>
+#include <grass/gis.h>
+
+#define TEST_NUM_ROWS 10  
+#define TEST_NUM_COLS 9
+#define TEST_NUM_DEPTHS 8
+
+struct timeval;
+
+typedef struct
+{
+    double *x;			/*the value vector */
+    double *b;			/*the right side of Ax = b */
+    double **A;			/*the normal quadratic matrix */
+    double *data;		/*the pointer to the quadratic matrix data*/
+    G_math_spvector **Asp;		/*the sparse matrix */
+    int rows;			/*number of rows */
+    int cols;			/*number of cols */
+    int quad;			/*is the matrix quadratic (1-quadratic, 0 not)*/
+    int type;			/*the type of the les, normal == 0, sparse == 1 */
+    int bandwith;		/*the bandwidth of the matrix (0 < bandwidth <= cols)*/
+    int symm;			/*0 if matrix unsymmetric, 1 if symmetric*/
+} G_math_les;
+
+typedef struct
+{
+    float *x;			/*the value vector */
+    float *b;			/*the right side of Ax = b */
+    float **A;			/*the normal quadratic matrix */
+    float *data;		/*the pointer to the quadratic matrix data*/
+    int rows;			/*number of rows */
+    int cols;			/*number of cols */
+    int quad;			/*is the matrix quadratic (1-quadratic, 0 not)*/
+    int type;			/*the type of the les, normal == 0, sparse == 1 */
+    int bandwith;		/*the bandwidth of the matrix (0 < bandwidth <= cols)*/
+    int symm;			/*0 if matrix unsymmetric, 1 if symmetric*/
+} G_math_f_les;
+
+extern G_math_les *G_math_alloc_nquad_les(int rows, int cols, int type);
+extern G_math_les *G_math_alloc_nquad_les_Ax(int rows, int cols, int type);
+extern G_math_les *G_math_alloc_nquad_les_A(int rows, int cols, int type);
+extern G_math_les *G_math_alloc_nquad_les_Ax_b(int rows, int cols, int type);
+extern G_math_les *G_math_alloc_les(int rows, int type);
+extern G_math_les *G_math_alloc_les_Ax(int rows, int type);
+extern G_math_les *G_math_alloc_les_A(int rows, int type);
+extern G_math_les *G_math_alloc_les_Ax_b(int rows, int type);
+extern G_math_les *G_math_alloc_les_param(int cols, int rows, int type, int parts);
+extern int G_math_add_spvector_to_les(G_math_les * les, G_math_spvector * spvector, int row);
+extern void G_math_print_les(G_math_les * les);
+extern void G_math_free_les(G_math_les * les);
+
+
+extern void fill_d_vector_range_1(double *x, double a, int rows);
+extern void fill_f_vector_range_1(float *x, float a, int rows);
+extern void fill_i_vector_range_1(int *x, int a, int rows);
+extern void fill_d_vector_range_2(double *x, double a, int rows);
+extern void fill_f_vector_range_2(float *x, float a, int rows);
+extern void fill_i_vector_range_2(int *x, int a, int rows);
+extern void fill_d_vector_scalar(double *x, double a, int rows);
+extern void fill_f_vector_scalar(float *x, float a, int rows);
+extern void fill_i_vector_scalar(int *x, int a, int rows);
+
+extern G_math_les *create_normal_symmetric_les(int rows);
+extern G_math_les *create_normal_symmetric_pivot_les(int rows);
+extern G_math_les *create_normal_unsymmetric_les(int rows);
+extern G_math_les *create_sparse_symmetric_les(int rows);
+extern G_math_les *create_sparse_unsymmetric_les(int rows);
+extern G_math_les *create_normal_unsymmetric_nquad_les_A(int rows, int cols);
+
+
+/*float*/
+extern G_math_f_les *G_math_alloc_f_les(int rows, int type);
+extern G_math_f_les *G_math_alloc_f_nquad_les_A(int rows, int cols, int type);
+extern G_math_f_les *G_math_alloc_f_les_param(int cols, int rows, int type, int parts);
+extern void G_math_free_f_les(G_math_f_les * les);
+extern G_math_f_les *create_normal_symmetric_f_les(int rows);
+extern G_math_f_les *create_normal_unsymmetric_f_les(int rows);
+extern G_math_f_les *create_normal_unsymmetric_f_nquad_les_A(int rows, int cols);
+
+/* direct and iterative solvers */
+extern int unit_test_solvers(void);
+
+/* ccmath wrapper tests*/
+int unit_test_ccmath_wrapper(void);
+
+/* blas level 1 routines */
+extern int unit_test_blas_level_1(void);
+
+/* blas level 2 routines */
+extern int unit_test_blas_level_2(void);
+
+/* blas level 3 routines */
+extern int unit_test_blas_level_3(void);
+
+/* benchmarking iterative krylov solvers */
+extern int bench_solvers_krylov(int);
+
+/* benchmarking direct solvers */
+extern int bench_solvers_direct(int);
+
+/* benchmarking level 2 blas functions */
+int bench_blas_level_2(int rows); 
+
+/* benchmarking level 3 blas functions */
+int bench_blas_level_3(int rows); 
+
+/* Compute time difference */
+extern double compute_time_difference(struct timeval start, struct timeval end);
+
+#endif

Added: grass/branches/develbranch_6/lib/gmath/test/test_main.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_main.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_main.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,197 @@
+/****************************************************************************
+ *
+ * MODULE:       test.gmath.lib
+ *   	    	
+ * AUTHOR(S):    Original author 
+ *               Soeren Gebbert soerengebbert <at> gmx <dot> de
+ * 		05 Sep 2007 Berlin
+ *
+ * PURPOSE:      Unit and integration tests for the gmath library
+ *
+ * COPYRIGHT:    (C) 2007 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *   	    	License (>=v2). Read the file COPYING that comes with GRASS
+ *   	    	for details.
+ *
+ *****************************************************************************/
+#include <stdlib.h>
+#include <string.h>
+#include <grass/gis.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include "test_gmath_lib.h"
+
+
+/*- Parameters and global variables -----------------------------------------*/
+typedef struct {
+    struct Option *unit, *integration, *solverbenchmark, *blasbenchmark, *rows;
+    struct Flag *full, *testunit, *testint;
+} paramType;
+
+paramType param; /*Parameters */
+
+/*- prototypes --------------------------------------------------------------*/
+static void set_params(void); /*Fill the paramType structure */
+
+/* ************************************************************************* */
+/* Set up the arguments we are expecting ********************************** */
+
+/* ************************************************************************* */
+void set_params(void) {
+    param.unit = G_define_option();
+    param.unit->key = "unit";
+    param.unit->type = TYPE_STRING;
+    param.unit->required = NO;
+    param.unit->options = "blas1,blas2,blas3,solver,ccmath";
+    param.unit->description = _("Choose the unit tests to run");
+
+    param.integration = G_define_option();
+    param.integration->key = "integration";
+    param.integration->type = TYPE_STRING;
+    param.integration->required = NO;
+    param.integration->options = "";
+    param.integration->description = _("Choose the integration tests to run");
+
+    param.rows = G_define_option();
+    param.rows->key = "rows";
+    param.rows->type = TYPE_INTEGER;
+    param.rows->required = NO;
+    param.rows->answer = "1000";
+    param.rows->description = _("The size of the matrices and vectors for benchmarking");
+
+    param.solverbenchmark = G_define_option();
+    param.solverbenchmark->key = "solverbench";
+    param.solverbenchmark->type = TYPE_STRING;
+    param.solverbenchmark->required = NO;
+    param.solverbenchmark->options = "krylov,direct";
+    param.solverbenchmark->description = _("Choose solver benchmark");
+
+    param.blasbenchmark = G_define_option();
+    param.blasbenchmark->key = "blasbench";
+    param.blasbenchmark->type = TYPE_STRING;
+    param.blasbenchmark->required = NO;
+    param.blasbenchmark->options = "blas2,blas3";
+    param.blasbenchmark->description = _("Choose blas benchmark");
+    
+    param.testunit = G_define_flag();
+    param.testunit->key = 'u';
+    param.testunit->description = _("Run all unit tests");
+
+    param.testint = G_define_flag();
+    param.testint->key = 'i';
+    param.testint->description = _("Run all integration tests");
+
+    param.full = G_define_flag();
+    param.full->key = 'a';
+    param.full->description = _("Run all unit and integration tests");
+
+}
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+/* ************************************************************************* */
+int main(int argc, char *argv[]) {
+    struct GModule *module;
+    int returnstat = 0, i;
+    int rows = 3000;
+
+
+    /* Initialize GRASS */
+    G_gisinit(argv[0]);
+
+    module = G_define_module();
+    module->keywords = _("test, gmath");
+    module->description
+            = _("Performs benchmarks, unit and integration tests for the gmath library");
+
+    /* Get parameters from user */
+    set_params();
+
+    if (G_parser(argc, argv))
+        exit(EXIT_FAILURE);
+
+
+    if (param.rows->answer)
+        sscanf(param.rows->answer, "%i", &rows);
+
+    /*Run the unit tests */
+    if (param.testunit->answer || param.full->answer) {
+        returnstat += unit_test_blas_level_1();
+        returnstat += unit_test_blas_level_2();
+        returnstat += unit_test_blas_level_3();
+        returnstat += unit_test_solvers();
+        returnstat += unit_test_ccmath_wrapper();
+
+    }
+
+    /*Run the integration tests */
+    if (param.testint->answer || param.full->answer) {
+        ;
+    }
+
+    /*Run single tests */
+    if (!param.full->answer) {
+        /*unit tests */
+        if (!param.testunit->answer) {
+            i = 0;
+            if (param.unit->answers)
+                while (param.unit->answers[i]) {
+                    if (strcmp(param.unit->answers[i], "blas1") == 0)
+                        returnstat += unit_test_blas_level_1();
+
+                    if (strcmp(param.unit->answers[i], "blas2") == 0)
+                        returnstat += unit_test_blas_level_2();
+
+                    if (strcmp(param.unit->answers[i], "blas3") == 0)
+                        returnstat += unit_test_blas_level_3();
+
+                    if (strcmp(param.unit->answers[i], "solver") == 0)
+                        returnstat += unit_test_solvers();
+
+                    if (strcmp(param.unit->answers[i], "ccmath") == 0)
+                        returnstat += unit_test_ccmath_wrapper();
+
+                    i++;
+                }
+        }
+        /*integration tests */
+        if (!param.testint->answer) {
+            i = 0;
+            if (param.integration->answers)
+                while (param.integration->answers[i]) {
+                    i++;
+                }
+
+        }
+    }
+
+    i = 0;
+    if (param.solverbenchmark->answers)
+        while (param.solverbenchmark->answers[i]) {
+            if (strcmp(param.solverbenchmark->answers[i], "krylov") == 0)
+                bench_solvers_krylov(rows);
+            if (strcmp(param.solverbenchmark->answers[i], "direct") == 0)
+                bench_solvers_direct(rows);
+            i++;
+        }
+
+    i = 0;
+    if (param.blasbenchmark->answers)
+        while (param.blasbenchmark->answers[i]) {
+            if (strcmp(param.blasbenchmark->answers[i], "blas2") == 0)
+                bench_blas_level_2(rows);
+            if (strcmp(param.blasbenchmark->answers[i], "blas3") == 0)
+                bench_blas_level_3(rows);
+
+            i++;
+        }
+    
+    if (returnstat != 0)
+        G_warning("Errors detected while testing the gmath lib");
+    else
+        G_message("\n-- gmath lib tests finished successfully --");
+
+    return (returnstat);
+}
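
The module above ties the tests together: the -u flag runs all unit tests, -i
runs the integration tests (none are implemented yet), -a runs both, and the
unit=, solverbench=, blasbench= and rows= options select individual tests and
the benchmark size. Assuming the binary is installed under the name given in
the MODULE header (the actual name is set by the Makefile in this directory),
typical invocations would look like:

    test.gmath.lib -u
    test.gmath.lib unit=blas2
    test.gmath.lib solverbench=krylov rows=2000
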

Added: grass/branches/develbranch_6/lib/gmath/test/test_solvers.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_solvers.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_solvers.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,457 @@
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      Unit tests for les solving
+ *
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include "test_gmath_lib.h"
+
+#define EPSILON_DIRECT 1.0E-10
+#define EPSILON_ITER 1.0E-4
+
+/* prototypes */
+static int test_solvers(void);
+
+/* ************************************************************************* */
+/* Perform the solver unit tests ******************************************* */
+/* ************************************************************************* */
+int unit_test_solvers(void)
+{
+	int sum = 0;
+
+	G_message(_("\n++ Running solver unit tests ++"));
+
+	sum += test_solvers();
+
+	if (sum > 0)
+		G_warning(_("\n-- Solver unit tests failure --"));
+	else
+		G_message(_("\n-- Solver unit tests finished successfully --"));
+
+	return sum;
+}
+
+/* *************************************************************** */
+/* Test all implemented solvers for sparse and normal matrix *** */
+/* *************************************************************** */
+int test_solvers(void)
+{
+	G_math_les *les;
+	G_math_les *sples;
+	int sum = 0;
+	double val = 0.0;
+
+	G_message("\t * testing jacobi solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_jacobi(les->A, les->x, les->b, les->rows, 250, 1, 0.1e-10);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_jacobi abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_solver_sparse_jacobi(sples->Asp, sples->x, sples->b, les->rows, 250,
+			1, 0.1e-10);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_jacobi abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing jacobi solver with unsymmetric matrix\n");
+
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_unsymmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_jacobi(les->A, les->x, les->b, les->rows, 250, 1, 0.1e-10);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_jacobi abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+
+	G_math_solver_sparse_jacobi(sples->Asp, sples->x, sples->b, les->rows, 250,
+			1, 0.1e-10);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_jacobi abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing gauss seidel solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_gs(les->A, les->x, les->b, les->rows, 150, 1, 0.1e-9);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_gs abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+
+	G_math_solver_sparse_gs(sples->Asp, sples->x, sples->b, les->rows, 150, 1,
+			0.1e-9);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_gs abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing gauss seidel solver with unsymmetric matrix\n");
+
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_unsymmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_gs(les->A, les->x, les->b, les->rows, 150, 1, 0.1e-9);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_gs abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+
+	G_math_solver_sparse_gs(sples->Asp, sples->x, sples->b, les->rows, 150, 1,
+			0.1e-9);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_gs abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing pcg solver with badly conditioned symmetric matrix and preconditioner 3\n");
+
+	les = create_normal_symmetric_pivot_les(TEST_NUM_ROWS);
+
+	G_math_solver_pcg(les->A, les->x, les->b, les->rows, 250, 0.1e-9, 3);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_pcg abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing pcg solver with symmetric matrix and preconditioner 1\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_pcg(les->A, les->x, les->b, les->rows, 250, 0.1e-9, 1);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_pcg abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+
+	G_math_solver_sparse_pcg(sples->Asp, sples->x, sples->b, les->rows, 250,
+			0.1e-9, 1);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_pcg abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+	G_math_print_les(sples);
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing pcg solver with symmetric matrix and preconditioner 2\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_pcg(les->A, les->x, les->b, les->rows, 250, 0.1e-9, 2);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_pcg abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+
+	G_math_solver_sparse_pcg(sples->Asp, sples->x, sples->b, les->rows, 250,
+			0.1e-9, 2);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_pcg abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+	G_math_print_les(sples);
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing pcg solver with symmetric matrix and preconditioner 3\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_pcg(les->A, les->x, les->b, les->rows, 250, 0.1e-9, 3);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_pcg abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+
+	G_math_solver_sparse_pcg(sples->Asp, sples->x, sples->b, les->rows, 250,
+			0.1e-9, 3);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_pcg abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+	G_math_print_les(sples);
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing cg solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_cg(les->A, les->x, les->b, les->rows, 250, 0.1e-9);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_cg abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+
+	G_math_solver_sparse_cg(sples->Asp, sples->x, sples->b, les->rows, 250,
+			0.1e-9);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_cg abs %2.20f != %i", val,
+				sples->rows);
+		sum++;
+	}
+	G_math_print_les(sples);
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing cg solver with badly conditioned symmetric matrix\n");
+
+	les = create_normal_symmetric_pivot_les(TEST_NUM_ROWS);
+
+	G_math_solver_cg(les->A, les->x, les->b, les->rows, 250, 0.1e-9);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_cg abs %2.20f != %i", val, les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing bicgstab solver with symmetric matrix\n");
+
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_symmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_bicgstab(les->A, les->x, les->b, les->rows, 250, 0.1e-9);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_bicgstab abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+
+	G_math_solver_sparse_bicgstab(sples->Asp, sples->x, sples->b, les->rows,
+			250, 0.1e-9);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_bicgstab abs %2.20f != %i",
+				val, sples->rows);
+		sum++;
+	}
+	G_math_print_les(sples);
+
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing bicgstab solver with unsymmetric matrix\n");
+
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+	sples = create_sparse_unsymmetric_les(TEST_NUM_ROWS);
+
+	G_math_solver_bicgstab(les->A, les->x, les->b, les->rows, 250, 0.1e-9);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_bicgstab abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+
+	G_math_solver_sparse_bicgstab(sples->Asp, sples->x, sples->b, les->rows,
+			250, 0.1e-9);
+	G_math_d_asum_norm(sples->x, &val, sples->rows);
+	if ((val - (double)sples->rows) > EPSILON_ITER)
+	{
+		G_warning("Error in G_math_solver_sparse_bicgstab abs %2.20f != %i",
+				val, sples->rows);
+		sum++;
+	}
+
+	G_math_print_les(sples);
+	G_math_free_les(les);
+	G_math_free_les(sples);
+
+	G_message("\t * testing gauss elimination solver with symmetric matrix\n");
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	G_math_solver_gauss(les->A, les->x, les->b, les->rows);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_gauss abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing lu decomposition solver with symmetric matrix\n");
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	G_math_solver_lu(les->A, les->x, les->b, les->rows);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_lu abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing gauss elimination solver with unsymmetric matrix\n");
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+	G_math_solver_gauss(les->A, les->x, les->b, les->rows);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_gauss abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing lu decomposition solver with unsymmetric matrix\n");
+	les = create_normal_unsymmetric_les(TEST_NUM_ROWS);
+	G_math_solver_lu(les->A, les->x, les->b, les->rows);
+
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_lu abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing gauss elimination solver with badly conditioned symmetric matrix\n");
+	les = create_normal_symmetric_pivot_les(TEST_NUM_ROWS);
+	G_math_solver_gauss(les->A, les->x, les->b, les->rows);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_gauss abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing lu decomposition solver with badly conditioned symmetric matrix\n");
+	les = create_normal_symmetric_pivot_les(TEST_NUM_ROWS);
+	G_math_solver_lu(les->A, les->x, les->b, les->rows);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_lu abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	G_message("\t * testing cholesky decomposition solver with symmetric matrix\n");
+	les = create_normal_symmetric_les(TEST_NUM_ROWS);
+	G_math_solver_cholesky(les->A, les->x, les->b, les->rows,
+			les->rows);
+	G_math_d_asum_norm(les->x, &val, les->rows);
+	if ((val - (double)les->rows) > EPSILON_DIRECT)
+	{
+		G_warning("Error in G_math_solver_cholesky abs %2.20f != %i", val,
+				les->rows);
+		sum++;
+	}
+	G_math_print_les(les);
+	G_math_free_les(les);
+
+	return sum;
+}
+
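
A note on the two tolerances used above: the direct solvers (gauss, lu,
cholesky) are expected to reproduce the exact all-ones solution up to rounding,
hence EPSILON_DIRECT = 1.0E-10, while the iterative solvers stop after a fixed
number of iterations or once the residual drops below 0.1e-9, so their result
is only checked against the looser EPSILON_ITER = 1.0E-4. The check itself is
always the same and could be factored out; a sketch of such a helper (the name
check_solution is made up and not part of the library):

    #include <grass/gis.h>
    #include <grass/gmath.h>

    /* Hypothetical helper: compare the asum norm of the solution with rows */
    static int check_solution(double *x, int rows, double eps, const char *solver)
    {
        double val = 0.0;

        G_math_d_asum_norm(x, &val, rows);
        if ((val - (double)rows) > eps) {
            G_warning("Error in %s abs %2.20f != %i", solver, val, rows);
            return 1;
        }
        return 0;
    }
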

Added: grass/branches/develbranch_6/lib/gmath/test/test_tools.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_tools.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_tools.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,458 @@
+/*****************************************************************************
+ *
+ * MODULE:       Grass PDE Numerical Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      Tools for the gmath library tests: LES creation, vector filling and timing
+ *
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <grass/glocale.h>
+#include <grass/gmath.h>
+#include "test_gmath_lib.h"
+#include <sys/time.h>
+
+/* *************************************************************** */
+/* Compute the difference between two time steps ***************** */
+
+/* *************************************************************** */
+double compute_time_difference(struct timeval start, struct timeval end) {
+    int sec;
+    int usec;
+
+    sec = end.tv_sec - start.tv_sec;
+    usec = end.tv_usec - start.tv_usec;
+
+    return (double) sec + (double) usec / 1000000;
+}
+
+/* *************************************************************** */
+/* create a normal matrix with values ** Hilbert matrix ********** */
+/* *************************************************************** */
+G_math_les *create_normal_symmetric_les(int rows)
+{
+	G_math_les *les;
+	int i, j;
+	int size = rows;
+	double val;
+
+	les = G_math_alloc_les(rows, G_MATH_NORMAL_LES);
+	for (i = 0; i < size; i++)
+	{
+		val = 0.0;
+		for (j = 0; j < size; j++)
+		{
+			if (j == i)
+				les->A[i][j] = (double)(1.0/(((double)i + 1.0) + ((double)j
+						+ 1.0)));
+			else
+				les->A[i][j] = (double)(1.0/((((double)i + 1.0) + ((double)j
+						+ 1.0) + 100.0)));
+
+			val += les->A[i][j];
+		}
+		les->b[i] = val;
+		les->x[i] = 0.5;
+	}
+
+	return les;
+}
+
+/* ********************************************************************* */
+/* create a bad conditioned normal matrix with values ** Hilbert matrix  */
+/* ********************************************************************* */
+G_math_les *create_normal_symmetric_pivot_les(int rows)
+{
+	G_math_les *les;
+	int i, ii, j, jj;
+	double val;
+
+	les = G_math_alloc_les(rows, G_MATH_NORMAL_LES);
+	for (i = 0, ii = rows - 1; i < rows; i++, ii--)
+	{
+		val = 0.0;
+		for (j = 0, jj = rows - 1; j < rows; j++, jj--)
+		{
+			if (j == i)
+				les->A[i][j] = (double)(1.0/(((double)ii*ii*ii*ii*ii + 1.0)*1.1
+						+ ((double)jj*jj*jj*jj*jj + 1.0)*1.1));
+			else
+				les->A[i][j] = (double)(1.0/((((double)ii*ii*ii + 1.0)
+						+ ((double)jj*jj*jj + 1.0))));
+
+			val += les->A[i][j];
+		}
+		les->b[i] = val;
+		les->x[i] = 0.0;
+	}
+
+	return les;
+}
+
+/* *************************************************************** */
+/* create a normal matrix with values ** Hilbert matrix ********** */
+/* *************************************************************** */
+G_math_f_les *create_normal_symmetric_f_les(int rows)
+{
+	G_math_f_les *les;
+	int i, j;
+	int size = rows;
+	float val;
+
+	les = G_math_alloc_f_les(rows, G_MATH_NORMAL_LES);
+	for (i = 0; i < size; i++)
+	{
+		val = 0.0;
+		for (j = 0; j < size; j++)
+		{
+			if (j == i)
+				les->A[i][j] = (float)(1.0
+						/(((float)i + 1.0) + ((float)j + 1.0)));
+			else
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 100.0)));
+
+			val += les->A[i][j];
+		}
+		les->b[i] = val;
+		les->x[i] = 0.5;
+	}
+
+	return les;
+}
+
+/* *************************************************************** */
+/* create a sparse matrix with values ** Hilbert matrix ********** */
+/* *************************************************************** */
+G_math_les *create_sparse_unsymmetric_les(int rows)
+{
+	G_math_les *les;
+	G_math_spvector *spvector;
+	int i, j;
+	double val;
+
+	les = G_math_alloc_les(rows, G_MATH_SPARSE_LES);
+
+	for (i = 0; i < rows; i++)
+	{
+		spvector = G_math_alloc_spvector(rows);
+		val = 0;
+
+		for (j = 0; j < rows; j++)
+		{
+			if (j == i)
+			{
+				spvector->values[j] = (double)(1.0/((((double)i + 1.0)
+						+ ((double)j))));
+				spvector->index[j] = j;
+			}
+			if (j < i)
+			{
+				spvector->values[j] = (double)(1.0/((((double)i + 1.0)
+						+ ((double)j + 1.0) + 100)));
+				spvector->index[j] = j;
+			}
+			if (j > i)
+			{
+				spvector->values[j] = (double)(1.0/((((double)i + 1.0)
+						+ ((double)j + 1.0) + 120)));
+				spvector->index[j] = j;
+			}
+
+			val += spvector->values[j];
+		}
+
+		G_math_add_spvector_to_les(les, spvector, i);
+		les->b[i] = val;
+		les->x[i] = 0.5;
+	}
+
+	return les;
+}
+
+/* *************************************************************** */
+/* create a normal matrix with values ** Hilbert matrix ********** */
+/* *************************************************************** */
+G_math_les *create_normal_unsymmetric_les(int rows)
+{
+	G_math_les *les;
+	int i, j;
+	int size = rows;
+	double val;
+
+	les = G_math_alloc_les(rows, G_MATH_NORMAL_LES);
+	for (i = 0; i < size; i++)
+	{
+		val = 0.0;
+		for (j = 0; j < size; j++)
+		{
+			if (j == i)
+				les->A[i][j]
+						= (double)(1.0/((((double)i + 1.0) + ((double)j))));
+			if (j < i)
+				les->A[i][j] = (double)(1.0/((((double)i + 1.0) + ((double)j
+						+ 1.0) + 100)));
+			if (j > i)
+				les->A[i][j] = (double)(1.0/((((double)i + 1.0) + ((double)j
+						+ 1.0) + 120)));
+
+			val += les->A[i][j];
+		}
+		les->b[i] = val;
+		les->x[i] = 0.5;
+	}
+
+	return les;
+}
+
+/* *********************************************************************** */
+/* create a non quadratic unsymmetric matrix with values ** Hilbert matrix */
+/* *********************************************************************** */
+G_math_les *create_normal_unsymmetric_nquad_les_A(int rows, int cols)
+{
+	G_math_les *les;
+	int i, j;
+
+	les = G_math_alloc_nquad_les_A(rows, cols, G_MATH_NORMAL_LES);
+	for (i = 0; i < rows; i++)
+	{
+		for (j = 0; j < cols; j++)
+		{
+			if (j == i)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0) + ((float)j))));
+			if (j < i && j < cols && i < rows)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 100)));
+			if (j > i && j < cols && i < rows)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 120)));
+		}
+	}
+
+	return les;
+}
+
+/* ***************************************************************************** */
+/* create a non quadratic unsymmetric float matrix with values ** Hilbert matrix */
+/* ***************************************************************************** */
+G_math_f_les *create_normal_unsymmetric_f_nquad_les_A(int rows, int cols)
+{
+	G_math_f_les *les;
+	int i, j;
+
+	les = G_math_alloc_f_nquad_les_A(rows, cols, G_MATH_NORMAL_LES);
+	for (i = 0; i < rows; i++)
+	{
+		for (j = 0; j < cols; j++)
+		{
+			if (j == i)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0) + ((float)j))));
+			if (j < i && j < cols && i < rows)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 100)));
+			if (j > i && j < cols && i < rows)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 120)));
+		}
+	}
+
+	return les;
+}
+
+/* *************************************************************** */
+/* create a normal matrix with values ** Hilbert matrix ********** */
+/* *************************************************************** */
+G_math_f_les *create_normal_unsymmetric_f_les(int rows)
+{
+	G_math_f_les *les;
+	int i, j;
+	int size = rows;
+	float val;
+
+	les = G_math_alloc_f_les(rows, G_MATH_NORMAL_LES);
+	for (i = 0; i < size; i++)
+	{
+		val = 0.0;
+		for (j = 0; j < size; j++)
+		{
+			if (j == i)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0) + ((float)j))));
+			if (j < i)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 100)));
+			if (j > i)
+				les->A[i][j] = (float)(1.0/((((float)i + 1.0)
+						+ ((float)j + 1.0) + 120)));
+
+			val += les->A[i][j];
+		}
+		les->b[i] = val;
+		les->x[i] = 0.5;
+	}
+
+	return les;
+}
+
+/* *************************************************************** */
+/* create a sparse matrix with values ** Hilbert matrix ********** */
+/* *************************************************************** */
+G_math_les *create_sparse_symmetric_les(int rows)
+{
+	G_math_les *les;
+	G_math_spvector *spvector;
+	int i, j;
+	double val;
+
+	les = G_math_alloc_les(rows, G_MATH_SPARSE_LES);
+
+	for (i = 0; i < rows; i++)
+	{
+		spvector = G_math_alloc_spvector(rows);
+		val = 0;
+
+		for (j = 0; j < rows; j++)
+		{
+			if (j == i)
+			{
+				spvector->values[j] = (double)(1.0/((((double)i + 1.0)
+						+ ((double)j + 1.0))));
+				spvector->index[j] = j;
+			}
+			else
+			{
+				spvector->values[j] = (double)(1.0/(((((double)i + 1.0)
+						+ ((double)j + 1.0)) + 100)));
+				spvector->index[j] = j;
+			}
+
+			val += spvector->values[j];
+		}
+
+		G_math_add_spvector_to_les(les, spvector, i);
+		les->b[i] = val;
+		les->x[i] = 0.5;
+	}
+
+	return les;
+}
+
+/* *************************************************************** */
+void fill_d_vector_range_1(double *x, double a, int rows)
+{
+	int i = 0;
+
+	for (i = 0; i < rows; i++)
+        {
+		x[i] = a*(double)i;
+        }
+
+}
+
+/* *************************************************************** */
+void fill_d_vector_range_2(double *x, double a, int rows)
+{
+	int i = 0, count = 0;
+
+	for (i = rows - 1; i >= 0; i--)
+	{
+		x[i] = a*(double)count;
+		count ++;
+	}
+
+}
+
+/* *************************************************************** */
+void fill_d_vector_scalar(double *x, double a, int rows)
+{
+	int i = 0;
+
+	for (i = 0; i < rows; i++)
+        {
+		x[i] = a;
+        }
+}
+
+/* *************************************************************** */
+void fill_f_vector_range_1(float *x, float a, int rows)
+{
+	int i = 0;
+
+	for (i = 0; i < rows; i++)
+        {
+		x[i] = a*(float)i;
+                //printf("%f ", x[i]);
+        }
+}
+
+/* *************************************************************** */
+void fill_f_vector_range_2(float *x, float a, int rows)
+{
+	int i = 0, count = 0;
+
+	for (i = rows - 1; i >= 0; i--)
+	{
+		x[i] = a*(float)count;
+                //printf("%f ", x[i]);
+		count ++;
+	}
+
+}
+
+/* *************************************************************** */
+void fill_f_vector_scalar(float *x, float a, int rows)
+{
+	int i = 0;
+
+	for (i = 0; i < rows; i++)
+        {
+		x[i] = a;
+                //printf("%f ", x[i]);
+        }
+}
+
+/* *************************************************************** */
+void fill_i_vector_range_1(int *x, int a, int rows)
+{
+	int i = 0;
+
+	for (i = 0; i < rows; i++)
+        {
+		x[i] = a*i;
+        }
+}
+
+/* *************************************************************** */
+void fill_i_vector_range_2(int *x, int a, int rows)
+{
+	int i = 0, count = 0;
+
+	for (i = rows - 1; i >= 0; i--)
+	{
+		x[i] = a*count;
+		count ++;
+	}
+
+}
+
+/* *************************************************************** */
+void fill_i_vector_scalar(int *x, int a, int rows)
+{
+	int i = 0;
+
+	for (i = 0; i < rows; i++)
+        {
+		x[i] = a;
+        }
+}
+
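
compute_time_difference() above is meant to be wrapped around a benchmark run;
a minimal sketch of the intended usage (the benchmarked call itself is just a
placeholder):

    #include <sys/time.h>
    #include <grass/gis.h>
    #include "test_gmath_lib.h"

    /* Measure the wall-clock time of a benchmarked call */
    static void sketch_timing(void)
    {
        struct timeval start, end;

        gettimeofday(&start, NULL);
        /* ... run the solver or blas call to be benchmarked ... */
        gettimeofday(&end, NULL);

        G_message("Computation time: %g s", compute_time_difference(start, end));
    }
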

Added: grass/branches/develbranch_6/lib/gmath/test/test_tools_les.c
===================================================================
--- grass/branches/develbranch_6/lib/gmath/test/test_tools_les.c	                        (rev 0)
+++ grass/branches/develbranch_6/lib/gmath/test/test_tools_les.c	2009-08-27 20:57:08 UTC (rev 38890)
@@ -0,0 +1,479 @@
+/*****************************************************************************
+ *
+ * MODULE:       Grass Gmath Library
+ * AUTHOR(S):    Soeren Gebbert, Berlin (GER) Dec 2006
+ * 		soerengebbert <at> gmx <dot> de
+ *               
+ * PURPOSE:      functions to manage linear equation systems
+ * 		part of the gmath library
+ *               
+ * COPYRIGHT:    (C) 2000 by the GRASS Development Team
+ *
+ *               This program is free software under the GNU General Public
+ *               License (>=v2). Read the file COPYING that comes with GRASS
+ *               for details.
+ *
+ *****************************************************************************/
+
+#include "test_gmath_lib.h"
+#include <stdlib.h>
+#include <math.h>
+
+/*!
+ * \brief Allocate memory for a quadratic or non-quadratic linear equation system including the matrix A, the vector x and the vector b
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param cols int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_nquad_les(int rows, int cols, int type)
+{
+	return G_math_alloc_les_param(rows, cols, type, 2);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic or non-quadratic linear equation system including the matrix A and the vector x
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param cols int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_nquad_les_Ax(int rows, int cols, int type)
+{
+	return G_math_alloc_les_param(rows, cols, type, 1);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic or non-quadratic linear equation system including the matrix A
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param cols int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_nquad_les_A(int rows, int cols, int type)
+{
+	return G_math_alloc_les_param(rows, cols, type, 0);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic or non-quadratic linear equation system including the matrix A, the vector x and the vector b
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param cols int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_nquad_les_Ax_b(int rows, int cols, int type)
+{
+	return G_math_alloc_les_param(rows, cols, type, 2);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic linear equation system including the matrix A, the vector x and the vector b
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_les(int rows, int type)
+{
+	return G_math_alloc_les_param(rows, rows, type, 2);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic linear equation system including the matrix A and the vector x
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_les_Ax(int rows, int type)
+{
+	return G_math_alloc_les_param(rows, rows, type, 1);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic linear equation system including the matrix A
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_les_A(int rows, int type)
+{
+	return G_math_alloc_les_param(rows, rows, type, 0);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic linear equation system including the matrix A, the vector x and the vector b
+ *
+ * This function calls #G_math_alloc_les_param
+ *
+ * \param rows int
+ * \param type int
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_les_Ax_b(int rows, int type)
+{
+	return G_math_alloc_les_param(rows, rows, type, 2);
+}
+
+/*!
+ * \brief Allocate memory for a quadratic or non-quadratic linear equation system
+ *
+ * The type of the linear equation system must be G_MATH_NORMAL_LES for
+ * a regular (dense) matrix or G_MATH_SPARSE_LES for a sparse matrix.
+ *
+ * <p>
+ * In case of G_MATH_NORMAL_LES
+ *
+ * a dense matrix of size rows * cols * sizeof(double) will be allocated.
+ *
+ * <p>
+ * In case of G_MATH_SPARSE_LES
+ *
+ * an array of rows pointers will be allocated, ready to hold the sparse
+ * row vectors that are attached later. Each sparse row vector may have a
+ * different size.
+ *
+ * Parameter parts defines which parts of the les should be allocated.
+ * The number of rows and columns determines whether the matrix is quadratic.
+ *
+ * \param rows int
+ * \param cols int
+ * \param type int
+ * \param parts int -- 2 = A, x and b; 1 = A and x; 0 = A allocated
+ * \return G_math_les *
+ *
+ * */
+G_math_les *G_math_alloc_les_param(int rows, int cols, int type, int parts)
+{
+	G_math_les *les;
+
+	if (type == G_MATH_SPARSE_LES)
+		G_debug(
+				2,
+				"Allocate memory for a sparse linear equation system with %i rows\n",
+				rows);
+	else
+		G_debug(
+				2,
+				"Allocate memory for a regular linear equation system with %i rows and %i cols\n",
+				rows, cols);
+
+	les = (G_math_les *) G_calloc(1, sizeof(G_math_les));
+	les->x = NULL;
+	les->b = NULL;
+
+	if (parts > 0)
+	{
+		les->x = (double *)G_calloc(cols, sizeof(double));
+	}
+
+	if (parts > 1)
+	{
+		les->b = (double *)G_calloc(cols, sizeof(double));
+	}
+
+	les->A = NULL;
+	les->data = NULL;
+	les->Asp = NULL;
+	les->rows = rows;
+	les->cols = cols;
+	les->symm = 0;
+	les->bandwith = cols;
+	if (rows == cols)
+		les->quad = 1;
+	else
+		les->quad = 0;
+
+	if (type == G_MATH_SPARSE_LES)
+	{
+		les->Asp = (G_math_spvector **) G_calloc(rows,
+				sizeof(G_math_spvector *));
+		les->type = G_MATH_SPARSE_LES;
+	}
+	else
+	{
+		les->A = G_alloc_matrix(rows, cols);
+		/*save the start pointer of the matrix*/
+		les->data = les->A[0];
+		les->type = G_MATH_NORMAL_LES;
+	}
+
+	return les;
+}
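As a rough usage sketch of the allocation interface (not part of the commit; it assumes the prototypes are exposed through grass/gmath.h):

#include <grass/gmath.h>

void alloc_les_sketch(void)
{
	/* quadratic 100x100 dense system with A, x and b */
	G_math_les *dense = G_math_alloc_les(100, G_MATH_NORMAL_LES);

	/* non-quadratic 100x50 dense system holding only A and x */
	G_math_les *rect = G_math_alloc_nquad_les_Ax(100, 50, G_MATH_NORMAL_LES);

	/* sparse system: Asp is an array of 100 G_math_spvector pointers,
	 * the individual sparse rows are attached later */
	G_math_les *sparse = G_math_alloc_les(100, G_MATH_SPARSE_LES);

	G_math_free_les(dense);
	G_math_free_les(rect);
	G_math_free_les(sparse);
}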
+
+/***************** Floating point version ************************/
+
+G_math_f_les *G_math_alloc_f_les(int rows, int type)
+{
+	return G_math_alloc_f_les_param(rows, rows, type, 2);
+}
+
+G_math_f_les *G_math_alloc_f_nquad_les_A(int rows, int cols, int type)
+{
+	return G_math_alloc_f_les_param(rows, cols, type, 0);
+}
+
+G_math_f_les *G_math_alloc_f_les_param(int rows, int cols, int type, int parts)
+{
+	G_math_f_les *les;
+
+	G_debug(
+			2,
+			"Allocate memory for a regular float linear equation system with %i rows\n",
+			rows);
+
+	les = (G_math_f_les *) G_calloc(1, sizeof(G_math_f_les));
+	les->x = NULL;
+	les->b = NULL;
+
+	if (parts > 0)
+	{
+		les->x = (float *)G_calloc(cols, sizeof(float));
+	}
+
+	if (parts > 1)
+	{
+		les->b = (float *)G_calloc(cols, sizeof(float));
+	}
+
+	les->A = NULL;
+	les->data = NULL;
+	les->rows = rows;
+	les->cols = cols;
+	les->symm = 0;
+	les->bandwith = cols;
+	if (rows == cols)
+		les->quad = 1;
+	else
+		les->quad = 0;
+
+	les->A = G_alloc_fmatrix(rows, cols);
+	/*save the start pointer of the matrix*/
+	les->data = les->A[0];
+	les->type = G_MATH_NORMAL_LES;
+
+	return les;
+}
+
+/*!
+ * \brief Adds a sparse vector to a sparse linear equation system at position row
+ *
+ * Return 1 for success and -1 for failure
+ *
+ * \param les G_math_les *
+ * \param spvector G_math_spvector * 
+ * \param row int
+ * \return int 1 success, -1 failure
+ *
+ * */
+int G_math_add_spvector_to_les(G_math_les * les, G_math_spvector * spvector,
+		int row)
+{
+
+	if (les != NULL)
+	{
+		if (les->type != G_MATH_SPARSE_LES)
+			return -1;
+
+		if (les->rows > row)
+		{
+			G_debug(
+					5,
+					"Add sparse vector %p to the sparse linear equation system at row %i\n",
+					spvector, row);
+			les->Asp[row] = spvector;
+		}
+		else
+			return -1;
+
+	}
+	else
+	{
+		return -1;
+	}
+
+	return 1;
+}
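A short sketch of assembling a sparse system row by row with this function (not part of the commit; it assumes G_math_alloc_spvector() from the newly added sparse_matrix.c allocates the index and values arrays for the requested number of non-zero entries):

#include <grass/gmath.h>

/* Build a small tridiagonal sparse system, in the same spirit as the
 * create_sparse_*_les helpers in test_tools.c */
G_math_les *tridiag_sketch(int rows)
{
	int i;
	G_math_les *les = G_math_alloc_les(rows, G_MATH_SPARSE_LES);

	for (i = 0; i < rows; i++)
	{
		/* at most three non-zero entries per row */
		G_math_spvector *spv = G_math_alloc_spvector(3);
		int k = 0;

		if (i > 0)
		{
			spv->values[k] = -1.0;
			spv->index[k] = i - 1;
			k++;
		}
		spv->values[k] = 2.0;
		spv->index[k] = i;
		k++;
		if (i < rows - 1)
		{
			spv->values[k] = -1.0;
			spv->index[k] = i + 1;
			k++;
		}
		/* cols holds the number of non-zero entries actually stored */
		spv->cols = k;

		if (G_math_add_spvector_to_les(les, spv, i) != 1)
			G_math_free_spvector(spv);

		les->b[i] = 1.0;
		les->x[i] = 0.0;
	}

	return les;
}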
+
+/*!
+ *
+ * \brief Prints the linear equation system to stdout
+ *
+ * <p>
+ * Format:
+ * A*x = b
+ *
+ * <p>
+ * Example
+ \verbatim
+ 
+ 2 1 1 1 * 2 = 0.1
+ 1 2 0 0 * 3 = 0.2
+ 1 0 2 0 * 3 = 0.2
+ 1 0 0 2 * 2 = 0.1
+ 
+ \endverbatim
+ *
+ * \param les G_math_les * 
+ * \return void
+ *  
+ * */
+void G_math_print_les(G_math_les * les)
+{
+	int i, j, k, out;
+
+        if (les->type == G_MATH_SPARSE_LES)
+	{
+		for (i = 0; i < les->rows; i++)
+		{
+			for (j = 0; j < les->cols; j++)
+			{
+				out = 0;
+				for (k = 0; k < les->Asp[i]->cols; k++)
+				{
+					if (les->Asp[i]->index[k] == j)
+					{
+						fprintf(stdout, "%4.5f ", les->Asp[i]->values[k]);
+						out = 1;
+					}
+				}
+				if (!out)
+					fprintf(stdout, "%4.5f ", 0.0);
+			}
+			if (les->x)
+				fprintf(stdout, "  *  %4.5f", les->x[i]);
+			if (les->b)
+				fprintf(stdout, " =  %4.5f ", les->b[i]);
+
+			fprintf(stdout, "\n");
+		}
+	}
+	else
+	{
+
+		for (i = 0; i < les->rows; i++)
+		{
+			for (j = 0; j < les->cols; j++)
+			{
+				fprintf(stdout, "%4.5f ", les->A[i][j]);
+			}
+			if (les->x)
+				fprintf(stdout, "  *  %4.5f", les->x[i]);
+			if (les->b)
+				fprintf(stdout, " =  %4.5f ", les->b[i]);
+
+			fprintf(stdout, "\n");
+		}
+
+	}
+	return;
+}
+
+/*!
+ * \brief Release the memory of the linear equation system
+ *
+ * \param les G_math_les *            
+ * \return void
+ *
+ * */
+
+void G_math_free_les(G_math_les * les)
+{
+	int i;
+
+	if (les)
+	{
+		if (les->type == G_MATH_SPARSE_LES)
+			G_debug(2, "Releasing memory of a sparse linear equation system\n");
+		else
+			G_debug(2, "Releasing memory of a regular linear equation system\n");
+
+		if (les->x)
+			G_free(les->x);
+		if (les->b)
+			G_free(les->b);
+
+		if (les->type == G_MATH_SPARSE_LES)
+		{
+
+			if (les->Asp)
+			{
+				for (i = 0; i < les->rows; i++)
+					if (les->Asp[i])
+						G_math_free_spvector(les->Asp[i]);
+
+				G_free(les->Asp);
+			}
+		}
+		else
+		{
+			/* We don't know if the rows have been swapped by pivoting,
+			 * so we restore the original data pointer */
+			les->A[0] = les->data;
+			G_free_matrix(les->A);
+		}
+
+		G_free(les);
+	}
+
+	return;
+}
+
+/*!
+ * \brief Release the memory of the float linear equation system
+ *
+ * \param les G_math_f_les *            
+ * \return void
+ *
+ * */
+
+void G_math_free_f_les(G_math_f_les * les)
+{
+	G_debug(2, "Releasing memory of a regular float linear equation system\n");
+
+	if (les)
+	{
+
+		if (les->x)
+			G_free(les->x);
+		if (les->b)
+			G_free(les->b);
+
+		/* We don't know if the rows have been swapped by pivoting,
+		 * so we restore the original data pointer */
+		les->A[0] = les->data;
+		G_free_fmatrix(les->A);
+
+		G_free(les);
+	}
+
+	return;
+}
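The float variant follows the same pattern; a minimal sketch (not part of the commit):

#include <grass/gmath.h>

void f_les_sketch(void)
{
	int i, rows = 4;
	G_math_f_les *fles = G_math_alloc_f_les(rows, G_MATH_NORMAL_LES);

	/* fles->A is a rows x rows float matrix, fles->x and fles->b are
	 * float vectors of length rows */
	for (i = 0; i < rows; i++)
	{
		fles->A[i][i] = 2.0f;
		fles->b[i] = 1.0f;
		fles->x[i] = 0.0f;
	}

	G_math_free_f_les(fles);
}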


