[GRASS-SVN] r51056 - in grass-addons/grass7/imagery: . i.vi.mpi

svn_grass at osgeo.org svn_grass at osgeo.org
Wed Mar 14 06:48:51 EDT 2012


Author: ychemin
Date: 2012-03-14 03:48:51 -0700 (Wed, 14 Mar 2012)
New Revision: 51056

Added:
   grass-addons/grass7/imagery/i.vi.mpi/
   grass-addons/grass7/imagery/i.vi.mpi/Makefile
   grass-addons/grass7/imagery/i.vi.mpi/i.vi.mpi.html
   grass-addons/grass7/imagery/i.vi.mpi/main.c
   grass-addons/grass7/imagery/i.vi.mpi/mpi.h
   grass-addons/grass7/imagery/i.vi.mpi/run.sh
Log:
Replacement of version for grass6, tested with ndvi mode

Added: grass-addons/grass7/imagery/i.vi.mpi/Makefile
===================================================================
--- grass-addons/grass7/imagery/i.vi.mpi/Makefile	                        (rev 0)
+++ grass-addons/grass7/imagery/i.vi.mpi/Makefile	2012-03-14 10:48:51 UTC (rev 51056)
@@ -0,0 +1,12 @@
+MODULE_TOPDIR = ../..
+
+
+PGM = i.vi.mpi
+
+LIBES = $(RASTERLIB) $(GISLIB) $(MATHLIB)
+DEPENDENCIES = $(RASTERDEP) $(GISDEP) 
+
+include $(MODULE_TOPDIR)/include/Make/Module.make
+
+default: cmd
+CC = mpicc 

Added: grass-addons/grass7/imagery/i.vi.mpi/i.vi.mpi.html
===================================================================
--- grass-addons/grass7/imagery/i.vi.mpi/i.vi.mpi.html	                        (rev 0)
+++ grass-addons/grass7/imagery/i.vi.mpi/i.vi.mpi.html	2012-03-14 10:48:51 UTC (rev 51056)
@@ -0,0 +1,62 @@
+<h2>DESCRIPTION</h2>
+
+<em>i.vi.mpi</em> calculates vegetation indices based on biophysical parameters. 
+
+
+	  <ul>
+		<li>RVI: ratio vegetation index:</li>
+		<li>NDVI: Normalized Difference Vegetation Index</li>
+		<li>IPVI: Infrared Percentage Vegetation Index</li>
+		<li>DVI: Difference Vegetation Index</li>
+		<li>PVI: Perpendicular Vegetation Index</li>
+		<li>WDVI: Weighted Difference Vegetation Index</li>
+		<li>SAVI: Soil Adjusted Vegetation Index</li>
+		<li>GARI: Green atmospherically resistant vegetation index</li>
+		<li>MSAVI: Modified Soil Adjusted Vegetation Index</li>
+		<li>MSAVI2: second Modified Soil Adjusted Vegetation Index</li>
+		<li>GEMI: Global Environmental Monitoring Index</li>
+		<li>ARVI: atmospherically resistant vegetation indices</li>
+		<li>GVI: Green Vegetation Index</li>
+	  </ul>
+
+<h2>EXAMPLE</h2>
+
+NDVI
+	  <ul> 
+	      <li>Data Type Band Numbers ([IR, Red])  </li>
+	      <li>TM Bands= [4,3] </li>
+	      <li>MSS Bands = [7, 5] </li>
+	      <li>AVHRR Bands = [2, 1] </li>
+	      <li>SPOT XS Bands = [3, 2] </li>
+	      <li>AVIRIS Bands = [51, 29] </li>
+	      <li> example: (AVHRR) NDVI = (channel 2 - channel 1) / (channel 2 + channel 1)</li>
+	  </ul>
+
+<h2>NOTES</h2>
+Originally from kepler.gps.caltech.edu <br>
+A FAQ on Vegetation in Remote Sensing  <br>
+Written by Terrill W. Ray <br>
+	   Div. of Geological and Planetary Sciences<br>
+	   California Institute of Technology<br>
+email: terrill at mars1.gps.caltech.edu<br>
+Snail Mail:  Terrill Ray<br>
+	     Division of Geological and Planetary Sciences<br>
+	     Caltech<br>
+	     Mail Code 170-25<br>
+	     Pasadena, CA  91125<br>
+
+<h2>TODO</h2>
+
+
+<h2>SEE ALSO</h2>
+
+<em>
+<a href="i.albedo.html">i.albedo</a><br>
+</em>
+
+
+<h2>AUTHORS</h2>
+Shamim Akhter, Baburao Kamble and Yann Chemin, GRASS Development Team.
+
+<p>
+<i>Last changed: $Date: 2012-03-14 </i>

Added: grass-addons/grass7/imagery/i.vi.mpi/main.c
===================================================================
--- grass-addons/grass7/imagery/i.vi.mpi/main.c	                        (rev 0)
+++ grass-addons/grass7/imagery/i.vi.mpi/main.c	2012-03-14 10:48:51 UTC (rev 51056)
@@ -0,0 +1,941 @@
+
+/****************************************************************************
+ *
+ * MODULE:     i.vi.mpi
+ * AUTHOR(S):  Shamim Akhter shamimakhter at gmail.com
+		 Baburao Kamble baburaokamble at gmail.com
+ *		 Yann Chemin - ychemin at gmail.com
+ * PURPOSE:    Calculates 13 vegetation indices 
+ * 		 based on biophysical parameters. 
+ *
+ * COPYRIGHT:  (C) 2006 by the Tokyo Institute of Technology, Japan
+ * 	       (C) 2002-2006 by the GRASS Development Team
+ *
+ *             This program is free software under the GNU General Public
+ *   	    	 License (>=v2). Read the file COPYING that comes with GRASS
+ *   	    	 for details.
+ * 
+ * Remark:              
+ *		 These are generic indices that use red and nir for most of them. 
+ *             They can be used with any standard satellite sensor providing visible and IR bands.
+ *		 However arvi uses red, nir and blue; 
+ *		 GVI uses B,G,R,NIR, chan5 and chan7 of Landsat;
+ *		 and GARI uses B,G,R and NIR.      
+ *
+ *****************************************************************************/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include "grass/gis.h"
+#include "grass/raster.h"
+#include "grass/glocale.h"
+#include "mpi.h"
+
+int main(int argc, char *argv[])
+{
+    int me, host_n, nrows, ncols;
+    int NUM_HOSTS;
+    MPI_Status status;
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &NUM_HOSTS);
+    MPI_Comm_rank(MPI_COMM_WORLD, &me);
+
+    if (!me) {
+	struct Cell_head cellhd;	/*region+header info */
+	char *mapset;		/*mapset name */
+	int row, col, row_n;
+	char *viflag;		/*Switch for particular index */
+	struct GModule *module;
+	struct Option *input1, *input2, *input3, *input4, *input5, *input6,
+	    *input7, *input8, *output;
+	struct Flag *flag1;
+	struct History history;	/*metadata */
+	struct Colors colors;	/*colors */
+	char *name;		/*input raster name */
+	char *result;		/*output raster name */
+	/*File Descriptors */
+	int infd_redchan, infd_nirchan, infd_greenchan, infd_bluechan,
+	    infd_chan5chan, infd_chan7chan;
+	int outfd;
+	char *bluechan, *greenchan, *redchan, *nirchan, *chan5chan,
+	    *chan7chan;
+	int i = 0, j = 0, temp;
+	void *inrast_redchan, *inrast_nirchan, *inrast_greenchan,
+	    *inrast_bluechan, *inrast_chan5chan, *inrast_chan7chan;
+	DCELL *outrast;
+	RASTER_MAP_TYPE data_type_output = DCELL_TYPE;
+	RASTER_MAP_TYPE data_type_redchan;
+	RASTER_MAP_TYPE data_type_nirchan;
+	RASTER_MAP_TYPE data_type_greenchan;
+	RASTER_MAP_TYPE data_type_bluechan;
+	RASTER_MAP_TYPE data_type_chan5chan;
+	RASTER_MAP_TYPE data_type_chan7chan;
+	CELL val1, val2;
+	/************************************/
+	G_gisinit(argv[0]);
+
+	module = G_define_module();
+	G_add_keyword(_("vegetation index"));
+	G_add_keyword(_("biophysical parameters"));
+	module->label = _("Calculates different types of vegetation indices (mpi)");
+	module->description = _("13 types of vegetation indices from red and nir,"
+				"and only some requiring additional bands");
+
+	/* Define the different options */
+	input1 = G_define_option();
+	input1->key = _("viname");
+	input1->type = TYPE_STRING;
+	input1->required = YES;
+	input1->gisprompt = _("Name of VI");
+	input1->description =_("Name of VI");
+	input1->descriptions =_("dvi;Difference Vegetation Index;"
+			    "evi;Enhanced Vegetation Index;"
+			    "gvi;Green Vegetation Index;"
+			    "gari;Green atmospherically resistant vegetation index;"
+			    "gemi;Global Environmental Monitoring Index;"
+			    "ipvi;Infrared Percentage Vegetation Index;"
+			    "msavi;Modified Soil Adjusted Vegetation Index;"
+			    "msavi2;second Modified Soil Adjusted Vegetation Index;"
+			    "ndvi;Normalized Difference Vegetation Index;"
+			    "pvi;Perpendicular Vegetation Index;"
+			    "savi;Soil Adjusted Vegetation Index;"
+                            "sr;Simple Ratio;"
+                            "vari;Visible Atmospherically Resistant Index;"
+			    "wdvi;Weighted Difference Vegetation Index;");
+	input1->options = "dvi,evi,gvi,gari,gemi,ipvi,msavi,msavi2,ndvi,pvi,savi,sr,vari,wdvi";
+	input1->answer = "ndvi";
+
+	input2 = G_define_standard_option(G_OPT_R_INPUT);
+	input2->key = "red";
+	input2->label =
+	    _("Name of the RED Channel surface reflectance map [0.0;1.0]");
+	input2->description = ("Range: [0.0;1.0]");
+
+	input3 = G_define_standard_option(G_OPT_R_INPUT);
+	input3->key = "nir";
+	input3->label =
+	    _("Name of the NIR Channel surface reflectance map [0.0;1.0]");
+	input3->description = ("Range: [0.0;1.0]");
+
+	input4 = G_define_standard_option(G_OPT_R_INPUT);
+	input4->key = "green";
+	input4->required = NO;
+	input4->label =
+	    _("Name of the GREEN Channel surface reflectance map [0.0;1.0]");
+	input4->description = ("Range: [0.0;1.0]");
+
+	input5 = G_define_standard_option(G_OPT_R_INPUT);
+	input5->key = "blue";
+	input5->required = NO;
+	input5->label =
+	    _("Name of the BLUE Channel surface reflectance map [0.0;1.0]");
+	input5->description = ("Range: [0.0;1.0]");
+
+	input6 = G_define_standard_option(G_OPT_R_INPUT);
+	input6->key = "chan5";
+	input6->required = NO;
+	input6->label =
+	    _("Name of the CHAN5 Channel surface reflectance map [0.0;1.0]");
+	input6->description = ("Range: [0.0;1.0]");
+
+	input7 = G_define_standard_option(G_OPT_R_INPUT);
+	input7->key = "chan7";
+	input7->required = NO;
+	input7->label =
+	    _("Name of the CHAN7 Channel surface reflectance map [0.0;1.0]");
+	input7->description = ("Range: [0.0;1.0]");
+
+	input8 = G_define_option();
+	input8->key = "tmp";
+	input8->type = TYPE_INTEGER;
+	input8->required = NO;
+	input8->gisprompt = _("no of operation value");
+	input8->label = _("User input for number of operation");
+
+	output = G_define_standard_option(G_OPT_R_OUTPUT);
+	output->label = _("Name of the output vi layer");
+
+		/********************/
+	if (G_parser(argc, argv))
+	    exit(EXIT_FAILURE);
+	viflag = input1->answer;
+	redchan = input2->answer;
+	nirchan = input3->answer;
+	greenchan = input4->answer;
+	bluechan = input5->answer;
+	chan5chan = input6->answer;
+	chan7chan = input7->answer;
+	temp = atoi(input8->answer);
+
+	result = output->answer;
+
+	if (!strcasecmp(viflag, "sr") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("sr index requires red and nir maps"));
+	if (!strcasecmp(viflag, "ndvi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("ndvi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "ipvi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("ipvi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "dvi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("dvi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "pvi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("pvi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "wdvi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("wdvi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "savi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("savi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "msavi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("msavi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "msavi2") && (!(input2->answer) || !(input3->answer)
+		||!(input8->answer)) )
+		G_fatal_error(_("msavi2 index requires red and nir maps, and 3 parameters related to soil line"));
+	if (!strcasecmp(viflag, "gemi") && (!(input2->answer) || !(input3->answer)) )
+		G_fatal_error(_("gemi index requires red and nir maps"));
+	if (!strcasecmp(viflag, "arvi") && (!(input2->answer) || !(input3->answer)
+                || !(input5->answer)) )
+		G_fatal_error(_("arvi index requires blue, red and nir maps"));
+	if (!strcasecmp(viflag, "evi") && (!(input2->answer) || !(input3->answer)
+                || !(input5->answer)) )
+		G_fatal_error(_("evi index requires blue, red and nir maps"));
+	if (!strcasecmp(viflag, "vari") && (!(input2->answer) || !(input4->answer)
+                || !(input5->answer)) )
+		G_fatal_error(_("vari index requires blue, green and red maps"));
+	if (!strcasecmp(viflag, "gari") && (!(input2->answer) || !(input3->answer)
+                || !(input4->answer) || !(input5->answer)) )
+		G_fatal_error(_("gari index requires blue, green, red and nir maps"));
+	if (!strcasecmp(viflag, "gvi") && (!(input2->answer) || !(input3->answer)
+                || !(input4->answer) || !(input5->answer)
+                || !(input6->answer) || !(input7->answer)) )
+		G_fatal_error(_("gvi index requires blue, green, red, nir, chan5 and chan7 maps"));
+	/***************************************************/
+	infd_redchan = Rast_open_old(redchan, "");
+	data_type_redchan = Rast_map_type(redchan, "");
+	inrast_redchan = Rast_allocate_buf(data_type_redchan);
+        infd_nirchan = Rast_open_old(nirchan, "");
+        data_type_nirchan = Rast_map_type(nirchan, "");
+        inrast_nirchan = Rast_allocate_buf(data_type_nirchan);
+	if (greenchan) {
+        	infd_greenchan = Rast_open_old(greenchan, "");
+        	data_type_greenchan = Rast_map_type(greenchan, "");
+        	inrast_greenchan = Rast_allocate_buf(data_type_greenchan);
+    	}
+	if (bluechan) {
+	        infd_bluechan = Rast_open_old(bluechan, "");
+        	data_type_bluechan = Rast_map_type(bluechan, "");
+        	inrast_bluechan = Rast_allocate_buf(data_type_bluechan);
+    	}
+	if (chan5chan) {
+        	infd_chan5chan = Rast_open_old(chan5chan, "");
+        	data_type_chan5chan = Rast_map_type(chan5chan, "");
+        	inrast_chan5chan = Rast_allocate_buf(data_type_chan5chan);
+    	}
+	if (chan7chan) {
+        	infd_chan7chan = Rast_open_old(chan7chan, "");
+      	 	data_type_chan7chan = Rast_map_type(chan7chan, "");
+        	inrast_chan7chan = Rast_allocate_buf(data_type_chan7chan);
+    	}
+	nrows = Rast_window_rows();
+    	ncols = Rast_window_cols();
+	/* Create New raster files */
+    	outfd = Rast_open_new(result, DCELL_TYPE);
+    	outrast = Rast_allocate_d_buf();
+	/***************************************************/
+	double db[6][ncols], R[ncols + 1], outputImage[NUM_HOSTS][ncols];
+	int I[ncols + 1];
+	host_n = 1;
+	G_message("tmp=%d", temp);
+	for (i = 1; i < NUM_HOSTS; i++) {
+	    MPI_Send(&temp, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
+	    MPI_Send(&nrows, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
+	    MPI_Send(&ncols, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
+	}
+	/* Process pixels */
+	int k, r, nh, cn;
+	for (r = 1; r * (NUM_HOSTS - 1) <= nrows; r++) {
+	    for (k = 1; k < NUM_HOSTS; k++) {
+		row = (r - 1) * (NUM_HOSTS - 1) + k - 1;
+		DCELL d_bluechan;
+		DCELL d_greenchan;
+		DCELL d_redchan;
+		DCELL d_nirchan;
+		DCELL d_chan5chan;
+		DCELL d_chan7chan;
+		G_percent(row, nrows, 2);
+		/* read input maps */
+		Rast_get_row(infd_redchan,inrast_redchan,row,data_type_redchan);
+	    	Rast_get_row(infd_nirchan,inrast_nirchan,row,data_type_nirchan);
+		if (bluechan) {
+	    	Rast_get_row(infd_bluechan,inrast_bluechan,row,data_type_bluechan);
+		}
+		if (greenchan) {
+	    	Rast_get_row(infd_greenchan,inrast_greenchan,row,data_type_greenchan);
+		}
+		if (chan5chan) {
+	    	Rast_get_row(infd_chan5chan,inrast_chan5chan,row,data_type_chan5chan);
+		}
+		if (chan7chan) {
+	    	Rast_get_row(infd_chan7chan,inrast_chan7chan,row,data_type_chan7chan);
+		}
+		/*process the data */
+		for (col = 0; col < ncols; col++) {
+		    switch (data_type_redchan) {
+		    case CELL_TYPE:
+			d_redchan = (double)((CELL *) inrast_redchan)[col];
+			break;
+		    case FCELL_TYPE:
+			d_redchan = (double)((FCELL *) inrast_redchan)[col];
+			break;
+		    case DCELL_TYPE:
+			d_redchan = ((DCELL *) inrast_redchan)[col];
+			break;
+		    }
+		    switch (data_type_nirchan) {
+		    case CELL_TYPE:
+			d_nirchan = (double)((CELL *) inrast_nirchan)[col];
+			break;
+		    case FCELL_TYPE:
+			d_nirchan = (double)((FCELL *) inrast_nirchan)[col];
+			break;
+		    case DCELL_TYPE:
+			d_nirchan = ((DCELL *) inrast_nirchan)[col];
+			break;
+		    }
+		    if (greenchan) {
+			switch (data_type_greenchan) {
+			case CELL_TYPE:
+			    d_greenchan =
+				(double)((CELL *) inrast_greenchan)[col];
+			    break;
+			case FCELL_TYPE:
+			    d_greenchan =
+				(double)((FCELL *) inrast_greenchan)[col];
+			    break;
+			case DCELL_TYPE:
+			    d_greenchan = ((DCELL *) inrast_greenchan)[col];
+			    break;
+			}
+		    }
+		    if (bluechan) {
+			switch (data_type_bluechan) {
+			case CELL_TYPE:
+			    d_bluechan =
+				(double)((CELL *) inrast_bluechan)[col];
+			    break;
+			case FCELL_TYPE:
+			    d_bluechan =
+				(double)((FCELL *) inrast_bluechan)[col];
+			    break;
+			case DCELL_TYPE:
+			    d_bluechan = ((DCELL *) inrast_bluechan)[col];
+			    break;
+			}
+		    }
+		    if (chan5chan) {
+			switch (data_type_chan5chan) {
+			case CELL_TYPE:
+			    d_chan5chan =
+				(double)((CELL *) inrast_chan5chan)[col];
+			    break;
+			case FCELL_TYPE:
+			    d_chan5chan =
+				(double)((FCELL *) inrast_chan5chan)[col];
+			    break;
+			case DCELL_TYPE:
+			    d_chan5chan = ((DCELL *) inrast_chan5chan)[col];
+			    break;
+			}
+		    }
+		    if (chan7chan) {
+			switch (data_type_chan7chan) {
+			case CELL_TYPE:
+			    d_chan7chan =
+				(double)((CELL *) inrast_chan7chan)[col];
+			    break;
+			case FCELL_TYPE:
+			    d_chan7chan =
+				(double)((FCELL *) inrast_chan7chan)[col];
+			    break;
+			case DCELL_TYPE:
+			    d_chan7chan = ((DCELL *) inrast_chan7chan)[col];
+			    break;
+			}
+		    }
+
+		    db[0][col] = d_redchan;
+		    db[1][col] = d_nirchan;
+		    db[2][col] = d_greenchan;
+		    db[3][col] = d_bluechan;
+		    db[4][col] = d_chan5chan;
+		    db[5][col] = d_chan7chan;
+		    if (Rast_is_d_null_value(&d_redchan)) {
+			i = 0;
+		    }
+		    else if (Rast_is_d_null_value(&d_nirchan)) {
+			i = 0;
+		    }
+		    else if ((greenchan) && Rast_is_d_null_value(&d_greenchan)) {
+			i = 0;
+		    }
+		    else if ((bluechan) && Rast_is_d_null_value(&d_bluechan)) {
+			i = 0;
+		    }
+		    else if ((chan5chan) && Rast_is_d_null_value(&d_chan5chan)) {
+			i = 0;
+		    }
+		    else if ((chan7chan) && Rast_is_d_null_value(&d_chan7chan)) {
+			i = 0;
+		    }
+		    else {
+			/************************************/
+			/*calculate simple_ratio        */
+			if (!strcasecmp(viflag, "sr")) {
+			    i = 1;
+			}
+			/*calculate ndvi                    */
+			if (!strcasecmp(viflag, "ndvi")) {
+			    i = 2;
+			}
+			/*calculate ipvi                    */
+			if (!strcasecmp(viflag, "ipvi")) {
+			    i = 3;
+			}
+			/*calculate dvi             */
+			if (!strcasecmp(viflag, "dvi")) {
+			    i = 4;
+			}
+			/*calculate pvi             */
+			if (!strcasecmp(viflag, "pvi")) {
+			    i = 5;
+			}
+			/*calculate wdvi                    */
+			if (!strcasecmp(viflag, "wdvi")) {
+			    i = 6;
+			}
+			/*calculate savi                    */
+			if (!strcasecmp(viflag, "savi")) {
+			    i = 7;
+			}
+			/*calculate msavi                   */
+			if (!strcasecmp(viflag, "msavi")) {
+			    i = 8;
+			}
+			/*calculate msavi2            */
+			if (!strcasecmp(viflag, "msavi2")) {
+			    i = 9;
+			}
+			/*calculate gemi                    */
+			if (!strcasecmp(viflag, "gemi")) {
+			    i = 10;
+			}
+			/*calculate arvi                    */
+			if (!strcasecmp(viflag, "arvi")) {
+			    i = 11;
+			}
+			/*calculate gvi            */
+			if (!strcasecmp(viflag, "gvi")) {
+			    i = 12;
+			}
+			/*calculate gari                    */
+			if (!strcasecmp(viflag, "gari")) {
+			    i = 13;
+			}
+			I[col] = i;
+
+		    }	/*else */
+
+		}	/*col */
+		/*G_message("Row data was generated"); */
+		row_n = k - 1;
+		I[ncols] = row_n;
+		/*MPI_Send(&row_n,1,MPI_INT,k,1,MPI_COMM_WORLD); */
+		MPI_Send(I, ncols + 1, MPI_INT, k, 1, MPI_COMM_WORLD);
+		MPI_Send(db, 6 * ncols, MPI_DOUBLE, k, 1, MPI_COMM_WORLD);
+	    }	/*k */
+	    for (k = 1; k < NUM_HOSTS; k++) {
+		/*MPI_Recv(&row_n,1,MPI_INT,k,1,MPI_COMM_WORLD,&status); */
+		MPI_Recv(R, ncols + 1, MPI_DOUBLE, k, 1, MPI_COMM_WORLD,
+			 &status);
+		row_n = R[ncols];
+		for (cn = 0; cn < ncols; cn++)
+		    outputImage[row_n][cn] = R[cn];
+	    }
+
+	    for (k = 0; k < (NUM_HOSTS - 1); k++) {
+		for (j = 0; j < ncols; j++)
+		    outrast[j] = outputImage[k][j];
+		Rast_put_d_row(outfd, outrast);
+	    }
+	}/*r */
+	k = 1;
+	int lm = 0;
+
+	for (r = row + 1; r < nrows; r++) {
+	    /* printf("row %d, node %d\n",r,k); */
+	    DCELL d_bluechan;
+	    DCELL d_greenchan;
+	    DCELL d_redchan;
+	    DCELL d_nirchan;
+	    DCELL d_chan5chan;
+	    DCELL d_chan7chan;
+	    G_percent(row, nrows, 2);
+
+	    /* read input maps */
+	    Rast_get_row(infd_redchan,inrast_redchan,row,data_type_redchan);
+	    Rast_get_row(infd_nirchan,inrast_nirchan,row,data_type_nirchan);
+	    if (bluechan) {
+	    Rast_get_row(infd_bluechan,inrast_bluechan,row,data_type_bluechan);
+	    }
+	    if (greenchan) {
+	    Rast_get_row(infd_greenchan,inrast_greenchan,row,data_type_greenchan);
+	    }
+	    if (chan5chan) {
+	    Rast_get_row(infd_chan5chan,inrast_chan5chan,row,data_type_chan5chan);
+	    }
+	    if (chan7chan) {
+	    Rast_get_row(infd_chan7chan,inrast_chan7chan,row,data_type_chan7chan);
+	    }
+
+	    /*process the data */
+
+	    for (col = 0; col < ncols; col++) {
+		switch (data_type_redchan) {
+		case CELL_TYPE:
+		    d_redchan = (double)((CELL *) inrast_redchan)[col];
+		    break;
+		case FCELL_TYPE:
+		    d_redchan = (double)((FCELL *) inrast_redchan)[col];
+		    break;
+		case DCELL_TYPE:
+		    d_redchan = ((DCELL *) inrast_redchan)[col];
+		    break;
+		}
+		switch (data_type_nirchan) {
+		case CELL_TYPE:
+		    d_nirchan = (double)((CELL *) inrast_nirchan)[col];
+		    break;
+		case FCELL_TYPE:
+		    d_nirchan = (double)((FCELL *) inrast_nirchan)[col];
+		    break;
+		case DCELL_TYPE:
+		    d_nirchan = ((DCELL *) inrast_nirchan)[col];
+		    break;
+		}
+		if (greenchan) {
+		    switch (data_type_greenchan) {
+		    case CELL_TYPE:
+			d_greenchan =
+			    (double)((CELL *) inrast_greenchan)[col];
+			break;
+		    case FCELL_TYPE:
+			d_greenchan =
+			    (double)((FCELL *) inrast_greenchan)[col];
+			break;
+		    case DCELL_TYPE:
+			d_greenchan = ((DCELL *) inrast_greenchan)[col];
+			break;
+		    }
+		}
+		if (bluechan) {
+		    switch (data_type_bluechan) {
+		    case CELL_TYPE:
+			d_bluechan = (double)((CELL *) inrast_bluechan)[col];
+			break;
+		    case FCELL_TYPE:
+			d_bluechan = (double)((FCELL *) inrast_bluechan)[col];
+			break;
+		    case DCELL_TYPE:
+			d_bluechan = ((DCELL *) inrast_bluechan)[col];
+			break;
+		    }
+		}
+		if (chan5chan) {
+
+		    switch (data_type_chan5chan) {
+		    case CELL_TYPE:
+			d_chan5chan =
+			    (double)((CELL *) inrast_chan5chan)[col];
+			break;
+		    case FCELL_TYPE:
+			d_chan5chan =
+			    (double)((FCELL *) inrast_chan5chan)[col];
+			break;
+		    case DCELL_TYPE:
+			d_chan5chan = ((DCELL *) inrast_chan5chan)[col];
+			break;
+		    }
+		}
+		if (chan7chan) {
+		    switch (data_type_chan7chan) {
+		    case CELL_TYPE:
+			d_chan7chan =
+			    (double)((CELL *) inrast_chan7chan)[col];
+			break;
+		    case FCELL_TYPE:
+			d_chan7chan =
+			    (double)((FCELL *) inrast_chan7chan)[col];
+			break;
+		    case DCELL_TYPE:
+			d_chan7chan = ((DCELL *) inrast_chan7chan)[col];
+			break;
+		    }
+		}
+
+		db[0][col] = d_redchan;
+		db[1][col] = d_nirchan;
+		db[2][col] = d_greenchan;
+		db[3][col] = d_bluechan;
+		db[4][col] = d_chan5chan;
+		db[5][col] = d_chan7chan;
+
+		if (Rast_is_d_null_value(&d_redchan)) {
+		    i = 0;
+		}
+		else if (Rast_is_d_null_value(&d_nirchan)) {
+		    i = 0;
+		}
+		else if ((greenchan) && Rast_is_d_null_value(&d_greenchan)) {
+		    i = 0;
+		}
+		else if ((bluechan) && Rast_is_d_null_value(&d_bluechan)) {
+		    i = 0;
+		}
+		else if ((chan5chan) && Rast_is_d_null_value(&d_chan5chan)) {
+		    i = 0;
+		}
+		else if ((chan7chan) && Rast_is_d_null_value(&d_chan7chan)) {
+		    i = 0;
+		}
+		else {
+		    /************************************/
+		    /*calculate simple_ratio        */
+		    if (!strcasecmp(viflag, "sr")) {
+			i = 1;
+		    }
+		    /*calculate ndvi                    */
+		    if (!strcasecmp(viflag, "ndvi")) {
+			i = 2;
+		    }
+		    /*calculate ipvi                    */
+		    if (!strcasecmp(viflag, "ipvi")) {
+			i = 3;
+		    }
+		    /*calculate dvi             */
+		    if (!strcasecmp(viflag, "dvi")) {
+			i = 4;
+		    }
+		    /*calculate pvi             */
+		    if (!strcasecmp(viflag, "pvi")) {
+			i = 5;
+		    }
+		    /*calculate wdvi                    */
+		    if (!strcasecmp(viflag, "wdvi")) {
+			i = 6;
+		    }
+		    /*calculate savi                    */
+		    if (!strcasecmp(viflag, "savi")) {
+			i = 7;
+		    }
+		    /*calculate msavi                   */
+		    if (!strcasecmp(viflag, "msavi")) {
+			i = 8;
+		    }
+		    /*calculate msavi2            */
+		    if (!strcasecmp(viflag, "msavi2")) {
+			i = 9;
+		    }
+		    /*calculate gemi                    */
+		    if (!strcasecmp(viflag, "gemi")) {
+			i = 10;
+		    }
+		    /*calculate arvi                    */
+		    if (!strcasecmp(viflag, "arvi")) {
+			i = 11;
+		    }
+		    /*calculate gvi            */
+		    if (!strcasecmp(viflag, "gvi")) {
+			i = 12;
+		    }
+		    /*calculate gari                    */
+		    if (!strcasecmp(viflag, "gari")) {
+			i = 13;
+		    }
+		}
+		I[col] = i;
+	    }	/*col */
+	    row_n = k - 1;
+	    I[ncols] = row_n;
+	    /*MPI_Send(&row_n,1,MPI_INT,k,1,MPI_COMM_WORLD); */
+	    MPI_Send(I, ncols + 1, MPI_INT, k, 1, MPI_COMM_WORLD);
+	    MPI_Send(db, 6 * ncols, MPI_DOUBLE, k, 1, MPI_COMM_WORLD);
+	    k++;
+	    lm = 1;
+	}/*r */
+	if (lm) {
+	    for (nh = 1; nh < k; nh++) {
+		/*MPI_Recv(&row_n,1,MPI_INT,nh,1,MPI_COMM_WORLD,&status); */
+		MPI_Recv(R, ncols + 1, MPI_DOUBLE, nh, 1, MPI_COMM_WORLD,
+			 &status);
+		row_n = R[ncols];
+		for (cn = 0; cn < ncols; cn++)
+		    outputImage[row_n][cn] = R[cn];
+	    }
+	    for (nh = 0; nh < (k - 1); nh++) {
+		for (j = 0; j < ncols; j++)
+		    outrast[j] = outputImage[nh][j];
+		Rast_put_d_row(outfd, outrast);
+	    }
+	}
+	MPI_Finalize();
+	G_free(inrast_redchan);
+	Rast_close(infd_redchan);
+	G_free(inrast_nirchan);
+	Rast_close(infd_nirchan);
+	if (greenchan) {
+	    G_free(inrast_greenchan);
+	    Rast_close(infd_greenchan);
+	}
+	if (bluechan) {
+	    G_free(inrast_bluechan);
+	    Rast_close(infd_bluechan);
+	}
+	if (chan5chan) {
+	    G_free(inrast_chan5chan);
+	    Rast_close(infd_chan5chan);
+	}
+	if (chan7chan) {
+	    G_free(inrast_chan7chan);
+	    Rast_close(infd_chan7chan);
+	}
+	G_free(outrast);
+	Rast_close(outfd);
+
+    	/* Color from -1.0 to +1.0 in grey */
+    	Rast_init_colors(&colors);
+    	val1 = -1;
+    	val2 = 1;
+    	Rast_add_c_color_rule(&val1, 0, 0, 0, &val2, 255, 255, 255, &colors);
+    	Rast_short_history(result, "raster", &history);
+    	Rast_command_history(&history);
+	Rast_write_history(result, &history);
+
+	exit(EXIT_SUCCESS);
+    }				/*if end */
+    else if (me) {
+
+	int col, n_rows, i, row_n, modv, nrows, ncols, t, temp;
+	int *I;
+	double *a, *b, *c, *d, *e, *f, *r;
+	/*double *r; */
+	MPI_Recv(&temp, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
+	MPI_Recv(&nrows, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
+	MPI_Recv(&ncols, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
+	/*printf("Slave->%d: nrows=%d, ncols=%d \n",me,nrows,ncols); */
+
+	I = (int *)malloc((ncols + 2) * sizeof(int));
+	a = (double *)malloc((ncols + 1) * sizeof(double));
+	b = (double *)malloc((ncols + 1) * sizeof(double));
+	c = (double *)malloc((ncols + 1) * sizeof(double));
+	d = (double *)malloc((ncols + 1) * sizeof(double));
+	e = (double *)malloc((ncols + 1) * sizeof(double));
+	f = (double *)malloc((ncols + 1) * sizeof(double));
+
+	r = (double *)malloc((ncols + 2) * sizeof(double));
+	double db[6][ncols];
+
+	n_rows = nrows / (NUM_HOSTS - 1);
+	modv = nrows % (NUM_HOSTS - 1);
+	/*temp=10; */
+	/*printf("%d\n",temp); */
+
+	/*int temp;     */
+	if (modv >= me)
+	    n_rows++;
+	for (i = 0; i < n_rows; i++) {
+
+	    /*MPI_Recv(&row_n,1,MPI_INT,0,1,MPI_COMM_WORLD,&status); */
+	    MPI_Recv(I, ncols + 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
+	    MPI_Recv(db, 6 * ncols, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD,
+		     &status);
+	    for (col = 0; col < ncols; col++) {
+		a[col] = db[0][col];
+		b[col] = db[1][col];
+		c[col] = db[2][col];
+		d[col] = db[3][col];
+		e[col] = db[4][col];
+		f[col] = db[5][col];
+		for (t = 0; t < temp; t++) {
+		    if (I[col] == 0)
+			r[col] = -999.99;
+		    else if (I[col] == 1) {
+			/*sr */
+			if (a[col] == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] = (b[col] / a[col]);
+			}
+		    }
+		    else if (I[col] == 2) {
+			/*ndvi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] = (b[col] - a[col]) / (b[col] + a[col]);
+			}
+		    }
+		    else if (I[col] == 3) {
+			/*ipvi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] = (b[col]) / (b[col] + a[col]);
+			}
+
+		    }
+		    else if (I[col] == 4) {
+			/*dvi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] = (b[col] - a[col]);
+			}
+		    }
+		    else if (I[col] == 5) {
+			/*pvi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				(sin(1.0) * b[col]) / (cos(1.0) * a[col]);
+			}
+		    }
+		    else if (I[col] == 6) {
+			/*wdvi */
+			double slope = 1;	/*slope of soil line */
+
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] = (b[col] - slope * a[col]);
+			}
+		    }
+		    else if (I[col] == 7) {
+			/*savi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				((1 + 0.5) * (b[col] - a[col])) / (b[col] +
+								   a[col] +
+								   0.5);
+			}
+		    }
+		    else if (I[col] == 8) {
+			/*msavi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				(1 / 2) * (2 * (b[col] + 1) -
+					   sqrt((2 * b[col] +
+						 1) * (2 * b[col] + 1)) -
+					   (8 * (b[col] - a[col])));
+			}
+		    }
+		    else if (I[col] == 9) {
+			/*msavi2 */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				(1 / 2) * (2 * (b[col] + 1) -
+					   sqrt((2 * b[col] +
+						 1) * (2 * b[col] + 1)) -
+					   (8 * (b[col] - a[col])));
+			}
+		    }
+		    else if (I[col] == 10) {
+			/*gemi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				(((2 *
+				   ((b[col] * b[col]) - (a[col] * a[col])) +
+				   1.5 * b[col] + 0.5 * a[col]) / (b[col] +
+								   a[col] +
+								   0.5)) *
+				 (1 -
+				  0.25 * (2 *
+					  ((b[col] * b[col]) -
+					   (a[col] * a[col])) + 1.5 * b[col] +
+					  0.5 * a[col]) / (b[col] + a[col] +
+							   0.5))) - ((a[col] -
+								      0.125) /
+								     (1 -
+								      a
+								      [col]));
+			}
+		    }
+		    else if (I[col] == 11) {
+			/*arvi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				(b[col] - (2 * a[col] - d[col])) / (b[col] +
+								    (2 *
+								     a[col] -
+								     d[col]));
+			}
+		    }
+		    else if (I[col] == 12) {
+			/*gvi */
+			if ((b[col] + a[col]) == 0.0) {
+			    r[col] = -1.0;
+			}
+			else {
+			    r[col] =
+				(-0.2848 * d[col] - 0.2435 * c[col] -
+				 0.5436 * a[col] + 0.7243 * b[col] +
+				 0.0840 * e[col] - 0.1800 * f[col]);
+			}
+		    }
+		    else if (I[col] == 13) {
+			/*gari */
+			r[col] =
+			    (b[col] -
+			     (c[col] - (d[col] - a[col]))) / (b[col] +
+							      (c[col] -
+							       (d[col] -
+								a[col])));
+		    }
+		}	/*for temp */
+	    }		/*col end */
+	    r[ncols] = I[ncols];
+	    /*MPI_Send(&row_n,1,MPI_INT,0,1,MPI_COMM_WORLD); */
+	    MPI_Send(r, ncols + 1, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
+	}/*row end */
+
+	free(I);
+	free(a);
+	free(b);
+	free(c);
+	free(d);
+	free(e);
+	free(f);
+	free(r);
+	MPI_Finalize();
+    }	/*if end */
+}/*main end */

Added: grass-addons/grass7/imagery/i.vi.mpi/mpi.h
===================================================================
--- grass-addons/grass7/imagery/i.vi.mpi/mpi.h	                        (rev 0)
+++ grass-addons/grass7/imagery/i.vi.mpi/mpi.h	2012-03-14 10:48:51 UTC (rev 51056)
@@ -0,0 +1,1897 @@
+/* ompi/include/mpi.h.  Generated from mpi.h.in by configure.  */
+/*
+ * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
+ *                         University Research and Technology
+ *                         Corporation.  All rights reserved.
+ * Copyright (c) 2004-2006 The University of Tennessee and The University
+ *                         of Tennessee Research Foundation.  All rights
+ *                         reserved.
+ * Copyright (c) 2004-2007 High Performance Computing Center Stuttgart, 
+ *                         University of Stuttgart.  All rights reserved.
+ * Copyright (c) 2004-2005 The Regents of the University of California.
+ *                         All rights reserved.
+ * Copyright (c) 2007-2009 Cisco Systems, Inc.  All rights reserved.
+ * Copyright (c) 2008-2009 Sun Microsystems, Inc.  All rights reserved.
+ * $COPYRIGHT$
+ * 
+ * Additional copyrights may follow
+ * 
+ * $HEADER$
+ */
+
+#ifndef OMPI_MPI_H
+#define OMPI_MPI_H
+
+/* The comment below (and the ending partner) are for building fat
+   distributions on platforms that support it.  Please do not remove */
+
+/* @OMPI_BEGIN_CONFIGURE_SECTION@ */
+
+#ifndef OMPI_CONFIG_H
+
+/* Only include these if OMPI_CONFIG_H isn't defined (meaning if
+   ompi_config.h hasn't already been included).  Otherwise, we'll
+   duplicate all those symbols.  OMPI coding standards say that
+   ompi_config.h must be included before all other files, so this
+   should be good enough */
+
+/* Define to 1 if you have the ANSI C header files. */
+#define OMPI_STDC_HEADERS 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define OMPI_HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/synch.h> header file. */
+/* #undef OMPI_HAVE_SYS_SYNCH_H */
+
+/* Define to 1 if the system has the type `long long'. */
+#define OMPI_HAVE_LONG_LONG 1
+
+/* The size of a `bool', as computed by sizeof. */
+#define OMPI_SIZEOF_BOOL 1
+
+/* The size of a `int', as computed by sizeof. */
+#define OMPI_SIZEOF_INT 4
+
+/* Whether we have FORTRAN LOGICAL*1 or not */
+#define OMPI_HAVE_FORTRAN_LOGICAL1 1
+
+/* Whether we have FORTRAN LOGICAL*2 or not */
+#define OMPI_HAVE_FORTRAN_LOGICAL2 1
+
+/* Whether we have FORTRAN LOGICAL*4 or not */
+#define OMPI_HAVE_FORTRAN_LOGICAL4 1
+
+/* Whether we have FORTRAN LOGICAL*8 or not */
+#define OMPI_HAVE_FORTRAN_LOGICAL8 1
+
+/* Whether we have FORTRAN INTEGER*1 or not */
+#define OMPI_HAVE_FORTRAN_INTEGER1 1
+
+/* Whether we have FORTRAN INTEGER*16 or not */
+#define OMPI_HAVE_FORTRAN_INTEGER16 0
+
+/* Whether we have FORTRAN INTEGER*2 or not */
+#define OMPI_HAVE_FORTRAN_INTEGER2 1
+
+/* Whether we have FORTRAN INTEGER*4 or not */
+#define OMPI_HAVE_FORTRAN_INTEGER4 1
+
+/* Whether we have FORTRAN INTEGER*8 or not */
+#define OMPI_HAVE_FORTRAN_INTEGER8 1
+
+/* Whether we have FORTRAN REAL*16 or not */
+#define OMPI_HAVE_FORTRAN_REAL16 1
+
+/* Whether we have FORTRAN REAL*2 or not */
+#define OMPI_HAVE_FORTRAN_REAL2 0
+
+/* Whether we have FORTRAN REAL*4 or not */
+#define OMPI_HAVE_FORTRAN_REAL4 1
+
+/* Whether we have FORTRAN REAL*8 or not */
+#define OMPI_HAVE_FORTRAN_REAL8 1
+
+/* Type of MPI_Offset -- has to be defined here and typedef'ed later because mpi.h does not get AC SUBST's */
+#define OMPI_MPI_OFFSET_TYPE long long
+
+/* type to use for ptrdiff_t, if it does not exist, set to ptrdiff_t if it does exist */
+#define OMPI_PTRDIFF_TYPE ptrdiff_t
+
+/* Whether we want MPI cxx support or not */
+#define OMPI_WANT_CXX_BINDINGS 1
+
+/* do we want to try to work around C++ bindings SEEK_* issue? */
+#define OMPI_WANT_MPI_CXX_SEEK 1
+
+/* Whether a const_cast on a 2-d array will work with the C++ compiler */
+#define OMPI_CXX_SUPPORTS_2D_CONST_CAST 1
+
+/* Whether we want the MPI f77 bindings or not */
+#define OMPI_WANT_F77_BINDINGS 1
+
+/* Whether we want the MPI f90 bindings or not */
+#define OMPI_WANT_F90_BINDINGS 1
+
+/* Whether or not we have compiled with C++ exceptions support */
+#define OMPI_HAVE_CXX_EXCEPTION_SUPPORT 0
+
+/* MPI datatype corresponding to MPI_Offset */
+#define OMPI_OFFSET_DATATYPE MPI_LONG_LONG
+
+/* Major, minor, and release version of Open MPI */
+#define OMPI_MAJOR_VERSION 1
+#define OMPI_MINOR_VERSION 4
+#define OMPI_RELEASE_VERSION 3
+
+/* A  type that allows us to have sentinel type values that are still
+   valid */
+#define ompi_fortran_bogus_type_t int
+
+/* C type corresponding to FORTRAN INTEGER */
+#define ompi_fortran_integer_t int
+
+/* Whether C compiler supports -fvisibility */
+#define OMPI_C_HAVE_VISIBILITY 1
+
+/* Whether OMPI should provide MPI File interface */
+#define OMPI_PROVIDE_MPI_FILE_INTERFACE 1
+
+#ifndef OMPI_DECLSPEC
+#  if defined(WIN32) || defined(_WIN32)
+#    if defined(OMPI_IMPORTS)
+#      define OMPI_DECLSPEC        __declspec(dllimport)
+#    else
+#      define OMPI_DECLSPEC
+#    endif  /* defined(OMPI_IMPORTS) */
+#  else
+#    if OMPI_C_HAVE_VISIBILITY == 1
+#       define OMPI_DECLSPEC __attribute__((visibility("default")))
+#    else
+#       define OMPI_DECLSPEC
+#    endif      
+#  endif
+#endif
+
+#ifndef MPI_Fint
+/* MPI_Fint is the same as ompi_fortran_INTEGER_t */
+#define MPI_Fint ompi_fortran_integer_t
+#endif
+
+#endif /* #ifndef OMPI_CONFIG_H */
+
+/* @OMPI_END_CONFIGURE_SECTION@ */
+
+/* include for ptrdiff_t */
+#ifdef OMPI_STDC_HEADERS
+#include <stddef.h>
+#endif
+
+#ifndef OMPI_BUILDING
+#define OMPI_BUILDING 0
+#endif
+
+
+/*
+ * Just in case you need it.  :-)
+ */
+#define OPEN_MPI 1
+
+/*
+ * MPI version
+ */
+#define MPI_VERSION 2
+#define MPI_SUBVERSION 1
+
+/*
+ * To accommodate programs written for MPI implementations that use a
+ * straight ROMIO import
+ */
+#if !OMPI_BUILDING && OMPI_PROVIDE_MPI_FILE_INTERFACE
+#define MPIO_Request MPI_Request
+#define MPIO_Test MPI_Test
+#define MPIO_Wait MPI_Wait
+#endif
+
+/*
+ * When initializing global pointers to Open MPI internally-defined
+ * structs, some compilers warn about type-punning to incomplete
+ * types.  Therefore, when full struct definitions are unavailable
+ * (when not building Open MPI), cast to an opaque (void *) pointer to
+ * disable any strict-aliasing optimizations.  Don't cast to (void *)
+ * when building Open MPI so that we actually get the benefit of type
+ * checking (because we *do* have the full type definitions available
+ * when building OMPI).
+ */
+#if !OMPI_BUILDING
+#define OMPI_PREDEFINED_GLOBAL(type, global) ((type) ((void *) &(global)))
+#else
+#define OMPI_PREDEFINED_GLOBAL(type, global) ((type) &(global))
+#endif
+
+#if defined(c_plusplus) || defined(__cplusplus)
+extern "C" {
+#endif
+/*
+ * Typedefs
+ */
+
+typedef OMPI_PTRDIFF_TYPE MPI_Aint;
+typedef OMPI_MPI_OFFSET_TYPE MPI_Offset;
+typedef struct ompi_communicator_t *MPI_Comm;
+typedef struct ompi_datatype_t *MPI_Datatype;
+typedef struct ompi_errhandler_t *MPI_Errhandler;
+#if OMPI_PROVIDE_MPI_FILE_INTERFACE
+typedef struct ompi_file_t *MPI_File;
+#endif
+typedef struct ompi_group_t *MPI_Group;
+typedef struct ompi_info_t *MPI_Info;
+typedef struct ompi_op_t *MPI_Op;
+typedef struct ompi_request_t *MPI_Request;
+typedef struct ompi_status_public_t MPI_Status;
+typedef struct ompi_win_t *MPI_Win;
+
+/*
+ * MPI_Status
+ */
+struct ompi_status_public_t { 
+  int MPI_SOURCE;
+  int MPI_TAG;
+  int MPI_ERROR;
+  int _count;
+  int _cancelled;
+};
+typedef struct ompi_status_public_t ompi_status_public_t;
+
+/*
+ * User typedefs
+ */
+typedef int (MPI_Copy_function)(MPI_Comm, int, void *,
+                                void *, void *, int *);
+typedef int (MPI_Delete_function)(MPI_Comm, int, void *, void *);
+typedef int (MPI_Datarep_extent_function)(MPI_Datatype, MPI_Aint *, void *);
+typedef int (MPI_Datarep_conversion_function)(void *, MPI_Datatype, 
+                                              int, void *, MPI_Offset, void *);
+typedef void (MPI_Comm_errhandler_fn)(MPI_Comm *, int *, ...);
+#if OMPI_PROVIDE_MPI_FILE_INTERFACE
+    /* This is a little hackish, but errhandler.h needs space for a
+       MPI_File_errhandler_fn.  While it could just be removed, this
+       allows us to maintain a stable ABI within OMPI, at least for
+       apps that don't use MPI I/O. */
+typedef void (ompi_file_errhandler_fn)(MPI_File *, int *, ...);
+typedef ompi_file_errhandler_fn MPI_File_errhandler_fn;
+#else
+struct ompi_file_t;
+typedef void (ompi_file_errhandler_fn)(struct ompi_file_t**, int *, ...);
+#endif
+typedef void (MPI_Win_errhandler_fn)(MPI_Win *, int *, ...);
+typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...);
+typedef void (MPI_User_function)(void *, void *, int *, MPI_Datatype *);
+typedef int (MPI_Comm_copy_attr_function)(MPI_Comm, int, void *,
+                                            void *, void *, int *);
+typedef int (MPI_Comm_delete_attr_function)(MPI_Comm, int, void *, void *);
+typedef int (MPI_Type_copy_attr_function)(MPI_Datatype, int, void *,
+                                            void *, void *, int *);
+typedef int (MPI_Type_delete_attr_function)(MPI_Datatype, int,
+                                              void *, void *);
+typedef int (MPI_Win_copy_attr_function)(MPI_Win, int, void *,
+                                           void *, void *, int *);
+typedef int (MPI_Win_delete_attr_function)(MPI_Win, int, void *, void *);
+typedef int (MPI_Grequest_query_function)(void *, MPI_Status *);
+typedef int (MPI_Grequest_free_function)(void *);
+typedef int (MPI_Grequest_cancel_function)(void *, int); 
+
+/*
+ * Miscellaneous constants
+ */
+#define MPI_ANY_SOURCE         -1      /* match any source rank */
+#define MPI_PROC_NULL          -2      /* rank of null process */
+#define MPI_ROOT               -4
+#define MPI_ANY_TAG            -1      /* match any message tag */
+#define MPI_MAX_PROCESSOR_NAME 256     /* max proc. name length */
+#define MPI_MAX_ERROR_STRING   256     /* max error message length */
+#define MPI_MAX_OBJECT_NAME    64      /* max object name length */
+#define MPI_UNDEFINED          -32766  /* undefined stuff */
+#define MPI_CART               1       /* cartesian topology */
+#define MPI_GRAPH              2       /* graph topology */
+#define MPI_KEYVAL_INVALID     -1      /* invalid key value */
+
+/*
+ * More constants
+ */
+#define MPI_BOTTOM               ((void *) 0)    /* base reference address */
+#define MPI_IN_PLACE             ((void *) 1)    /* in place buffer */
+#define MPI_BSEND_OVERHEAD       128     /* size of bsend header + ptr */
+#define MPI_MAX_INFO_KEY         36      /* max info key length */
+#define MPI_MAX_INFO_VAL         256     /* max info value length */
+#define MPI_ARGV_NULL            ((char **) 0)   /* NULL argument vector */
+#define MPI_ARGVS_NULL           ((char ***) 0)  /* NULL argument vectors */
+#define MPI_ERRCODES_IGNORE      ((int *) 0)    /* don't return error codes */
+#define MPI_MAX_PORT_NAME        1024    /* max port name length */
+#define MPI_MAX_NAME_LEN         MPI_MAX_PORT_NAME /* max port name length */
+#define MPI_ORDER_C              0       /* C row major order */
+#define MPI_ORDER_FORTRAN        1       /* Fortran column major order */
+#define MPI_DISTRIBUTE_BLOCK     0       /* block distribution */
+#define MPI_DISTRIBUTE_CYCLIC    1       /* cyclic distribution */
+#define MPI_DISTRIBUTE_NONE      2       /* not distributed */
+#define MPI_DISTRIBUTE_DFLT_DARG (-1)    /* default distribution arg */
+
+#if OMPI_PROVIDE_MPI_FILE_INTERFACE
+/*
+ * Since these values are arbitrary to Open MPI, we might as well make
+ * them the same as ROMIO for ease of mapping.  These values taken
+ * from ROMIO's mpio.h file.
+ */
+#define MPI_MODE_CREATE              1  /* ADIO_CREATE */ 
+#define MPI_MODE_RDONLY              2  /* ADIO_RDONLY */
+#define MPI_MODE_WRONLY              4  /* ADIO_WRONLY  */
+#define MPI_MODE_RDWR                8  /* ADIO_RDWR  */
+#define MPI_MODE_DELETE_ON_CLOSE    16  /* ADIO_DELETE_ON_CLOSE */
+#define MPI_MODE_UNIQUE_OPEN        32  /* ADIO_UNIQUE_OPEN */
+#define MPI_MODE_EXCL               64  /* ADIO_EXCL */
+#define MPI_MODE_APPEND            128  /* ADIO_APPEND */
+#define MPI_MODE_SEQUENTIAL        256  /* ADIO_SEQUENTIAL */
+
+#define MPI_DISPLACEMENT_CURRENT   -54278278
+
+#define MPI_SEEK_SET            600
+#define MPI_SEEK_CUR            602
+#define MPI_SEEK_END            604
+
+#define MPI_MAX_DATAREP_STRING  128
+#endif /* #if OMPI_PROVIDE_MPI_FILE_INTERFACE */
+
+/*
+ * MPI-2 One-Sided Communications asserts
+ */
+#define MPI_MODE_NOCHECK             1
+#define MPI_MODE_NOPRECEDE           2
+#define MPI_MODE_NOPUT               4
+#define MPI_MODE_NOSTORE             8
+#define MPI_MODE_NOSUCCEED          16
+
+#define MPI_LOCK_EXCLUSIVE           1
+#define MPI_LOCK_SHARED              2
+
+
+/*
+ * Predefined attribute keyvals
+ *
+ * DO NOT CHANGE THE ORDER WITHOUT ALSO CHANGING THE ORDER IN
+ * src/attribute/attribute_predefined.c and mpif.h.in.
+ */
+enum {
+    /* MPI-1 */
+    MPI_TAG_UB,
+    MPI_HOST,
+    MPI_IO,
+    MPI_WTIME_IS_GLOBAL,
+
+    /* MPI-2 */
+    MPI_APPNUM,
+    MPI_LASTUSEDCODE,
+    MPI_UNIVERSE_SIZE,
+    MPI_WIN_BASE,
+    MPI_WIN_SIZE,
+    MPI_WIN_DISP_UNIT,
+
+    /* Even though these four are IMPI attributes, they need to be there
+       for all MPI jobs */
+    IMPI_CLIENT_SIZE,
+    IMPI_CLIENT_COLOR,
+    IMPI_HOST_SIZE,
+    IMPI_HOST_COLOR
+};
+
+/*
+ * Error classes and codes
+ * Do not change the values of these without also modifying mpif.h.in.
+ */
+#define MPI_SUCCESS                   0  
+#define MPI_ERR_BUFFER                1
+#define MPI_ERR_COUNT                 2
+#define MPI_ERR_TYPE                  3
+#define MPI_ERR_TAG                   4
+#define MPI_ERR_COMM                  5
+#define MPI_ERR_RANK                  6
+#define MPI_ERR_REQUEST               7
+#define MPI_ERR_ROOT                  8
+#define MPI_ERR_GROUP                 9
+#define MPI_ERR_OP                    10
+#define MPI_ERR_TOPOLOGY              11
+#define MPI_ERR_DIMS                  12
+#define MPI_ERR_ARG                   13
+#define MPI_ERR_UNKNOWN               14
+#define MPI_ERR_TRUNCATE              15
+#define MPI_ERR_OTHER                 16
+#define MPI_ERR_INTERN                17
+#define MPI_ERR_IN_STATUS             18
+#define MPI_ERR_PENDING               19
+#define MPI_ERR_ACCESS                20
+#define MPI_ERR_AMODE                 21
+#define MPI_ERR_ASSERT                22
+#define MPI_ERR_BAD_FILE              23
+#define MPI_ERR_BASE                  24
+#define MPI_ERR_CONVERSION            25
+#define MPI_ERR_DISP                  26
+#define MPI_ERR_DUP_DATAREP           27
+#define MPI_ERR_FILE_EXISTS           28
+#define MPI_ERR_FILE_IN_USE           29
+#define MPI_ERR_FILE                  30
+#define MPI_ERR_INFO_KEY              31
+#define MPI_ERR_INFO_NOKEY            32
+#define MPI_ERR_INFO_VALUE            33
+#define MPI_ERR_INFO                  34
+#define MPI_ERR_IO                    35
+#define MPI_ERR_KEYVAL                36
+#define MPI_ERR_LOCKTYPE              37
+#define MPI_ERR_NAME                  38
+#define MPI_ERR_NO_MEM                39
+#define MPI_ERR_NOT_SAME              40
+#define MPI_ERR_NO_SPACE              41
+#define MPI_ERR_NO_SUCH_FILE          42
+#define MPI_ERR_PORT                  43
+#define MPI_ERR_QUOTA                 44
+#define MPI_ERR_READ_ONLY             45
+#define MPI_ERR_RMA_CONFLICT          46
+#define MPI_ERR_RMA_SYNC              47
+#define MPI_ERR_SERVICE               48
+#define MPI_ERR_SIZE                  49
+#define MPI_ERR_SPAWN                 50
+#define MPI_ERR_UNSUPPORTED_DATAREP   51
+#define MPI_ERR_UNSUPPORTED_OPERATION 52
+#define MPI_ERR_WIN                   53
+#define MPI_ERR_LASTCODE              54
+
+#define MPI_ERR_SYSRESOURCE          -2
+
+
+/*
+ * Comparison results.  Don't change the order of these, the group
+ * comparison functions rely on it.
+ * Do not change the order of these without also modifying mpif.h.in.
+ */
+enum {
+  MPI_IDENT,
+  MPI_CONGRUENT,
+  MPI_SIMILAR,
+  MPI_UNEQUAL
+};
+
+/*
+ * MPI_Init_thread constants
+ * Do not change the order of these without also modifying mpif.h.in.
+ */
+enum {
+  MPI_THREAD_SINGLE,
+  MPI_THREAD_FUNNELED,
+  MPI_THREAD_SERIALIZED,
+  MPI_THREAD_MULTIPLE
+};
+
+/*
+ * Datatype combiners.
+ * Do not change the order of these without also modifying mpif.h.in.
+ */
+enum {
+  MPI_COMBINER_NAMED,
+  MPI_COMBINER_DUP,
+  MPI_COMBINER_CONTIGUOUS,
+  MPI_COMBINER_VECTOR,
+  MPI_COMBINER_HVECTOR_INTEGER,
+  MPI_COMBINER_HVECTOR,
+  MPI_COMBINER_INDEXED,
+  MPI_COMBINER_HINDEXED_INTEGER,
+  MPI_COMBINER_HINDEXED,
+  MPI_COMBINER_INDEXED_BLOCK,
+  MPI_COMBINER_STRUCT_INTEGER,
+  MPI_COMBINER_STRUCT,
+  MPI_COMBINER_SUBARRAY,
+  MPI_COMBINER_DARRAY,
+  MPI_COMBINER_F90_REAL,
+  MPI_COMBINER_F90_COMPLEX,
+  MPI_COMBINER_F90_INTEGER,
+  MPI_COMBINER_RESIZED
+};
+
+/*
+ * NULL handles
+ */
+#define MPI_GROUP_NULL OMPI_PREDEFINED_GLOBAL(MPI_Group, ompi_mpi_group_null)
+#define MPI_COMM_NULL OMPI_PREDEFINED_GLOBAL(MPI_Comm, ompi_mpi_comm_null)
+#define MPI_REQUEST_NULL OMPI_PREDEFINED_GLOBAL(MPI_Request, ompi_request_null)
+#define MPI_OP_NULL OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_null)
+#define MPI_ERRHANDLER_NULL OMPI_PREDEFINED_GLOBAL(MPI_Errhandler, ompi_mpi_errhandler_null)
+#define MPI_INFO_NULL OMPI_PREDEFINED_GLOBAL(MPI_Info, ompi_mpi_info_null)
+#define MPI_WIN_NULL OMPI_PREDEFINED_GLOBAL(MPI_Win, ompi_mpi_win_null)
+#if OMPI_PROVIDE_MPI_FILE_INTERFACE
+#define MPI_FILE_NULL OMPI_PREDEFINED_GLOBAL(MPI_File, ompi_mpi_file_null)
+#endif
+
+#define MPI_STATUS_IGNORE ((MPI_Status *) 0)
+#define MPI_STATUSES_IGNORE ((MPI_Status *) 0)
+
+/* MPI-2 specifies that the name "MPI_TYPE_NULL_DELETE_FN" (and all
+   related friends) must be accessible in C, C++, and Fortran. This is
+   unworkable if the back-end Fortran compiler uses all caps for its
+   linker symbol convention -- it results in two functions with
+   different signatures that have the same name (i.e., both C and
+   Fortran use the symbol MPI_TYPE_NULL_DELETE_FN).  So we have to
+   #define the C names to be something else, so that the names are
+   *accessed* through MPI_TYPE_NULL_DELETE_FN, but their actual symbol
+   name is different.
+
+   However, this file is included when the fortran wrapper functions
+   are compiled in Open MPI, so we do *not* want these #defines in
+   this case (i.e., we need the Fortran wrapper function to be
+   compiled as MPI_TYPE_NULL_DELETE_FN).  So add some #if kinds of
+   protection for this case. */
+
+#if !defined(OMPI_COMPILING_F77_WRAPPERS)
+#define MPI_NULL_DELETE_FN OMPI_C_MPI_NULL_DELETE_FN
+#define MPI_NULL_COPY_FN OMPI_C_MPI_NULL_COPY_FN
+#define MPI_DUP_FN OMPI_C_MPI_DUP_FN
+
+#define MPI_TYPE_NULL_DELETE_FN OMPI_C_MPI_TYPE_NULL_DELETE_FN
+#define MPI_TYPE_NULL_COPY_FN OMPI_C_MPI_TYPE_NULL_COPY_FN
+#define MPI_TYPE_DUP_FN OMPI_C_MPI_TYPE_DUP_FN
+
+#define MPI_COMM_NULL_DELETE_FN OMPI_C_MPI_COMM_NULL_DELETE_FN
+#define MPI_COMM_NULL_COPY_FN OMPI_C_MPI_COMM_NULL_COPY_FN
+#define MPI_COMM_DUP_FN OMPI_C_MPI_COMM_DUP_FN
+
+#define MPI_WIN_NULL_DELETE_FN OMPI_C_MPI_WIN_NULL_DELETE_FN
+#define MPI_WIN_NULL_COPY_FN OMPI_C_MPI_WIN_NULL_COPY_FN
+#define MPI_WIN_DUP_FN OMPI_C_MPI_WIN_DUP_FN
+
+/* MPI_CONVERSION_FN_NULL is a sentinel value, but it has to be large
+   enough to be the same size as a valid function pointer.  It
+   therefore shares many characteristics between Fortran constants and
+   Fortran sentinel functions.  For example, it shares the problem of
+   having Fortran compilers have all-caps versions of the symbols that
+   must be able to be present, and therefore has to be in this
+   conditional block in mpi.h. */
+#define MPI_CONVERSION_FN_NULL ((MPI_Datarep_conversion_function*) 0)
+#endif
+
+OMPI_DECLSPEC int OMPI_C_MPI_TYPE_NULL_DELETE_FN( MPI_Datatype datatype, 
+                                                  int type_keyval,
+                                                  void* attribute_val_out, 
+                                                  void* extra_state );
+OMPI_DECLSPEC int OMPI_C_MPI_TYPE_NULL_COPY_FN( MPI_Datatype datatype, 
+                                                int type_keyval, 
+                                                void* extra_state,
+                                                void* attribute_val_in, 
+                                                void* attribute_val_out, 
+                                                int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_TYPE_DUP_FN( MPI_Datatype datatype, 
+                                          int type_keyval, 
+                                          void* extra_state, 
+                                          void* attribute_val_in, 
+                                          void* attribute_val_out, 
+                                          int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_COMM_NULL_DELETE_FN( MPI_Comm comm, 
+                                                  int comm_keyval,
+                                                  void* attribute_val_out, 
+                                                  void* extra_state );
+OMPI_DECLSPEC int OMPI_C_MPI_COMM_NULL_COPY_FN( MPI_Comm comm, 
+                                                int comm_keyval, 
+                                                void* extra_state, 
+                                                void* attribute_val_in,
+                                                void* attribute_val_out, 
+                                                int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_COMM_DUP_FN( MPI_Comm comm, int comm_keyval, 
+                                          void* extra_state,
+                                          void* attribute_val_in, 
+                                          void* attribute_val_out,
+                                          int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_NULL_DELETE_FN( MPI_Comm comm, int comm_keyval,
+                                             void* attribute_val_out, 
+                                             void* extra_state );
+OMPI_DECLSPEC int OMPI_C_MPI_NULL_COPY_FN( MPI_Comm comm, int comm_keyval, 
+                                           void* extra_state,
+                                           void* attribute_val_in, 
+                                           void* attribute_val_out,
+                                           int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_DUP_FN( MPI_Comm comm, int comm_keyval, 
+                                     void* extra_state,
+                                     void* attribute_val_in, 
+                                     void* attribute_val_out,
+                                     int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_WIN_NULL_DELETE_FN( MPI_Win window, 
+                                                 int win_keyval,
+                                                 void* attribute_val_out, 
+                                                 void* extra_state );
+OMPI_DECLSPEC int OMPI_C_MPI_WIN_NULL_COPY_FN( MPI_Win window, int win_keyval, 
+                                               void* extra_state, 
+                                               void* attribute_val_in,
+                                               void* attribute_val_out, 
+                                               int* flag );
+OMPI_DECLSPEC int OMPI_C_MPI_WIN_DUP_FN( MPI_Win window, int win_keyval, 
+                                         void* extra_state,
+                                         void* attribute_val_in, 
+                                         void* attribute_val_out,
+                                         int* flag );
+
+/*
+ * External variables
+ *
+ * The below externs use the ompi_predefined_xxx_t structures to maintain
+ * back compatibility between MPI library versions.
+ * See ompi/communicator/communicator.h comments with struct ompi_communicator_t
+ * for full explanation why we chose to use the ompi_predefined_xxx_t structure.
+ */
+OMPI_DECLSPEC extern struct ompi_predefined_communicator_t ompi_mpi_comm_world;
+OMPI_DECLSPEC extern struct ompi_predefined_communicator_t ompi_mpi_comm_self;
+OMPI_DECLSPEC extern struct ompi_predefined_communicator_t ompi_mpi_comm_null;
+
+OMPI_DECLSPEC extern struct ompi_predefined_group_t ompi_mpi_group_empty;
+OMPI_DECLSPEC extern struct ompi_predefined_group_t ompi_mpi_group_null;
+
+OMPI_DECLSPEC extern struct ompi_predefined_request_t ompi_request_null;
+
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_null;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_max, ompi_mpi_op_min;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_sum;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_prod;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_land;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_band;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_lor, ompi_mpi_op_bor;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_lxor;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_bxor;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_maxloc;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_minloc;
+OMPI_DECLSPEC extern struct ompi_predefined_op_t ompi_mpi_op_replace;
+
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_char, ompi_mpi_byte;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_int, ompi_mpi_logic;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_short, ompi_mpi_long;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_float, ompi_mpi_double;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_long_double;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_cplex, ompi_mpi_packed;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_signed_char;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_unsigned_char;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_unsigned_short;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_unsigned, ompi_mpi_datatype_null;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_unsigned_long, ompi_mpi_ldblcplex;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_ub, ompi_mpi_lb;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_float_int, ompi_mpi_double_int;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_long_int, ompi_mpi_2int;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_short_int, ompi_mpi_dblcplex;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_integer, ompi_mpi_real;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_dblprec, ompi_mpi_character;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_2real, ompi_mpi_2dblprec;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_2integer, ompi_mpi_longdbl_int;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_wchar, ompi_mpi_long_long_int;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_unsigned_long_long;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_cxx_cplex, ompi_mpi_cxx_dblcplex;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_cxx_ldblcplex;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_cxx_bool;
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_2cplex, ompi_mpi_2dblcplex;
+/* other MPI2 datatypes */
+#if OMPI_HAVE_FORTRAN_LOGICAL1
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_logical1;
+#endif
+#if OMPI_HAVE_FORTRAN_LOGICAL2
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_logical2;
+#endif
+#if OMPI_HAVE_FORTRAN_LOGICAL4
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_logical4;
+#endif
+#if OMPI_HAVE_FORTRAN_LOGICAL8
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_logical8;
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER1
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_integer1;
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER2
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_integer2;
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER4
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_integer4;
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER8
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_integer8;
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER16
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_integer16;
+#endif
+#if OMPI_HAVE_FORTRAN_REAL2
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_real2;
+#endif
+#if OMPI_HAVE_FORTRAN_REAL4
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_real4; 
+#endif
+#if OMPI_HAVE_FORTRAN_REAL8
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_real8;
+#endif
+#if OMPI_HAVE_FORTRAN_REAL16
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_real16;
+#endif
+#if OMPI_HAVE_FORTRAN_REAL4
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_complex8;
+#endif
+#if OMPI_HAVE_FORTRAN_REAL8
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_complex16;
+#endif
+#if OMPI_HAVE_FORTRAN_REAL16
+OMPI_DECLSPEC extern struct ompi_predefined_datatype_t ompi_mpi_complex32;
+#endif
+
+OMPI_DECLSPEC extern struct ompi_predefined_errhandler_t ompi_mpi_errhandler_null;
+OMPI_DECLSPEC extern struct ompi_predefined_errhandler_t ompi_mpi_errors_are_fatal;
+OMPI_DECLSPEC extern struct ompi_predefined_errhandler_t ompi_mpi_errors_return;
+
+OMPI_DECLSPEC extern struct ompi_predefined_win_t ompi_mpi_win_null;
+OMPI_DECLSPEC extern struct ompi_predefined_file_t ompi_mpi_file_null;
+
+OMPI_DECLSPEC extern struct ompi_predefined_info_t ompi_mpi_info_null;
+
+OMPI_DECLSPEC extern MPI_Fint *MPI_F_STATUS_IGNORE;
+OMPI_DECLSPEC extern MPI_Fint *MPI_F_STATUSES_IGNORE;
+
+/*
+ * MPI predefined handles
+ */
+#define MPI_COMM_WORLD OMPI_PREDEFINED_GLOBAL( MPI_Comm, ompi_mpi_comm_world)
+#define MPI_COMM_SELF OMPI_PREDEFINED_GLOBAL(MPI_Comm, ompi_mpi_comm_self)
+
+#define MPI_GROUP_EMPTY OMPI_PREDEFINED_GLOBAL(MPI_Group, ompi_mpi_group_empty)
+
+#define MPI_MAX OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_max)
+#define MPI_MIN OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_min)
+#define MPI_SUM OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_sum)
+#define MPI_PROD OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_prod)
+#define MPI_LAND OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_land)
+#define MPI_BAND OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_band)
+#define MPI_LOR OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_lor)
+#define MPI_BOR OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_bor)
+#define MPI_LXOR OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_lxor)
+#define MPI_BXOR OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_bxor)
+#define MPI_MAXLOC OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_maxloc)
+#define MPI_MINLOC OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_minloc)
+#define MPI_REPLACE OMPI_PREDEFINED_GLOBAL(MPI_Op, ompi_mpi_op_replace)
+
+/* C datatypes: predefined MPI_Datatype handles mirroring the C type system.
+ * Each maps a standard name onto an Open MPI internal global object. */
+#define MPI_DATATYPE_NULL OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_datatype_null)
+#define MPI_BYTE OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_byte)
+#define MPI_PACKED OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_packed)
+#define MPI_CHAR OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_char)
+#define MPI_SHORT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_short)
+#define MPI_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_int)
+#define MPI_LONG OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long)
+#define MPI_FLOAT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_float)
+#define MPI_DOUBLE OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_double)
+#define MPI_LONG_DOUBLE OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long_double)
+#define MPI_UNSIGNED_CHAR OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_unsigned_char)
+#define MPI_SIGNED_CHAR OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_signed_char)
+#define MPI_UNSIGNED_SHORT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_unsigned_short)
+#define MPI_UNSIGNED_LONG OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_unsigned_long)
+#define MPI_UNSIGNED OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_unsigned)
+/* Value+index pair types, used with the MPI_MAXLOC/MPI_MINLOC reductions. */
+#define MPI_FLOAT_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_float_int)
+#define MPI_DOUBLE_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_double_int)
+#define MPI_LONG_DOUBLE_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_longdbl_int)
+#define MPI_LONG_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long_int)
+#define MPI_SHORT_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_short_int)
+#define MPI_2INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2int)
+/* Upper/lower bound markers for constructed datatypes (deprecated in later
+ * MPI versions, kept here for MPI-1 compatibility). */
+#define MPI_UB OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_ub)
+#define MPI_LB OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_lb)
+#define MPI_WCHAR OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_wchar)
+/* Only available when this Open MPI build has 'long long' support. */
+#if OMPI_HAVE_LONG_LONG
+#define MPI_LONG_LONG_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long_long_int)
+#define MPI_LONG_LONG OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long_long_int)
+#define MPI_UNSIGNED_LONG_LONG OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_unsigned_long_long)
+#endif  /* OMPI_HAVE_LONG_LONG */
+/* Pair complex types (Fortran-style, exposed to C as well). */
+#define MPI_2COMPLEX OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2cplex)
+#define MPI_2DOUBLE_COMPLEX OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2dblcplex)
+
+/* Fortran datatype bindings: predefined handles mirroring the Fortran type
+ * system.  The size-specific variants (LOGICAL1/2/4/8, INTEGER1..16,
+ * REAL4/8/16 and matching COMPLEX widths) exist only if the Fortran compiler
+ * this Open MPI build used supports them -- hence the OMPI_HAVE_FORTRAN_*
+ * guards. */
+#define MPI_CHARACTER OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_character)
+/* NOTE(review): 'ompi_mpi_logic' (not 'logical') is presumably Open MPI's
+ * internal spelling for this object -- do not "correct" it locally. */
+#define MPI_LOGICAL OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_logic)
+#if OMPI_HAVE_FORTRAN_LOGICAL1
+#define MPI_LOGICAL1 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_logical1)
+#endif
+#if OMPI_HAVE_FORTRAN_LOGICAL2
+#define MPI_LOGICAL2 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_logical2)
+#endif
+#if OMPI_HAVE_FORTRAN_LOGICAL4
+#define MPI_LOGICAL4 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_logical4)
+#endif
+#if OMPI_HAVE_FORTRAN_LOGICAL8
+#define MPI_LOGICAL8 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_logical8)
+#endif
+#define MPI_INTEGER OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_integer)
+#if OMPI_HAVE_FORTRAN_INTEGER1
+#define MPI_INTEGER1 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_integer1)
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER2
+#define MPI_INTEGER2 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_integer2)
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER4
+#define MPI_INTEGER4 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_integer4)
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER8
+#define MPI_INTEGER8 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_integer8)
+#endif
+#if OMPI_HAVE_FORTRAN_INTEGER16
+#define MPI_INTEGER16 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_integer16)
+#endif
+#define MPI_REAL OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_real)
+#if OMPI_HAVE_FORTRAN_REAL4
+#define MPI_REAL4 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_real4)
+#endif
+#if OMPI_HAVE_FORTRAN_REAL8
+#define MPI_REAL8 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_real8)
+#endif
+#if OMPI_HAVE_FORTRAN_REAL16
+#define MPI_REAL16 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_real16)
+#endif
+/* 'dblprec'/'cplex'/'dblcplex' are Open MPI's abbreviated internal names. */
+#define MPI_DOUBLE_PRECISION OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_dblprec)
+#define MPI_COMPLEX OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_cplex)
+/* Sized COMPLEX widths track the corresponding REAL width support. */
+#if OMPI_HAVE_FORTRAN_REAL4
+#define MPI_COMPLEX8 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_complex8)
+#endif
+#if OMPI_HAVE_FORTRAN_REAL8
+#define MPI_COMPLEX16 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_complex16)
+#endif
+#if OMPI_HAVE_FORTRAN_REAL16
+#define MPI_COMPLEX32 OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_complex32)
+#endif
+#define MPI_DOUBLE_COMPLEX OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_dblcplex)
+/* Fortran pair types for MINLOC/MAXLOC reductions. */
+#define MPI_2REAL OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2real)
+#define MPI_2DOUBLE_PRECISION OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2dblprec)
+#define MPI_2INTEGER OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2integer)
+
+/* Predefined error handlers: MPI_ERRORS_ARE_FATAL aborts the job on any MPI
+ * error (the MPI default for communicators); MPI_ERRORS_RETURN makes calls
+ * return an error code to the caller instead. */
+#define MPI_ERRORS_ARE_FATAL OMPI_PREDEFINED_GLOBAL(MPI_Errhandler, ompi_mpi_errors_are_fatal)
+#define MPI_ERRORS_RETURN OMPI_PREDEFINED_GLOBAL(MPI_Errhandler, ompi_mpi_errors_return)
+
+/* Typeclass definition for MPI_Type_match_size */
+#define MPI_TYPECLASS_INTEGER    1
+#define MPI_TYPECLASS_REAL       2
+#define MPI_TYPECLASS_COMPLEX    3
+
+
+/*
+ * MPI API
+ *
+ * Prototypes for the C bindings, listed alphabetically by function name.
+ * OMPI_DECLSPEC controls symbol visibility/export and is presumably defined
+ * earlier in this header (outside this excerpt).  These declarations must
+ * stay byte-identical to the installed Open MPI library's header.
+ */
+
+OMPI_DECLSPEC  int MPI_Abort(MPI_Comm comm, int errorcode);
+OMPI_DECLSPEC  int MPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+                                  int target_rank, MPI_Aint target_disp, int target_count,
+                                  MPI_Datatype target_datatype, MPI_Op op, MPI_Win win); 
+OMPI_DECLSPEC  int MPI_Add_error_class(int *errorclass);
+OMPI_DECLSPEC  int MPI_Add_error_code(int errorclass, int *errorcode);
+OMPI_DECLSPEC  int MPI_Add_error_string(int errorcode, char *string);
+OMPI_DECLSPEC  int MPI_Address(void *location, MPI_Aint *address);
+OMPI_DECLSPEC  int MPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                 void *recvbuf, int recvcount, 
+                                 MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                  void *recvbuf, int *recvcounts, 
+                                  int *displs, MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, 
+                                 void *baseptr);
+OMPI_DECLSPEC  int MPI_Allreduce(void *sendbuf, void *recvbuf, int count, 
+                                 MPI_Datatype datatype, MPI_Op op, MPI_Comm comm); 
+OMPI_DECLSPEC  int MPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                void *recvbuf, int recvcount, 
+                                MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls, 
+                                 MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
+                                 int *rdispls, MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes, 
+                                 void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes,
+                                 MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Attr_delete(MPI_Comm comm, int keyval);
+OMPI_DECLSPEC  int MPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag);
+OMPI_DECLSPEC  int MPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val);
+OMPI_DECLSPEC  int MPI_Barrier(MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype, 
+                             int root, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Bsend(void *buf, int count, MPI_Datatype datatype, 
+                             int dest, int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Bsend_init(void *buf, int count, MPI_Datatype datatype, 
+                                  int dest, int tag, MPI_Comm comm, MPI_Request *request); 
+OMPI_DECLSPEC  int MPI_Buffer_attach(void *buffer, int size);
+OMPI_DECLSPEC  int MPI_Buffer_detach(void *buffer, int *size);
+OMPI_DECLSPEC  int MPI_Cancel(MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int *coords);
+OMPI_DECLSPEC  int MPI_Cart_create(MPI_Comm old_comm, int ndims, int *dims, 
+                                   int *periods, int reorder, MPI_Comm *comm_cart);
+OMPI_DECLSPEC  int MPI_Cart_get(MPI_Comm comm, int maxdims, int *dims, 
+                                int *periods, int *coords);
+OMPI_DECLSPEC  int MPI_Cart_map(MPI_Comm comm, int ndims, int *dims, 
+                                int *periods, int *newrank);
+OMPI_DECLSPEC  int MPI_Cart_rank(MPI_Comm comm, int *coords, int *rank);
+OMPI_DECLSPEC  int MPI_Cart_shift(MPI_Comm comm, int direction, int disp, 
+                                  int *rank_source, int *rank_dest);
+OMPI_DECLSPEC  int MPI_Cart_sub(MPI_Comm comm, int *remain_dims, MPI_Comm *new_comm);
+OMPI_DECLSPEC  int MPI_Cartdim_get(MPI_Comm comm, int *ndims);
+OMPI_DECLSPEC  int MPI_Close_port(char *port_name);
+OMPI_DECLSPEC  int MPI_Comm_accept(char *port_name, MPI_Info info, int root, 
+                                   MPI_Comm comm, MPI_Comm *newcomm);
+OMPI_DECLSPEC  MPI_Fint MPI_Comm_c2f(MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Comm_call_errhandler(MPI_Comm comm, int errorcode);
+OMPI_DECLSPEC  int MPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result);
+OMPI_DECLSPEC  int MPI_Comm_connect(char *port_name, MPI_Info info, int root, 
+                                    MPI_Comm comm, MPI_Comm *newcomm);
+OMPI_DECLSPEC  int MPI_Comm_create_errhandler(MPI_Comm_errhandler_fn *function, 
+                                              MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn, 
+                                          MPI_Comm_delete_attr_function *comm_delete_attr_fn, 
+                                          int *comm_keyval, void *extra_state);
+OMPI_DECLSPEC  int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm);
+OMPI_DECLSPEC  int MPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval);
+OMPI_DECLSPEC  int MPI_Comm_disconnect(MPI_Comm *comm);
+OMPI_DECLSPEC  int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm);
+OMPI_DECLSPEC  MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
+OMPI_DECLSPEC  int MPI_Comm_free_keyval(int *comm_keyval);
+OMPI_DECLSPEC  int MPI_Comm_free(MPI_Comm *comm);
+OMPI_DECLSPEC  int MPI_Comm_get_attr(MPI_Comm comm, int comm_keyval, 
+                                     void *attribute_val, int *flag);
+/* NOTE(review): 'erhandler' spelling below matches upstream Open MPI --
+ * a harmless parameter-name typo, intentionally left as-is. */
+OMPI_DECLSPEC  int MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *erhandler);
+OMPI_DECLSPEC  int MPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen);
+OMPI_DECLSPEC  int MPI_Comm_get_parent(MPI_Comm *parent);
+OMPI_DECLSPEC  int MPI_Comm_group(MPI_Comm comm, MPI_Group *group);
+OMPI_DECLSPEC  int MPI_Comm_join(int fd, MPI_Comm *intercomm);
+OMPI_DECLSPEC  int MPI_Comm_rank(MPI_Comm comm, int *rank);
+OMPI_DECLSPEC  int MPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group);
+OMPI_DECLSPEC  int MPI_Comm_remote_size(MPI_Comm comm, int *size);
+OMPI_DECLSPEC  int MPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val);
+OMPI_DECLSPEC  int MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int MPI_Comm_set_name(MPI_Comm comm, char *comm_name);
+OMPI_DECLSPEC  int MPI_Comm_size(MPI_Comm comm, int *size);
+OMPI_DECLSPEC  int MPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info, 
+                                  int root, MPI_Comm comm, MPI_Comm *intercomm, 
+                                  int *array_of_errcodes);
+OMPI_DECLSPEC  int MPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_of_argv, 
+                                           int *array_of_maxprocs, MPI_Info *array_of_info, 
+                                           int root, MPI_Comm comm, MPI_Comm *intercomm, 
+                                           int *array_of_errcodes);
+OMPI_DECLSPEC  int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm);
+OMPI_DECLSPEC  int MPI_Comm_test_inter(MPI_Comm comm, int *flag);
+OMPI_DECLSPEC  int MPI_Dims_create(int nnodes, int ndims, int *dims);
+OMPI_DECLSPEC  MPI_Fint MPI_Errhandler_c2f(MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int MPI_Errhandler_create(MPI_Handler_function *function, 
+                                         MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  MPI_Errhandler MPI_Errhandler_f2c(MPI_Fint errhandler);
+OMPI_DECLSPEC  int MPI_Errhandler_free(MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int MPI_Error_class(int errorcode, int *errorclass);
+OMPI_DECLSPEC  int MPI_Error_string(int errorcode, char *string, int *resultlen);
+OMPI_DECLSPEC  int MPI_Exscan(void *sendbuf, void *recvbuf, int count, 
+                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+/* MPI-2 parallel file I/O interface -- only present when this Open MPI build
+ * was configured to provide it (OMPI_PROVIDE_MPI_FILE_INTERFACE). */
+#if OMPI_PROVIDE_MPI_FILE_INTERFACE
+OMPI_DECLSPEC  MPI_Fint MPI_File_c2f(MPI_File file);
+OMPI_DECLSPEC  MPI_File MPI_File_f2c(MPI_Fint file);
+OMPI_DECLSPEC  int MPI_File_call_errhandler(MPI_File fh, int errorcode);
+OMPI_DECLSPEC  int MPI_File_create_errhandler(MPI_File_errhandler_fn *function,
+                                              MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_File_set_errhandler( MPI_File file, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int MPI_File_get_errhandler( MPI_File file, MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_File_open(MPI_Comm comm, char *filename, int amode,
+                                 MPI_Info info, MPI_File *fh);
+OMPI_DECLSPEC  int MPI_File_close(MPI_File *fh);
+OMPI_DECLSPEC  int MPI_File_delete(char *filename, MPI_Info info);
+OMPI_DECLSPEC  int MPI_File_set_size(MPI_File fh, MPI_Offset size);
+OMPI_DECLSPEC  int MPI_File_preallocate(MPI_File fh, MPI_Offset size);
+OMPI_DECLSPEC  int MPI_File_get_size(MPI_File fh, MPI_Offset *size);
+OMPI_DECLSPEC  int MPI_File_get_group(MPI_File fh, MPI_Group *group);
+OMPI_DECLSPEC  int MPI_File_get_amode(MPI_File fh, int *amode);
+OMPI_DECLSPEC  int MPI_File_set_info(MPI_File fh, MPI_Info info);
+OMPI_DECLSPEC  int MPI_File_get_info(MPI_File fh, MPI_Info *info_used);
+OMPI_DECLSPEC  int MPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+                                     MPI_Datatype filetype, char *datarep, MPI_Info info);
+OMPI_DECLSPEC  int MPI_File_get_view(MPI_File fh, MPI_Offset *disp,
+                                     MPI_Datatype *etype, 
+                                     MPI_Datatype *filetype, char *datarep);
+OMPI_DECLSPEC  int MPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                    int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,
+                                        int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                     int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf,
+                                         int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                     int count, MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                      int count, MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_File_read(MPI_File fh, void *buf, int count,
+                                 MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_read_all(MPI_File fh, void *buf, int count,
+                                     MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write(MPI_File fh, void *buf, int count,
+                                  MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_all(MPI_File fh, void *buf, int count,
+                                      MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_iread(MPI_File fh, void *buf, int count,
+                                  MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_File_iwrite(MPI_File fh, void *buf, int count,
+                                   MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_File_seek(MPI_File fh, MPI_Offset offset, int whence);
+OMPI_DECLSPEC  int MPI_File_get_position(MPI_File fh, MPI_Offset *offset);
+OMPI_DECLSPEC  int MPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset,
+                                            MPI_Offset *disp);
+OMPI_DECLSPEC  int MPI_File_read_shared(MPI_File fh, void *buf, int count,
+                                        MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_shared(MPI_File fh, void *buf, int count,
+					 MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_iread_shared(MPI_File fh, void *buf, int count,
+                                         MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_File_iwrite_shared(MPI_File fh, void *buf, int count,
+                                          MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_File_read_ordered(MPI_File fh, void *buf, int count,
+                                         MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_ordered(MPI_File fh, void *buf, int count,
+                                          MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence);
+OMPI_DECLSPEC  int MPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset);
+/* Split-collective ("begin"/"end") variants of the collective reads/writes. */
+OMPI_DECLSPEC  int MPI_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void *buf,
+                                              int count, MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_File_read_at_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, void *buf,
+                                               int count, MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_File_write_at_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_read_all_begin(MPI_File fh, void *buf, int count,
+                                           MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_File_read_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_all_begin(MPI_File fh, void *buf, int count,
+                                            MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_File_write_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_read_ordered_begin(MPI_File fh, void *buf, int count,
+                                               MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_File_read_ordered_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_write_ordered_begin(MPI_File fh, void *buf, int count,
+                                                MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_File_write_ordered_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_File_get_type_extent(MPI_File fh, MPI_Datatype datatype,
+                                            MPI_Aint *extent);
+OMPI_DECLSPEC  int MPI_File_set_atomicity(MPI_File fh, int flag);
+OMPI_DECLSPEC  int MPI_File_get_atomicity(MPI_File fh, int *flag);
+OMPI_DECLSPEC  int MPI_File_sync(MPI_File fh);
+#endif /* #if OMPI_PROVIDE_MPI_FILE_INTERFACE */
+/* Remainder of the alphabetical C bindings, continuing after the optional
+ * file-I/O section.  Declarations must stay byte-identical to the installed
+ * Open MPI library's header. */
+OMPI_DECLSPEC  int MPI_Finalize(void);
+OMPI_DECLSPEC  int MPI_Finalized(int *flag);
+OMPI_DECLSPEC  int MPI_Free_mem(void *base);
+OMPI_DECLSPEC  int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                              void *recvbuf, int recvcount, MPI_Datatype recvtype, 
+                              int root, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                               void *recvbuf, int *recvcounts, int *displs, 
+                               MPI_Datatype recvtype, int root, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Get_address(void *location, MPI_Aint *address);
+OMPI_DECLSPEC  int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count);
+OMPI_DECLSPEC  int MPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, int *count);
+OMPI_DECLSPEC  int MPI_Get(void *origin_addr, int origin_count, 
+                           MPI_Datatype origin_datatype, int target_rank, 
+                           MPI_Aint target_disp, int target_count, 
+                           MPI_Datatype target_datatype, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Get_processor_name(char *name, int *resultlen);
+OMPI_DECLSPEC  int MPI_Get_version(int *version, int *subversion);
+OMPI_DECLSPEC  int MPI_Graph_create(MPI_Comm comm_old, int nnodes, int *index, 
+                                    int *edges, int reorder, MPI_Comm *comm_graph);
+OMPI_DECLSPEC  int MPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, 
+                                 int *index, int *edges);
+OMPI_DECLSPEC  int MPI_Graph_map(MPI_Comm comm, int nnodes, int *index, int *edges, 
+                                 int *newrank);
+OMPI_DECLSPEC  int MPI_Graph_neighbors_count(MPI_Comm comm, int rank, int *nneighbors);
+OMPI_DECLSPEC  int MPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, 
+                                       int *neighbors);
+OMPI_DECLSPEC  int MPI_Graphdims_get(MPI_Comm comm, int *nnodes, int *nedges);
+OMPI_DECLSPEC  int MPI_Grequest_complete(MPI_Request request);
+OMPI_DECLSPEC  int MPI_Grequest_start(MPI_Grequest_query_function *query_fn,
+                                      MPI_Grequest_free_function *free_fn,
+                                      MPI_Grequest_cancel_function *cancel_fn,
+                                      void *extra_state, MPI_Request *request);
+OMPI_DECLSPEC  MPI_Fint MPI_Group_c2f(MPI_Group group);
+OMPI_DECLSPEC  int MPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result);
+OMPI_DECLSPEC  int MPI_Group_difference(MPI_Group group1, MPI_Group group2, 
+                                        MPI_Group *newgroup);
+OMPI_DECLSPEC  int MPI_Group_excl(MPI_Group group, int n, int *ranks, 
+                                  MPI_Group *newgroup);
+OMPI_DECLSPEC  MPI_Group MPI_Group_f2c(MPI_Fint group);
+OMPI_DECLSPEC  int MPI_Group_free(MPI_Group *group);
+OMPI_DECLSPEC  int MPI_Group_incl(MPI_Group group, int n, int *ranks, 
+                                  MPI_Group *newgroup);
+OMPI_DECLSPEC  int MPI_Group_intersection(MPI_Group group1, MPI_Group group2, 
+                                          MPI_Group *newgroup);
+OMPI_DECLSPEC  int MPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], 
+                                        MPI_Group *newgroup);
+OMPI_DECLSPEC  int MPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], 
+                                        MPI_Group *newgroup);
+OMPI_DECLSPEC  int MPI_Group_rank(MPI_Group group, int *rank);
+OMPI_DECLSPEC  int MPI_Group_size(MPI_Group group, int *size);
+OMPI_DECLSPEC  int MPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, 
+                                             MPI_Group group2, int *ranks2);
+OMPI_DECLSPEC  int MPI_Group_union(MPI_Group group1, MPI_Group group2, 
+                                   MPI_Group *newgroup);
+OMPI_DECLSPEC  int MPI_Ibsend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                              int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  MPI_Fint MPI_Info_c2f(MPI_Info info);
+OMPI_DECLSPEC  int MPI_Info_create(MPI_Info *info);
+OMPI_DECLSPEC  int MPI_Info_delete(MPI_Info info, char *key);
+OMPI_DECLSPEC  int MPI_Info_dup(MPI_Info info, MPI_Info *newinfo);
+OMPI_DECLSPEC  MPI_Info MPI_Info_f2c(MPI_Fint info);
+OMPI_DECLSPEC  int MPI_Info_free(MPI_Info *info);
+OMPI_DECLSPEC  int MPI_Info_get(MPI_Info info, char *key, int valuelen, 
+                                char *value, int *flag);
+OMPI_DECLSPEC  int MPI_Info_get_nkeys(MPI_Info info, int *nkeys);
+OMPI_DECLSPEC  int MPI_Info_get_nthkey(MPI_Info info, int n, char *key);
+OMPI_DECLSPEC  int MPI_Info_get_valuelen(MPI_Info info, char *key, int *valuelen, 
+                                         int *flag);
+OMPI_DECLSPEC  int MPI_Info_set(MPI_Info info, char *key, char *value);
+OMPI_DECLSPEC  int MPI_Init(int *argc, char ***argv);
+OMPI_DECLSPEC  int MPI_Initialized(int *flag);
+OMPI_DECLSPEC  int MPI_Init_thread(int *argc, char ***argv, int required, 
+                                   int *provided);
+OMPI_DECLSPEC  int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, 
+                                        MPI_Comm bridge_comm, int remote_leader, 
+                                        int tag, MPI_Comm *newintercomm);
+OMPI_DECLSPEC  int MPI_Intercomm_merge(MPI_Comm intercomm, int high, 
+                                       MPI_Comm *newintercomm);
+OMPI_DECLSPEC  int MPI_Iprobe(int source, int tag, MPI_Comm comm, int *flag, 
+                              MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, 
+                             int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Irsend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                              int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                             int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Issend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                              int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Is_thread_main(int *flag);
+OMPI_DECLSPEC  int MPI_Keyval_create(MPI_Copy_function *copy_fn, 
+                                     MPI_Delete_function *delete_fn, 
+                                     int *keyval, void *extra_state);
+OMPI_DECLSPEC  int MPI_Keyval_free(int *keyval);
+OMPI_DECLSPEC  int MPI_Lookup_name(char *service_name, MPI_Info info, char *port_name);
+OMPI_DECLSPEC  MPI_Fint MPI_Op_c2f(MPI_Op op); 
+OMPI_DECLSPEC  int MPI_Op_create(MPI_User_function *function, int commute, MPI_Op *op);
+OMPI_DECLSPEC  int MPI_Open_port(MPI_Info info, char *port_name);
+OMPI_DECLSPEC  MPI_Op MPI_Op_f2c(MPI_Fint op);
+OMPI_DECLSPEC  int MPI_Op_free(MPI_Op *op);
+OMPI_DECLSPEC  int MPI_Pack_external(char *datarep, void *inbuf, int incount,
+                                     MPI_Datatype datatype, void *outbuf,
+                                     MPI_Aint outsize, MPI_Aint *position);
+OMPI_DECLSPEC  int MPI_Pack_external_size(char *datarep, int incount, 
+                                          MPI_Datatype datatype, MPI_Aint *size);
+OMPI_DECLSPEC  int MPI_Pack(void *inbuf, int incount, MPI_Datatype datatype, 
+                            void *outbuf, int outsize, int *position, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, 
+                                 int *size);
+OMPI_DECLSPEC  int MPI_Pcontrol(const int level, ...);
+OMPI_DECLSPEC  int MPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Publish_name(char *service_name, MPI_Info info, 
+                                    char *port_name);
+OMPI_DECLSPEC  int MPI_Put(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, 
+                           int target_rank, MPI_Aint target_disp, int target_count, 
+                           MPI_Datatype target_datatype, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Query_thread(int *provided);
+OMPI_DECLSPEC  int MPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int source,
+                                 int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, 
+                            int tag, MPI_Comm comm, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Reduce(void *sendbuf, void *recvbuf, int count, 
+                              MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, 
+                                      MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Register_datarep(char *datarep, 
+                                        MPI_Datarep_conversion_function *read_conversion_fn,
+                                        MPI_Datarep_conversion_function *write_conversion_fn,
+                                        MPI_Datarep_extent_function *dtype_file_extent_fn,
+                                        void *extra_state);
+OMPI_DECLSPEC  MPI_Fint MPI_Request_c2f(MPI_Request request);
+OMPI_DECLSPEC  MPI_Request MPI_Request_f2c(MPI_Fint request);
+OMPI_DECLSPEC  int MPI_Request_free(MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Request_get_status(MPI_Request request, int *flag, 
+                                          MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Rsend(void *ibuf, int count, MPI_Datatype datatype, int dest, 
+                             int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Rsend_init(void *buf, int count, MPI_Datatype datatype, 
+                                  int dest, int tag, MPI_Comm comm, 
+                                  MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Scan(void *sendbuf, void *recvbuf, int count, 
+                            MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                               void *recvbuf, int recvcount, MPI_Datatype recvtype, 
+                               int root, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, 
+                                MPI_Datatype sendtype, void *recvbuf, int recvcount, 
+                                MPI_Datatype recvtype, int root, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Send_init(void *buf, int count, MPI_Datatype datatype, 
+                                 int dest, int tag, MPI_Comm comm, 
+                                 MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, 
+                            int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                int dest, int sendtag, void *recvbuf, int recvcount,
+                                MPI_Datatype recvtype, int source, int recvtag, 
+                                MPI_Comm comm,  MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype, 
+                                        int dest, int sendtag, int source, int recvtag,
+                                        MPI_Comm comm, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Ssend_init(void *buf, int count, MPI_Datatype datatype, 
+                                  int dest, int tag, MPI_Comm comm, 
+                                  MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Ssend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                             int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Start(MPI_Request *request);
+OMPI_DECLSPEC  int MPI_Startall(int count, MPI_Request *array_of_requests);
+OMPI_DECLSPEC  int MPI_Status_c2f(MPI_Status *c_status, MPI_Fint *f_status);
+OMPI_DECLSPEC  int MPI_Status_f2c(MPI_Fint *f_status, MPI_Status *c_status);
+OMPI_DECLSPEC  int MPI_Status_set_cancelled(MPI_Status *status, int flag);
+OMPI_DECLSPEC  int MPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype,
+                                           int count);
+OMPI_DECLSPEC  int MPI_Testall(int count, MPI_Request array_of_requests[], int *flag, 
+                               MPI_Status array_of_statuses[]);
+OMPI_DECLSPEC  int MPI_Testany(int count, MPI_Request array_of_requests[], int *index, 
+                               int *flag, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Test(MPI_Request *request, int *flag, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Test_cancelled(MPI_Status *status, int *flag);
+OMPI_DECLSPEC  int MPI_Testsome(int incount, MPI_Request array_of_requests[], 
+                                int *outcount, int array_of_indices[], 
+                                MPI_Status array_of_statuses[]);
+OMPI_DECLSPEC  int MPI_Topo_test(MPI_Comm comm, int *status);
+OMPI_DECLSPEC  MPI_Fint MPI_Type_c2f(MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_Type_commit(MPI_Datatype *type);
+OMPI_DECLSPEC  int MPI_Type_contiguous(int count, MPI_Datatype oldtype, 
+                                       MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_darray(int size, int rank, int ndims, 
+                                          int gsize_array[], int distrib_array[], 
+                                          int darg_array[], int psize_array[],
+                                          int order, MPI_Datatype oldtype, 
+                                          MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_f90_complex(int p, int r, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_f90_integer(int r, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_f90_real(int p, int r, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_hindexed(int count, int array_of_blocklengths[], 
+                                            MPI_Aint array_of_displacements[], 
+                                            MPI_Datatype oldtype, 
+                                            MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_hvector(int count, int blocklength, MPI_Aint stride, 
+                                           MPI_Datatype oldtype, 
+                                           MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_keyval(MPI_Type_copy_attr_function *type_copy_attr_fn, 
+                                          MPI_Type_delete_attr_function *type_delete_attr_fn, 
+                                          int *type_keyval, void *extra_state);
+OMPI_DECLSPEC  int MPI_Type_create_indexed_block(int count, int blocklength,
+                                                 int array_of_displacements[],
+                                                 MPI_Datatype oldtype,
+                                                 MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_struct(int count, int array_of_block_lengths[], 
+                                          MPI_Aint array_of_displacements[], 
+                                          MPI_Datatype array_of_types[], 
+                                          MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_subarray(int ndims, int size_array[], int subsize_array[], 
+                                            int start_array[], int order, 
+                                            MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_create_resized(MPI_Datatype oldtype, MPI_Aint lb, 
+                                           MPI_Aint extent, MPI_Datatype *newtype); 
+OMPI_DECLSPEC  int MPI_Type_delete_attr(MPI_Datatype type, int type_keyval);
+OMPI_DECLSPEC  int MPI_Type_dup(MPI_Datatype type, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_extent(MPI_Datatype type, MPI_Aint *extent);
+OMPI_DECLSPEC  int MPI_Type_free(MPI_Datatype *type);
+OMPI_DECLSPEC  int MPI_Type_free_keyval(int *type_keyval);
+OMPI_DECLSPEC  MPI_Datatype MPI_Type_f2c(MPI_Fint datatype);
+OMPI_DECLSPEC  int MPI_Type_get_attr(MPI_Datatype type, int type_keyval, 
+                                     void *attribute_val, int *flag);
+OMPI_DECLSPEC  int MPI_Type_get_contents(MPI_Datatype mtype, int max_integers, 
+                                         int max_addresses, int max_datatypes, 
+                                         int array_of_integers[], 
+                                         MPI_Aint array_of_addresses[], 
+                                         MPI_Datatype array_of_datatypes[]);
+OMPI_DECLSPEC  int MPI_Type_get_envelope(MPI_Datatype type, int *num_integers, 
+                                         int *num_addresses, int *num_datatypes, 
+                                         int *combiner);
+OMPI_DECLSPEC  int MPI_Type_get_extent(MPI_Datatype type, MPI_Aint *lb, 
+                                       MPI_Aint *extent);
+OMPI_DECLSPEC  int MPI_Type_get_name(MPI_Datatype type, char *type_name, 
+                                     int *resultlen);
+OMPI_DECLSPEC  int MPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint *true_lb, 
+                                            MPI_Aint *true_extent);
+OMPI_DECLSPEC  int MPI_Type_hindexed(int count, int array_of_blocklengths[], 
+                                     MPI_Aint array_of_displacements[], 
+                                     MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_hvector(int count, int blocklength, MPI_Aint stride, 
+                                    MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_indexed(int count, int array_of_blocklengths[], 
+                                    int array_of_displacements[], 
+                                    MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_lb(MPI_Datatype type, MPI_Aint *lb);
+OMPI_DECLSPEC  int MPI_Type_match_size(int typeclass, int size, MPI_Datatype *type);
+OMPI_DECLSPEC  int MPI_Type_set_attr(MPI_Datatype type, int type_keyval, 
+                                     void *attr_val);
+OMPI_DECLSPEC  int MPI_Type_set_name(MPI_Datatype type, char *type_name);
+OMPI_DECLSPEC  int MPI_Type_size(MPI_Datatype type, int *size);
+OMPI_DECLSPEC  int MPI_Type_struct(int count, int array_of_blocklengths[], 
+                                   MPI_Aint array_of_displacements[], 
+                                   MPI_Datatype array_of_types[], 
+                                   MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Type_ub(MPI_Datatype mtype, MPI_Aint *ub);
+OMPI_DECLSPEC  int MPI_Type_vector(int count, int blocklength, int stride, 
+                                   MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int MPI_Unpack(void *inbuf, int insize, int *position, 
+                              void *outbuf, int outcount, MPI_Datatype datatype, 
+                              MPI_Comm comm);
+OMPI_DECLSPEC  int MPI_Unpublish_name(char *service_name, MPI_Info info, char *port_name);
+OMPI_DECLSPEC  int MPI_Unpack_external (char *datarep, void *inbuf, MPI_Aint insize,
+                                        MPI_Aint *position, void *outbuf, int outcount,
+                                        MPI_Datatype datatype);
+OMPI_DECLSPEC  int MPI_Waitall(int count, MPI_Request *array_of_requests, 
+                               MPI_Status *array_of_statuses);
+OMPI_DECLSPEC  int MPI_Waitany(int count, MPI_Request *array_of_requests, 
+                               int *index, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Wait(MPI_Request *request, MPI_Status *status);
+OMPI_DECLSPEC  int MPI_Waitsome(int incount, MPI_Request *array_of_requests, 
+                                int *outcount, int *array_of_indices, 
+                                MPI_Status *array_of_statuses);
+OMPI_DECLSPEC  MPI_Fint MPI_Win_c2f(MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_call_errhandler(MPI_Win win, int errorcode);
+OMPI_DECLSPEC  int MPI_Win_complete(MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_create(void *base, MPI_Aint size, int disp_unit, 
+                                  MPI_Info info, MPI_Comm comm, MPI_Win *win);
+OMPI_DECLSPEC  int MPI_Win_create_errhandler(MPI_Win_errhandler_fn *function, 
+                                             MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_Win_create_keyval(MPI_Win_copy_attr_function *win_copy_attr_fn, 
+                                         MPI_Win_delete_attr_function *win_delete_attr_fn, 
+                                         int *win_keyval, void *extra_state);
+OMPI_DECLSPEC  int MPI_Win_delete_attr(MPI_Win win, int win_keyval);
+OMPI_DECLSPEC  MPI_Win MPI_Win_f2c(MPI_Fint win);
+OMPI_DECLSPEC  int MPI_Win_fence(int assert, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_free(MPI_Win *win);
+OMPI_DECLSPEC  int MPI_Win_free_keyval(int *win_keyval);
+OMPI_DECLSPEC  int MPI_Win_get_attr(MPI_Win win, int win_keyval, 
+                                    void *attribute_val, int *flag);
+OMPI_DECLSPEC  int MPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int MPI_Win_get_group(MPI_Win win, MPI_Group *group);
+OMPI_DECLSPEC  int MPI_Win_get_name(MPI_Win win, char *win_name, int *resultlen);
+OMPI_DECLSPEC  int MPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_post(MPI_Group group, int assert, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_set_attr(MPI_Win win, int win_keyval, void *attribute_val);
+OMPI_DECLSPEC  int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int MPI_Win_set_name(MPI_Win win, char *win_name);
+OMPI_DECLSPEC  int MPI_Win_start(MPI_Group group, int assert, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_test(MPI_Win win, int *flag);
+OMPI_DECLSPEC  int MPI_Win_unlock(int rank, MPI_Win win);
+OMPI_DECLSPEC  int MPI_Win_wait(MPI_Win win);
+OMPI_DECLSPEC  double MPI_Wtick(void);
+OMPI_DECLSPEC  double MPI_Wtime(void);
+
+
+  /*
+   * Profiling MPI API
+   */
+OMPI_DECLSPEC  int PMPI_Abort(MPI_Comm comm, int errorcode);
+OMPI_DECLSPEC  int PMPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
+                                   int target_rank, MPI_Aint target_disp, int target_count,
+                                   MPI_Datatype target_datatype, MPI_Op op, MPI_Win win); 
+OMPI_DECLSPEC  int PMPI_Add_error_class(int *errorclass);
+OMPI_DECLSPEC  int PMPI_Add_error_code(int errorclass, int *errorcode);
+OMPI_DECLSPEC  int PMPI_Add_error_string(int errorcode, char *string);
+OMPI_DECLSPEC  int PMPI_Address(void *location, MPI_Aint *address);
+OMPI_DECLSPEC  int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                  void *recvbuf, int recvcount, 
+                                  MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                   void *recvbuf, int *recvcounts, 
+                                   int *displs, MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Alloc_mem(MPI_Aint size, MPI_Info info, 
+                                  void *baseptr);
+OMPI_DECLSPEC  int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, 
+                                  MPI_Datatype datatype, MPI_Op op, MPI_Comm comm); 
+OMPI_DECLSPEC  int PMPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                 void *recvbuf, int recvcount, 
+                                 MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Alltoallv(void *sendbuf, int *sendcounts, int *sdispls, 
+                                  MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
+                                  int *rdispls, MPI_Datatype recvtype, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Alltoallw(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendtypes, 
+                    void *recvbuf, int *recvcounts, int *rdispls, MPI_Datatype *recvtypes,
+                    MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Attr_delete(MPI_Comm comm, int keyval);
+OMPI_DECLSPEC  int PMPI_Attr_get(MPI_Comm comm, int keyval, void *attribute_val, int *flag);
+OMPI_DECLSPEC  int PMPI_Attr_put(MPI_Comm comm, int keyval, void *attribute_val);
+OMPI_DECLSPEC  int PMPI_Barrier(MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Bcast(void *buffer, int count, MPI_Datatype datatype, 
+                              int root, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Bsend(void *buf, int count, MPI_Datatype datatype, 
+                              int dest, int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Bsend_init(void *buf, int count, MPI_Datatype datatype, 
+                                   int dest, int tag, MPI_Comm comm, MPI_Request *request); 
+OMPI_DECLSPEC  int PMPI_Buffer_attach(void *buffer, int size);
+OMPI_DECLSPEC  int PMPI_Buffer_detach(void *buffer, int *size);
+OMPI_DECLSPEC  int PMPI_Cancel(MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int *coords);
+OMPI_DECLSPEC  int PMPI_Cart_create(MPI_Comm old_comm, int ndims, int *dims, 
+                                    int *periods, int reorder, MPI_Comm *comm_cart);
+OMPI_DECLSPEC  int PMPI_Cart_get(MPI_Comm comm, int maxdims, int *dims, 
+                                 int *periods, int *coords);
+OMPI_DECLSPEC  int PMPI_Cart_map(MPI_Comm comm, int ndims, int *dims, 
+                                 int *periods, int *newrank);
+OMPI_DECLSPEC  int PMPI_Cart_rank(MPI_Comm comm, int *coords, int *rank);
+OMPI_DECLSPEC  int PMPI_Cart_shift(MPI_Comm comm, int direction, int disp, 
+                                   int *rank_source, int *rank_dest);
+OMPI_DECLSPEC  int PMPI_Cart_sub(MPI_Comm comm, int *remain_dims, MPI_Comm *new_comm);
+OMPI_DECLSPEC  int PMPI_Cartdim_get(MPI_Comm comm, int *ndims);
+OMPI_DECLSPEC  int PMPI_Close_port(char *port_name);
+OMPI_DECLSPEC  int PMPI_Comm_accept(char *port_name, MPI_Info info, int root, 
+                                    MPI_Comm comm, MPI_Comm *newcomm);
+OMPI_DECLSPEC  MPI_Fint PMPI_Comm_c2f(MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Comm_call_errhandler(MPI_Comm comm, int errorcode);
+OMPI_DECLSPEC  int PMPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result);
+OMPI_DECLSPEC  int PMPI_Comm_connect(char *port_name, MPI_Info info, int root, 
+                                     MPI_Comm comm, MPI_Comm *newcomm);
+OMPI_DECLSPEC  int PMPI_Comm_create_errhandler(MPI_Comm_errhandler_fn *function, 
+                                               MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function *comm_copy_attr_fn, 
+                                           MPI_Comm_delete_attr_function *comm_delete_attr_fn, 
+                                           int *comm_keyval, void *extra_state);
+OMPI_DECLSPEC  int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm);
+OMPI_DECLSPEC  int PMPI_Comm_delete_attr(MPI_Comm comm, int comm_keyval);
+OMPI_DECLSPEC  int PMPI_Comm_disconnect(MPI_Comm *comm);
+OMPI_DECLSPEC  int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm);
+OMPI_DECLSPEC  MPI_Comm PMPI_Comm_f2c(MPI_Fint comm);
+OMPI_DECLSPEC  int PMPI_Comm_free_keyval(int *comm_keyval);
+OMPI_DECLSPEC  int PMPI_Comm_free(MPI_Comm *comm);
+OMPI_DECLSPEC  int PMPI_Comm_get_attr(MPI_Comm comm, int comm_keyval, 
+                                      void *attribute_val, int *flag);
+OMPI_DECLSPEC  int PMPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler *erhandler);
+OMPI_DECLSPEC  int PMPI_Comm_get_name(MPI_Comm comm, char *comm_name, int *resultlen);
+OMPI_DECLSPEC  int PMPI_Comm_get_parent(MPI_Comm *parent);
+OMPI_DECLSPEC  int PMPI_Comm_group(MPI_Comm comm, MPI_Group *group);
+OMPI_DECLSPEC  int PMPI_Comm_join(int fd, MPI_Comm *intercomm);
+OMPI_DECLSPEC  int PMPI_Comm_rank(MPI_Comm comm, int *rank);
+OMPI_DECLSPEC  int PMPI_Comm_remote_group(MPI_Comm comm, MPI_Group *group);
+OMPI_DECLSPEC  int PMPI_Comm_remote_size(MPI_Comm comm, int *size);
+OMPI_DECLSPEC  int PMPI_Comm_set_attr(MPI_Comm comm, int comm_keyval, void *attribute_val);
+OMPI_DECLSPEC  int PMPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int PMPI_Comm_set_name(MPI_Comm comm, char *comm_name);
+OMPI_DECLSPEC  int PMPI_Comm_size(MPI_Comm comm, int *size);
+OMPI_DECLSPEC  int PMPI_Comm_spawn(char *command, char **argv, int maxprocs, MPI_Info info, 
+                                   int root, MPI_Comm comm, MPI_Comm *intercomm, 
+                                   int *array_of_errcodes);
+OMPI_DECLSPEC  int PMPI_Comm_spawn_multiple(int count, char **array_of_commands, char ***array_of_argv, 
+                                            int *array_of_maxprocs, MPI_Info *array_of_info, 
+                                            int root, MPI_Comm comm, MPI_Comm *intercomm, 
+                                            int *array_of_errcodes);
+OMPI_DECLSPEC  int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *newcomm);
+OMPI_DECLSPEC  int PMPI_Comm_test_inter(MPI_Comm comm, int *flag);
+OMPI_DECLSPEC  int PMPI_Dims_create(int nnodes, int ndims, int *dims);
+OMPI_DECLSPEC  MPI_Fint PMPI_Errhandler_c2f(MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int PMPI_Errhandler_create(MPI_Handler_function *function,
+                                          MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  MPI_Errhandler PMPI_Errhandler_f2c(MPI_Fint errhandler);
+OMPI_DECLSPEC  int PMPI_Errhandler_free(MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int PMPI_Error_class(int errorcode, int *errorclass);
+OMPI_DECLSPEC  int PMPI_Error_string(int errorcode, char *string, int *resultlen);
+OMPI_DECLSPEC  int PMPI_Exscan(void *sendbuf, void *recvbuf, int count,
+                               MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+#if OMPI_PROVIDE_MPI_FILE_INTERFACE
+OMPI_DECLSPEC  MPI_Fint PMPI_File_c2f(MPI_File file);
+OMPI_DECLSPEC  MPI_File PMPI_File_f2c(MPI_Fint file);
+OMPI_DECLSPEC  int PMPI_File_call_errhandler(MPI_File fh, int errorcode);
+OMPI_DECLSPEC  int PMPI_File_create_errhandler(MPI_File_errhandler_fn *function,
+                                               MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_File_set_errhandler( MPI_File file, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int PMPI_File_get_errhandler( MPI_File file, MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_File_open(MPI_Comm comm, char *filename, int amode,
+                                  MPI_Info info, MPI_File *fh);
+OMPI_DECLSPEC  int PMPI_File_close(MPI_File *fh);
+OMPI_DECLSPEC  int PMPI_File_delete(char *filename, MPI_Info info);
+OMPI_DECLSPEC  int PMPI_File_set_size(MPI_File fh, MPI_Offset size);
+OMPI_DECLSPEC  int PMPI_File_preallocate(MPI_File fh, MPI_Offset size);
+OMPI_DECLSPEC  int PMPI_File_get_size(MPI_File fh, MPI_Offset *size);
+OMPI_DECLSPEC  int PMPI_File_get_group(MPI_File fh, MPI_Group *group);
+OMPI_DECLSPEC  int PMPI_File_get_amode(MPI_File fh, int *amode);
+OMPI_DECLSPEC  int PMPI_File_set_info(MPI_File fh, MPI_Info info);
+OMPI_DECLSPEC  int PMPI_File_get_info(MPI_File fh, MPI_Info *info_used);
+OMPI_DECLSPEC  int PMPI_File_set_view(MPI_File fh, MPI_Offset disp, MPI_Datatype etype,
+                                      MPI_Datatype filetype, char *datarep, MPI_Info info);
+OMPI_DECLSPEC  int PMPI_File_get_view(MPI_File fh, MPI_Offset *disp,
+                                      MPI_Datatype *etype, 
+                                      MPI_Datatype *filetype, char *datarep);
+OMPI_DECLSPEC  int PMPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                     int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf,
+                                         int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                      int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf,
+                                          int count, MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_iread_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                      int count, MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_File_iwrite_at(MPI_File fh, MPI_Offset offset, void *buf,
+                                       int count, MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_File_read(MPI_File fh, void *buf, int count,
+                                  MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_read_all(MPI_File fh, void *buf, int count,
+                                      MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write(MPI_File fh, void *buf, int count,
+                                   MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_all(MPI_File fh, void *buf, int count,
+                                       MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_iread(MPI_File fh, void *buf, int count,
+                                   MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_File_iwrite(MPI_File fh, void *buf, int count,
+                                    MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_File_seek(MPI_File fh, MPI_Offset offset, int whence);
+OMPI_DECLSPEC  int PMPI_File_get_position(MPI_File fh, MPI_Offset *offset);
+OMPI_DECLSPEC  int PMPI_File_get_byte_offset(MPI_File fh, MPI_Offset offset,
+                                             MPI_Offset *disp);
+OMPI_DECLSPEC  int PMPI_File_read_shared(MPI_File fh, void *buf, int count,
+                                         MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_shared(MPI_File fh, void *buf, int count,
+                                          MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_iread_shared(MPI_File fh, void *buf, int count,
+                                          MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_File_iwrite_shared(MPI_File fh, void *buf, int count,
+                                           MPI_Datatype datatype, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_File_read_ordered(MPI_File fh, void *buf, int count,
+                                          MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_ordered(MPI_File fh, void *buf, int count,
+                                           MPI_Datatype datatype, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence);
+OMPI_DECLSPEC  int PMPI_File_get_position_shared(MPI_File fh, MPI_Offset *offset);
+OMPI_DECLSPEC  int PMPI_File_read_at_all_begin(MPI_File fh, MPI_Offset offset, void *buf,
+                                               int count, MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_File_read_at_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_at_all_begin(MPI_File fh, MPI_Offset offset, void *buf,
+                                                int count, MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_File_write_at_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_read_all_begin(MPI_File fh, void *buf, int count,
+                                            MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_File_read_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_all_begin(MPI_File fh, void *buf, int count,
+                                             MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_File_write_all_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_read_ordered_begin(MPI_File fh, void *buf, int count,
+                                                MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_File_read_ordered_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_write_ordered_begin(MPI_File fh, void *buf, int count,
+                                                 MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_File_write_ordered_end(MPI_File fh, void *buf, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_File_get_type_extent(MPI_File fh, MPI_Datatype datatype,
+                                             MPI_Aint *extent);
+OMPI_DECLSPEC  int PMPI_File_set_atomicity(MPI_File fh, int flag);
+OMPI_DECLSPEC  int PMPI_File_get_atomicity(MPI_File fh, int *flag);
+OMPI_DECLSPEC  int PMPI_File_sync(MPI_File fh);
+#endif /* #if OMPI_PROVIDE_MPI_FILE_INTERFACE */
+OMPI_DECLSPEC  int PMPI_Finalize(void);
+OMPI_DECLSPEC  int PMPI_Finalized(int *flag);
+OMPI_DECLSPEC  int PMPI_Free_mem(void *base);
+OMPI_DECLSPEC  int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                               void *recvbuf, int recvcount, MPI_Datatype recvtype, 
+                               int root, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                void *recvbuf, int *recvcounts, int *displs, 
+                                MPI_Datatype recvtype, int root, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Get_address(void *location, MPI_Aint *address);
+OMPI_DECLSPEC  int PMPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count);
+OMPI_DECLSPEC  int PMPI_Get_elements(MPI_Status *status, MPI_Datatype datatype, 
+                                     int *count);
+OMPI_DECLSPEC  int PMPI_Get(void *origin_addr, int origin_count, 
+                            MPI_Datatype origin_datatype, int target_rank, 
+                            MPI_Aint target_disp, int target_count, 
+                            MPI_Datatype target_datatype, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Get_processor_name(char *name, int *resultlen);
+OMPI_DECLSPEC  int PMPI_Get_version(int *version, int *subversion);
+OMPI_DECLSPEC  int PMPI_Graph_create(MPI_Comm comm_old, int nnodes, int *index, 
+                                     int *edges, int reorder, MPI_Comm *comm_graph);
+OMPI_DECLSPEC  int PMPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, 
+                                  int *index, int *edges);
+OMPI_DECLSPEC  int PMPI_Graph_map(MPI_Comm comm, int nnodes, int *index, int *edges, 
+                                  int *newrank);
+OMPI_DECLSPEC  int PMPI_Graph_neighbors_count(MPI_Comm comm, int rank, int *nneighbors);
+OMPI_DECLSPEC  int PMPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, 
+                                        int *neighbors);
+OMPI_DECLSPEC  int PMPI_Graphdims_get(MPI_Comm comm, int *nnodes, int *nedges);
+OMPI_DECLSPEC  int PMPI_Grequest_complete(MPI_Request request);
+OMPI_DECLSPEC  int PMPI_Grequest_start(MPI_Grequest_query_function *query_fn,
+                                       MPI_Grequest_free_function *free_fn,
+                                       MPI_Grequest_cancel_function *cancel_fn,
+                                       void *extra_state, MPI_Request *request);
+OMPI_DECLSPEC  MPI_Fint PMPI_Group_c2f(MPI_Group group);
+OMPI_DECLSPEC  int PMPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result);
+OMPI_DECLSPEC  int PMPI_Group_difference(MPI_Group group1, MPI_Group group2, 
+                                         MPI_Group *newgroup);
+OMPI_DECLSPEC  int PMPI_Group_excl(MPI_Group group, int n, int *ranks, 
+                                   MPI_Group *newgroup);
+OMPI_DECLSPEC  MPI_Group PMPI_Group_f2c(MPI_Fint group);
+OMPI_DECLSPEC  int PMPI_Group_free(MPI_Group *group);
+OMPI_DECLSPEC  int PMPI_Group_incl(MPI_Group group, int n, int *ranks, 
+                                   MPI_Group *newgroup);
+OMPI_DECLSPEC  int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2, 
+                                           MPI_Group *newgroup);
+OMPI_DECLSPEC  int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], 
+                                         MPI_Group *newgroup);
+OMPI_DECLSPEC  int PMPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], 
+                                         MPI_Group *newgroup);
+OMPI_DECLSPEC  int PMPI_Group_rank(MPI_Group group, int *rank);
+OMPI_DECLSPEC  int PMPI_Group_size(MPI_Group group, int *size);
+OMPI_DECLSPEC  int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, 
+                                              MPI_Group group2, int *ranks2);
+OMPI_DECLSPEC  int PMPI_Group_union(MPI_Group group1, MPI_Group group2, 
+                                    MPI_Group *newgroup);
+OMPI_DECLSPEC  int PMPI_Ibsend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                               int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  MPI_Fint PMPI_Info_c2f(MPI_Info info);
+OMPI_DECLSPEC  int PMPI_Info_create(MPI_Info *info);
+OMPI_DECLSPEC  int PMPI_Info_delete(MPI_Info info, char *key);
+OMPI_DECLSPEC  int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo);
+OMPI_DECLSPEC  MPI_Info PMPI_Info_f2c(MPI_Fint info);
+OMPI_DECLSPEC  int PMPI_Info_free(MPI_Info *info);
+OMPI_DECLSPEC  int PMPI_Info_get(MPI_Info info, char *key, int valuelen, 
+                                 char *value, int *flag);
+OMPI_DECLSPEC  int PMPI_Info_get_nkeys(MPI_Info info, int *nkeys);
+OMPI_DECLSPEC  int PMPI_Info_get_nthkey(MPI_Info info, int n, char *key);
+OMPI_DECLSPEC  int PMPI_Info_get_valuelen(MPI_Info info, char *key, int *valuelen, 
+                                          int *flag);
+OMPI_DECLSPEC  int PMPI_Info_set(MPI_Info info, char *key, char *value);
+OMPI_DECLSPEC  int PMPI_Init(int *argc, char ***argv);
+OMPI_DECLSPEC  int PMPI_Initialized(int *flag);
+OMPI_DECLSPEC  int PMPI_Init_thread(int *argc, char ***argv, int required, 
+                                    int *provided);
+OMPI_DECLSPEC  int PMPI_Intercomm_create(MPI_Comm local_comm, int local_leader, 
+                                         MPI_Comm bridge_comm, int remote_leader, 
+                                         int tag, MPI_Comm *newintercomm);
+OMPI_DECLSPEC  int PMPI_Intercomm_merge(MPI_Comm intercomm, int high, 
+                                        MPI_Comm *newintercomm);
+OMPI_DECLSPEC  int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int *flag, 
+                               MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, 
+                              int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Irsend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                               int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                              int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Issend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                               int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Is_thread_main(int *flag);
+OMPI_DECLSPEC  int PMPI_Keyval_create(MPI_Copy_function *copy_fn, 
+                                      MPI_Delete_function *delete_fn, 
+                                      int *keyval, void *extra_state);
+OMPI_DECLSPEC  int PMPI_Keyval_free(int *keyval);
+OMPI_DECLSPEC  int PMPI_Lookup_name(char *service_name, MPI_Info info, char *port_name);
+OMPI_DECLSPEC  MPI_Fint PMPI_Op_c2f(MPI_Op op); 
+OMPI_DECLSPEC  int PMPI_Op_create(MPI_User_function *function, int commute, 
+                                  MPI_Op *op);
+OMPI_DECLSPEC  int PMPI_Open_port(MPI_Info info, char *port_name);
+OMPI_DECLSPEC  MPI_Op PMPI_Op_f2c(MPI_Fint op);
+OMPI_DECLSPEC  int PMPI_Op_free(MPI_Op *op);
+OMPI_DECLSPEC  int PMPI_Pack_external(char *datarep, void *inbuf, int incount,
+                                      MPI_Datatype datatype, void *outbuf,
+                                      MPI_Aint outsize, MPI_Aint *position);
+OMPI_DECLSPEC  int PMPI_Pack_external_size(char *datarep, int incount, 
+                                           MPI_Datatype datatype, MPI_Aint *size);
+OMPI_DECLSPEC  int PMPI_Pack(void *inbuf, int incount, MPI_Datatype datatype, 
+                             void *outbuf, int outsize, int *position, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, 
+                                  int *size);
+OMPI_DECLSPEC  int PMPI_Pcontrol(const int level, ...);
+OMPI_DECLSPEC  int PMPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Publish_name(char *service_name, MPI_Info info, 
+                                     char *port_name);
+OMPI_DECLSPEC  int PMPI_Put(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, 
+                            int target_rank, MPI_Aint target_disp, int target_count, 
+                            MPI_Datatype target_datatype, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Query_thread(int *provided);
+OMPI_DECLSPEC  int PMPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int source,
+                                  int tag, MPI_Comm comm, MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, 
+                             int tag, MPI_Comm comm, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, 
+                               MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, 
+                                       MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Register_datarep(char *datarep, 
+                                         MPI_Datarep_conversion_function *read_conversion_fn,
+                                         MPI_Datarep_conversion_function *write_conversion_fn,
+                                         MPI_Datarep_extent_function *dtype_file_extent_fn,
+                                         void *extra_state);
+OMPI_DECLSPEC  MPI_Fint PMPI_Request_c2f(MPI_Request request);
+OMPI_DECLSPEC  MPI_Request PMPI_Request_f2c(MPI_Fint request);
+OMPI_DECLSPEC  int PMPI_Request_free(MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Request_get_status(MPI_Request request, int *flag, 
+                                           MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Rsend(void *ibuf, int count, MPI_Datatype datatype, int dest, 
+                              int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Rsend_init(void *buf, int count, MPI_Datatype datatype, 
+                                   int dest, int tag, MPI_Comm comm, 
+                                   MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Scan(void *sendbuf, void *recvbuf, int count, 
+                             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                void *recvbuf, int recvcount, MPI_Datatype recvtype, 
+                                int root, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, 
+                                 MPI_Datatype sendtype, void *recvbuf, int recvcount, 
+                                 MPI_Datatype recvtype, int root, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype, 
+                                  int dest, int tag, MPI_Comm comm, 
+                                  MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, 
+                             int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, 
+                                 int dest, int sendtag, void *recvbuf, int recvcount,
+                                 MPI_Datatype recvtype, int source, int recvtag, 
+                                 MPI_Comm comm,  MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Sendrecv_replace(void * buf, int count, MPI_Datatype datatype, 
+                                         int dest, int sendtag, int source, int recvtag,
+                                         MPI_Comm comm, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Ssend_init(void *buf, int count, MPI_Datatype datatype, 
+                                   int dest, int tag, MPI_Comm comm, 
+                                   MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Ssend(void *buf, int count, MPI_Datatype datatype, int dest, 
+                              int tag, MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Start(MPI_Request *request);
+OMPI_DECLSPEC  int PMPI_Startall(int count, MPI_Request *array_of_requests);
+OMPI_DECLSPEC  int PMPI_Status_c2f(MPI_Status *c_status, MPI_Fint *f_status);
+OMPI_DECLSPEC  int PMPI_Status_f2c(MPI_Fint *f_status, MPI_Status *c_status);
+OMPI_DECLSPEC  int PMPI_Status_set_cancelled(MPI_Status *status, int flag);
+OMPI_DECLSPEC  int PMPI_Status_set_elements(MPI_Status *status, MPI_Datatype datatype,
+                                            int count);
+OMPI_DECLSPEC  int PMPI_Testall(int count, MPI_Request array_of_requests[], int *flag, 
+                                MPI_Status array_of_statuses[]);
+OMPI_DECLSPEC  int PMPI_Testany(int count, MPI_Request array_of_requests[], int *index, int *flag, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Test(MPI_Request *request, int *flag, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Test_cancelled(MPI_Status *status, int *flag);
+OMPI_DECLSPEC  int PMPI_Testsome(int incount, MPI_Request array_of_requests[], 
+                                 int *outcount, int array_of_indices[], 
+                                 MPI_Status array_of_statuses[]);
+OMPI_DECLSPEC  int PMPI_Topo_test(MPI_Comm comm, int *status);
+OMPI_DECLSPEC  MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_Type_commit(MPI_Datatype *type);
+OMPI_DECLSPEC  int PMPI_Type_contiguous(int count, MPI_Datatype oldtype, 
+                                        MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_darray(int size, int rank, int ndims, 
+                                           int gsize_array[], int distrib_array[], 
+                                           int darg_array[], int psize_array[],
+                                           int order, MPI_Datatype oldtype, 
+                                           MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_f90_complex(int p, int r, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_f90_integer(int r, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_f90_real(int p, int r, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_hindexed(int count, int array_of_blocklengths[], 
+                                             MPI_Aint array_of_displacements[], 
+                                             MPI_Datatype oldtype, 
+                                             MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_hvector(int count, int blocklength, MPI_Aint stride, 
+                                            MPI_Datatype oldtype, 
+                                            MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_keyval(MPI_Type_copy_attr_function *type_copy_attr_fn, 
+                                           MPI_Type_delete_attr_function *type_delete_attr_fn, 
+                                           int *type_keyval, void *extra_state);
+OMPI_DECLSPEC  int PMPI_Type_create_indexed_block(int count, int blocklength,
+                                                  int array_of_displacements[],
+                                                  MPI_Datatype oldtype,
+                                                  MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_struct(int count, int array_of_block_lengths[], 
+                                           MPI_Aint array_of_displacements[], 
+                                           MPI_Datatype array_of_types[], 
+                                           MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_subarray(int ndims, int size_array[], int subsize_array[], 
+                                             int start_array[], int order, 
+                                             MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_create_resized(MPI_Datatype oldtype, MPI_Aint lb, 
+                                            MPI_Aint extent, MPI_Datatype *newtype); 
+OMPI_DECLSPEC  int PMPI_Type_delete_attr(MPI_Datatype type, int type_keyval);
+OMPI_DECLSPEC  int PMPI_Type_dup(MPI_Datatype type, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_extent(MPI_Datatype type, MPI_Aint *extent);
+OMPI_DECLSPEC  int PMPI_Type_free(MPI_Datatype *type);
+OMPI_DECLSPEC  int PMPI_Type_free_keyval(int *type_keyval);
+OMPI_DECLSPEC  MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype);
+OMPI_DECLSPEC  int PMPI_Type_get_attr(MPI_Datatype type, int type_keyval, 
+                                      void *attribute_val, int *flag);
+OMPI_DECLSPEC  int PMPI_Type_get_contents(MPI_Datatype mtype, int max_integers, 
+                                          int max_addresses, int max_datatypes, 
+                                          int array_of_integers[], 
+                                          MPI_Aint array_of_addresses[], 
+                                          MPI_Datatype array_of_datatypes[]);
+OMPI_DECLSPEC  int PMPI_Type_get_envelope(MPI_Datatype type, int *num_integers, 
+                                          int *num_addresses, int *num_datatypes, 
+                                          int *combiner);
+OMPI_DECLSPEC  int PMPI_Type_get_extent(MPI_Datatype type, MPI_Aint *lb, 
+                                        MPI_Aint *extent);
+OMPI_DECLSPEC  int PMPI_Type_get_name(MPI_Datatype type, char *type_name, 
+                                      int *resultlen);
+OMPI_DECLSPEC  int PMPI_Type_get_true_extent(MPI_Datatype datatype, MPI_Aint *true_lb, 
+                                             MPI_Aint *true_extent);
+OMPI_DECLSPEC  int PMPI_Type_hindexed(int count, int array_of_blocklengths[], 
+                                      MPI_Aint array_of_displacements[], 
+                                      MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_hvector(int count, int blocklength, MPI_Aint stride, 
+                                     MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_indexed(int count, int array_of_blocklengths[], 
+                                     int array_of_displacements[], 
+                                     MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_lb(MPI_Datatype type, MPI_Aint *lb);
+OMPI_DECLSPEC  int PMPI_Type_match_size(int typeclass, int size, MPI_Datatype *type);
+OMPI_DECLSPEC  int PMPI_Type_set_attr(MPI_Datatype type, int type_keyval, 
+                                      void *attr_val);
+OMPI_DECLSPEC  int PMPI_Type_set_name(MPI_Datatype type, char *type_name);
+OMPI_DECLSPEC  int PMPI_Type_size(MPI_Datatype type, int *size);
+OMPI_DECLSPEC  int PMPI_Type_struct(int count, int array_of_blocklengths[], 
+                                    MPI_Aint array_of_displacements[], 
+                                    MPI_Datatype array_of_types[], 
+                                    MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Type_ub(MPI_Datatype mtype, MPI_Aint *ub);
+OMPI_DECLSPEC  int PMPI_Type_vector(int count, int blocklength, int stride, 
+                                    MPI_Datatype oldtype, MPI_Datatype *newtype);
+OMPI_DECLSPEC  int PMPI_Unpack(void *inbuf, int insize, int *position, 
+                               void *outbuf, int outcount, MPI_Datatype datatype, 
+                               MPI_Comm comm);
+OMPI_DECLSPEC  int PMPI_Unpublish_name(char *service_name, MPI_Info info, 
+                                       char *port_name);
+OMPI_DECLSPEC  int PMPI_Unpack_external (char *datarep, void *inbuf, MPI_Aint insize,
+                                         MPI_Aint *position, void *outbuf, int outcount,
+                                         MPI_Datatype datatype);
+OMPI_DECLSPEC  int PMPI_Waitall(int count, MPI_Request *array_of_requests, 
+                                MPI_Status *array_of_statuses);
+OMPI_DECLSPEC  int PMPI_Waitany(int count, MPI_Request *array_of_requests, 
+                                int *index, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Wait(MPI_Request *request, MPI_Status *status);
+OMPI_DECLSPEC  int PMPI_Waitsome(int incount, MPI_Request *array_of_requests, 
+                                 int *outcount, int *array_of_indices, 
+                                 MPI_Status *array_of_statuses);
+OMPI_DECLSPEC  MPI_Fint PMPI_Win_c2f(MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_call_errhandler(MPI_Win win, int errorcode);
+OMPI_DECLSPEC  int PMPI_Win_complete(MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_create(void *base, MPI_Aint size, int disp_unit, 
+                                   MPI_Info info, MPI_Comm comm, MPI_Win *win);
+OMPI_DECLSPEC  int PMPI_Win_create_errhandler(MPI_Win_errhandler_fn *function, 
+                                              MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_Win_create_keyval(MPI_Win_copy_attr_function *win_copy_attr_fn, 
+                                          MPI_Win_delete_attr_function *win_delete_attr_fn, 
+                                          int *win_keyval, void *extra_state);
+OMPI_DECLSPEC  int PMPI_Win_delete_attr(MPI_Win win, int win_keyval);
+OMPI_DECLSPEC  MPI_Win PMPI_Win_f2c(MPI_Fint win);
+OMPI_DECLSPEC  int PMPI_Win_fence(int assert, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_free(MPI_Win *win);
+OMPI_DECLSPEC  int PMPI_Win_free_keyval(int *win_keyval);
+OMPI_DECLSPEC  int PMPI_Win_get_attr(MPI_Win win, int win_keyval, 
+                                     void *attribute_val, int *flag);
+OMPI_DECLSPEC  int PMPI_Win_get_errhandler(MPI_Win win, MPI_Errhandler *errhandler);
+OMPI_DECLSPEC  int PMPI_Win_get_group(MPI_Win win, MPI_Group *group);
+OMPI_DECLSPEC  int PMPI_Win_get_name(MPI_Win win, char *win_name, int *resultlen);
+OMPI_DECLSPEC  int PMPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_post(MPI_Group group, int assert, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_set_attr(MPI_Win win, int win_keyval, void *attribute_val);
+OMPI_DECLSPEC  int PMPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler);
+OMPI_DECLSPEC  int PMPI_Win_set_name(MPI_Win win, char *win_name);
+OMPI_DECLSPEC  int PMPI_Win_start(MPI_Group group, int assert, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_test(MPI_Win win, int *flag);
+OMPI_DECLSPEC  int PMPI_Win_unlock(int rank, MPI_Win win);
+OMPI_DECLSPEC  int PMPI_Win_wait(MPI_Win win);
+OMPI_DECLSPEC  double PMPI_Wtick(void);
+OMPI_DECLSPEC  double PMPI_Wtime(void);
+
+#if defined(c_plusplus) || defined(__cplusplus)
+}
+#endif
+
+/*                                                                             
+ * Conditional MPI 2 C++ bindings support.  Include if:
+ *   - The user does not explicitly request us to skip it (when a C++ compiler
+ *       is used to compile C code).
+ *   - We want C++ bindings support
+ *   - We are not building OMPI itself
+ *   - We are using a C++ compiler
+ */
+#if !defined(OMPI_SKIP_MPICXX) && OMPI_WANT_CXX_BINDINGS && !OMPI_BUILDING
+#if defined(__cplusplus) || defined(c_plusplus) 
+#include "openmpi/ompi/mpi/cxx/mpicxx.h"
+#endif
+#endif
+
+#if !OMPI_PROVIDE_MPI_FILE_INTERFACE && !OMPI_BUILDING
+/* ROMIO requires MPI implementations to set this to 1 if they provide
+   MPI_OFFSET.  We need to provide it because it's used throughout the
+   DDT engine */
+#define HAVE_MPI_OFFSET 1
+#endif
+
+#endif /* OMPI_MPI_H */

Added: grass-addons/grass7/imagery/i.vi.mpi/run.sh
===================================================================
--- grass-addons/grass7/imagery/i.vi.mpi/run.sh	                        (rev 0)
+++ grass-addons/grass7/imagery/i.vi.mpi/run.sh	2012-03-14 10:48:51 UTC (rev 51056)
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+# foo is the number of processors available in the cluster
+foo=3
+filename=ndvi1-new-$foo
+
+time mpirun -np $foo i.vi.mpi viname=ndvi red=newL71092084_08420100126_B30 nir=newL71092084_08420100126_B40 output=$filename tmp=1
+
+exit 0


Property changes on: grass-addons/grass7/imagery/i.vi.mpi/run.sh
___________________________________________________________________
Added: svn:executable
   + *



More information about the grass-commit mailing list