diff --git a/CHANGELOG.md b/CHANGELOG.md
index c47c13140..ce6473d3a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,6 +37,7 @@ All notable changes to this project will be documented in this file.
## [4.5]
### Added
+- correct linking of Fortran MPI codes under Windows: just use the MS-MPI library (msmpi)
- new `mmg` and `parmmg` (parallel mmg) plugins interfacing mmg5 and parmmg libraries, to replace `mmg3d-v4.0` and `freeyams` (Thanks to P-H Tournier)
- a true 3d anisotropic mesh adaptation `examples/3d/Laplace-Adapt-aniso-3d.edp`
- an example to extract surface mesh from isovalue in `examples/3dSurf/Pinochio.edp`
diff --git a/configure.ac b/configure.ac
index cd5092fa4..049ed4ae9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2105,8 +2105,8 @@ if test "$ff_cmake" = yes; then
fi
TOOL_DISABLE(mshmet,"mshmet.$DYLIB_SUFFIX aniso.$DYLIB_SUFFIX")
TOOL_DISABLE(gmm,ilut.$DYLIB_SUFFIX,,enable_gmm=no)
-TOOL_DISABLE(scalapack,MUMPS.$DYLIB_SUFFIX)
-TOOL_DISABLE(mumps,MUMPS.$DYLIB_SUFFIX)
+TOOL_DISABLE(scalapack,MUMPS.$DYLIB_SUFFIX MUMPS_mpi.$DYLIB_SUFFIX)
+TOOL_DISABLE(mumps,MUMPS.$DYLIB_SUFFIX MUMPS_mpi.$DYLIB_SUFFIX)
TOOL_DISABLE(mumps_seq,"MUMPS_seq.$DYLIB_SUFFIX MUMPS.$DYLIB_SUFFIX",mumps-seq)
TOOL_DISABLE(nlopt,ff-NLopt.$DYLIB_SUFFIX)
diff --git a/etc/config/m4/acmpi.m4 b/etc/config/m4/acmpi.m4
index e09529391..5af8beb09 100644
--- a/etc/config/m4/acmpi.m4
+++ b/etc/config/m4/acmpi.m4
@@ -117,13 +117,6 @@ esac
mkdir -p 3rdparty/include/msmpi
mkdir -p 3rdparty/lib/msmpi
- ## add to msmpi 10.0
- echo " hack MSMPI V10.0 "
- echo "void __guard_check_icall_fptr(unsigned long ptr) { }" > 3rdparty/lib/msmpi/cfg_stub.c
- gcc -o 3rdparty/lib/msmpi/cfg_stub.o -c 3rdparty/lib/msmpi/cfg_stub.c
- #gcc -shared -o 3rdparty/lib/msmpi/cfg_stub.dll 3rdparty/lib/msmpi/cfg_stub.o
- #rm 3rdparty/lib/msmpi/cfg_stub.o
- #rm 3rdparty/lib/msmpi/cfg_stub.c
cp "$MSMPI_INC"/*.h 3rdparty/include/msmpi
grep -v INT_PTR_KIND "$MSMPI_INC"/mpif.h >3rdparty/include/msmpi/mpif.h
@@ -139,15 +132,16 @@ esac
# to reinstall msmpi ..
# MSMPI
- if test -x "`which msmpi.dll`"
+ msmpi_dll="`which msmpi.dll`"
+ if test -x "$msmpi_dll"
then
# Remove for scotch and parmetis
ff_MPI_INCLUDE="-I$ff_MPI_INCLUDE_DIR -D__int64=long\ long"
with_mpiinc="$ff_MPI_INCLUDE"
test -z "$MPIRUN" -a -x "$ffMSMPI_BIN/mpiexe.exe" && MPIRUN="$MSMPI_BIN\mpiexe.exe"
- ff_MPI_LIBC="'$ff_msmpi_lib/msmpi.lib'"
- ff_MPI_LIB="'$ff_msmpi_lib/msmpi.lib'"
- ff_MPI_LIBFC="'$ff_msmpi_lib/msmpifec.lib' '$ff_msmpi_lib/msmpi.lib' '$ff_msmpi_lib/cfg_stub.o' "
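+ # link C and Fortran MPI codes directly against msmpi.dll (replaces the import .lib files and the cfg_stub hack removed above)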
+ ff_MPI_LIBC="$msmpi_dll"
+ ff_MPI_LIB="$msmpi_dll"
+ ff_MPI_LIBFC="$msmpi_dll"
ff_mpiexec_win="C:\Program Files\Microsoft MPI\Bin\mpiexec.exe"
test -z "$ff_mpiexec_win" && MPIRUN="$ff_mpiexec_win"
test -z "$MPICXX" && MPICXX="$CXX $ff_MPI_INCLUDE"
diff --git a/etc/jenkins/deployDEB-ffpetsc.sh b/etc/jenkins/deployDEB-ffpetsc.sh
index 9b7fcc6e2..233005bb6 100755
--- a/etc/jenkins/deployDEB-ffpetsc.sh
+++ b/etc/jenkins/deployDEB-ffpetsc.sh
@@ -12,14 +12,7 @@ VERSION=`grep AC_INIT configure.ac | cut -d"," -f2`
RELEASE_TAG_NAME="v$VERSION"
distrib=`uname -s`-`uname -r`
-##if [ "$distrib" == "Linux-4.4.0-166-generic" ]; then
- # 16.04
DISTRIB="Ubuntu"
-##elif [ "$distrib" == "Linux-4.15.0-51-generic" ]; then
- # 18.04
-##DISTRIB="Ubuntu_18.04"
-##fi
-
DEB_NAME="freefem_${VERSION}_withPETSc_amd64"
GH_DEB_NAME="FreeFEM_${VERSION}_${DISTRIB}_withPETSc_amd64.deb"
@@ -89,6 +82,6 @@ fi
# clean the VM
rm -rf $DEB_NAME
-rm -rf $GH_DEB_NAME
+rm $GH_DEB_NAME
. ./bin/uninstall-ff++
diff --git a/etc/jenkins/deployDEB.sh b/etc/jenkins/deployDEB.sh
index 77427f744..51d4d7392 100755
--- a/etc/jenkins/deployDEB.sh
+++ b/etc/jenkins/deployDEB.sh
@@ -14,7 +14,7 @@ distrib=`uname -s`-`uname -r`
DISTRIB="Ubuntu"
-DEB_NAME="freefem_${VERSION}-1_amd64.deb"
+DEB_NAME="freefem_${VERSION}-1_amd64"
GH_DEB_NAME="FreeFEM_${VERSION}_${DISTRIB}_amd64.deb"
## DEB build
@@ -79,6 +79,6 @@ fi
# clean the VM
rm -rf $DEB_NAME
-rm -rf $GH_DEB_NAME
+rm $GH_DEB_NAME
. ./bin/uninstall-ff++
diff --git a/examples/mpi/MUMPS.edp b/examples/mpi/MUMPS.edp
index a0fb545a6..f663bbea7 100644
--- a/examples/mpi/MUMPS.edp
+++ b/examples/mpi/MUMPS.edp
@@ -1,7 +1,7 @@
// run with MPI: ff-mpirun -np 4 script.edp
// NBPROC 4
-load "MUMPS"
+load "MUMPS_mpi" // add _mpi for windows do day
// load "symmetrizeCSR"
int[int] l = [1, 1, 2, 2];
mesh Th = square(150, 150, label = l);
diff --git a/plugin/mpi/MUMPS_mpi.cpp b/plugin/mpi/MUMPS_mpi.cpp
new file mode 100644
index 000000000..cf1ba067b
--- /dev/null
+++ b/plugin/mpi/MUMPS_mpi.cpp
@@ -0,0 +1,379 @@
+/****************************************************************************/
+/* This file is part of FreeFEM. */
+/* */
+/* FreeFEM is free software: you can redistribute it and/or modify */
+/* it under the terms of the GNU Lesser General Public License as */
+/* published by the Free Software Foundation, either version 3 of */
+/* the License, or (at your option) any later version. */
+/* */
+/* FreeFEM is distributed in the hope that it will be useful, */
+/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
+/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
+/* GNU Lesser General Public License for more details. */
+/* */
+/* You should have received a copy of the GNU Lesser General Public License */
+/* along with FreeFEM. If not, see <http://www.gnu.org/licenses/>.          */
+/****************************************************************************/
+// SUMMARY : ...
+// LICENSE : LGPLv3
+// ORG : LJLL Universite Pierre et Marie Curie, Paris, FRANCE
+// AUTHORS : Frederic Hecht
+// E-MAIL : frederic.hecht@sorbonne-universite.fr
+
+// *INDENT-OFF* //
+//ff-c++-LIBRARY-dep: mumps parmetis [ptscotch scotch] scalapack blas mpifc fc mpi pthread
+//ff-c++-cpp-dep:
+// *INDENT-ON* //
+
+// F. Hecht december 2011
+// ----------------------------
+// file to add the MUMPS MPI (parallel) interface for sparse linear solvers with dynamic loading.
+#include <mpi.h>
+#ifdef _WIN32
+__declspec(dllexport) int toto;
+MPI_Fint* _imp__MPI_F_STATUS_IGNORE;
+MPI_Fint* _imp__MPI_F_STATUSES_IGNORE;
+//__declspec(dllexport) void __guard_check_icall_fptr(unsigned long ptr) { }
+#endif
+#include <iostream>
+using namespace std;
+
+#include "ff++.hpp"
+
+
+#include <dmumps_c.h>
+#include <zmumps_c.h>
+
+const int JOB_INIT = -1;
+const int JOB_END = -2;
+const int JOB_ANA = 1;
+const int JOB_FAC = 2;
+const int JOB_ANA_FAC = 4;
+const int JOB_SOLVE = 3;
+const int USE_COMM_WORLD = -987654;
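+// MUMPS job codes: -1 initializes an instance, -2 terminates it, 1 = analysis,
+// 2 = factorization, 3 = solve, 4 = analysis + factorization.
+// -987654 is the MUMPS sentinel meaning "use MPI_COMM_WORLD" for comm_fortran.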
+
+template<class RR> struct MUMPS_STRUC_TRAIT {typedef void MUMPS; typedef void R; };
+template<> struct MUMPS_STRUC_TRAIT<double> {typedef DMUMPS_STRUC_C MUMPS; typedef double R;};
+template<> struct MUMPS_STRUC_TRAIT<Complex> {typedef ZMUMPS_STRUC_C MUMPS; typedef ZMUMPS_COMPLEX R;};
+void mumps_c(DMUMPS_STRUC_C *id) { dmumps_c(id);}
+void mumps_c(ZMUMPS_STRUC_C *id) { zmumps_c(id);}
+
+template<class T> struct MPI_TYPE {static MPI_Datatype TYPE(){return MPI_BYTE;}};
+template<> struct MPI_TYPE<long> {static MPI_Datatype TYPE(){return MPI_LONG;}};
+template<> struct MPI_TYPE<int> {static MPI_Datatype TYPE(){return MPI_INT;}};
+template<> struct MPI_TYPE<double> {static MPI_Datatype TYPE(){return MPI_DOUBLE;}};
+template<> struct MPI_TYPE<char> {static MPI_Datatype TYPE(){return MPI_BYTE;}};
+template<> struct MPI_TYPE<Complex> {static MPI_Datatype TYPE(){return MPI_DOUBLE_COMPLEX;}};
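+// Type-to-MPI-datatype mapping used for the MPI_Reduce/MPI_Bcast of the
+// right-hand side and solution in dosolver() below.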
+
+
+static std::string analysis[] = {"AMD", "", "AMF", "SCOTCH", "PORD", "METIS", "QAMD", "automatic sequential", "automatic parallel", "PT-SCOTCH", "ParMetis"};
+
+//template<class R>
+template<class R>
+class SolveMUMPS_mpi: public VirtualSolver<int,R>
+{
+public:
+ // 1 unsym , 2 sym, 4 pos , 8 nopos, 16 seq, 32 ompi, 64 mpi ,
+ static const int orTypeSol = 1&2&4&8&16;
+ typedef HashMatrix<int,R> HMat;
+ typedef R K; //
+ HMat &A;
+
+
+ // typedef double R;
+ long verb;
+ double eps;
+ double tgv;
+ int cn,cs;
+ typedef typename MUMPS_STRUC_TRAIT<R>::R MR;
+ mutable typename MUMPS_STRUC_TRAIT<R>::MUMPS id;
+ KN<double> *rinfog;
+ KN<long> *infog;
+ mutable unsigned char strategy;
+ bool distributed;
+ MPI_Comm comm;
+ int mpirank;
+ int matrank;
+ // int distributed;
+
+ int& ICNTL (int i) const {return id.icntl[i - 1];}
+ double& CNTL (int i) const {return id.cntl[i - 1];}
+ int& INFO (int i) const {return id.info[i - 1];}
+ double& RINFO (int i) const {return id.rinfo[i - 1];}
+ int& INFOG (int i) const {return id.infog[i - 1];}
+ double& RINFOG (int i) const {return id.rinfog[i - 1];}
+
+ void SetVerb () const {
+ ICNTL(1) = 6;// output stream for error messages.
+ ICNTL(2) = 6;// stream for diagnostic printing, statistics, and warning messages.
+ ICNTL(3) = 6;// output stream global information, collected on the host.
+ ICNTL(4) = min(max(verb-2,1L),4L); // the level of printing for error, warning, and diag
+ if(verb ==0 )ICNTL(4) =0;
+ ICNTL(11)=0; // no error analysis is performed (no statistics).
+ if( id.job ==JOB_SOLVE && verb >99)
+ { //computes statistics related to an error analysis of the linear system
+ if( verb > 999) ICNTL(11)=1; // all statistics (very expensive)
+ else ICNTL(11)=2;// compute main statistics
+ }
+
+
+ }
+ void Clean ()
+ {
+ delete [] id.irn;
+ delete [] id.jcn;
+ delete [] id.a;
+ id.irn=0;
+ id.jcn=0;
+ id.a =0;
+ }
+ void to_mumps_mat()
+ {
+ Clean ();
+
+ id.nrhs = 0;//
+ int n = A.n;
+ int nz = A.nnz;
+ ffassert(A.n == A.m);
+ if( distributed || (mpirank == matrank) )
+ {
+ int *irn = new int[nz];
+ int *jcn = new int[nz];
+ R *a = new R[nz];
+ A.COO();
+
+ for (int k = 0; k < nz; ++k) {
+ {
+ irn[k] = A.i[k]+1;
+ jcn[k] = A.j[k] + 1;
+ a[k] = A.aij[k];
+ }
+ }
+
+ id.n = n;
+
+ if(!distributed)
+ {
+ if(mpirank == matrank)
+ {
+ id.nz = nz;
+ id.irn = irn;
+ id.jcn = jcn;
+ id.a = (MR *)(void *)a;
+ }
+ else
+ { // no matrix
+ id.nz=0;
+ id.a =0;
+ id.irn = 0;
+ id.jcn = 0;
+
+ }
+
+ }
+ else
+ {
+ id.nz_loc = nz;
+ id.irn_loc = irn;
+ id.jcn_loc = jcn;
+ id.a_loc = (MR *)(void *)a;
+
+ }
+ id.rhs = 0;
+ ffassert( A.half == (id.sym != 0) );//
+ ICNTL(5) = 0; // input matrix type
+ ICNTL(7) = 7; // NUMBERING ...
+
+ ICNTL(9) = 1; // 1: A x = b, !1 : tA x = b during solve phase
+ ICNTL(18) = 0;
+ if(strategy > 0 && strategy < 9 && strategy != 2)
+ {
+ ICNTL(28) = 1; // 1: sequential analysis
+ ICNTL(7) = strategy - 1; // 0: AMD
+ }
+ // 1:
+ // 2: AMF
+ // 3: SCOTCH
+ // 4: PORD
+ // 5: METIS
+ // 6: QAMD
+ // 7: automatic
+ else
+ {
+ ICNTL(28) = 1;
+ ICNTL(7) = 7;
+ }
+ if(strategy > 8 && strategy < 12)
+ {
+ ICNTL(28) = 2; // 2: parallel analysis
+ ICNTL(29) = strategy - 9; // 0: automatic
+ } // 1: PT-SCOTCH
+ // 2: ParMetis
+ ICNTL(9) = 1;
+ ICNTL(11) = 0; // verbose level
+ ICNTL(18) = distributed ? 3: 0; // centralized matrix input if !distributed
+ ICNTL(20) = 0; // dense RHS
+ ICNTL(14) = 30;
+ }
+ }
+ void Check (const char *msg = "mumps_mpi")
+ {
+ if (INFO(1) != 0) {
+ cout << " MUMPS mpi error: number " << INFO(1) << endl;
+ cout << " Fatal error " << msg << endl;
+ Clean ();
+ id.job = JOB_END;
+ mumps_c(&id); /* Terminate instance */
+ ErrorExec(msg, INFO(1));
+ }
+ }
+ void CopyInfo()
+ {
+ if (rinfog) {
+ // copy rinfog
+ if (rinfog->N() < 40) {rinfog->resize(40);}
+
+ for (int i = 0; i < 40; ++i) {
+ (*rinfog)[i] = RINFOG(i + 1);
+ }
+ }
+
+ if (infog) {
+ // copy ginfo
+ if (infog->N() < 40) {infog->resize(40);}
+
+ for (int i = 0; i < 40; ++i) {
+ (*infog)[i] = INFOG(i + 1);
+ }
+ }
+ }
+ SolveMUMPS_mpi (HMat &AA, const Data_Sparse_Solver & ds,Stack stack )
+ : A(AA), verb(ds.verb),
+ eps(ds.epsilon),
+ tgv(ds.tgv),cn(0),cs(0),
+ rinfog(ds.rinfo), infog(ds.info),
+ matrank(ds.master),distributed(ds.master<0),
+ strategy(ds.strategy)
+ {
+
+ if(ds.commworld)
+ MPI_Comm_dup(*((MPI_Comm*)ds.commworld), &comm);
+ else
+ MPI_Comm_dup(MPI_COMM_WORLD, &comm);
+
+ MPI_Comm_rank(comm, &mpirank);
+ int master = mpirank==matrank;
+ int myid = 0;
+ MPI_Comm_rank(MPI_COMM_WORLD, &myid);
+
+ id.irn=0;
+ id.jcn=0;
+ id.a =0;
+
+ id.job = JOB_INIT;
+ id.par = 1;
+ id.sym = A.half;
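+ // MUMPS expects a Fortran communicator handle, hence MPI_Comm_c2f on the duplicated C communicator.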
+ id.comm_fortran = MPI_Comm_c2f(comm);
+ SetVerb();
+ mumps_c(&id);
+
+
+ Check("MUMPS_mpi build/init");
+ if (verbosity > 3 && master) {
+ cout << " -- MUMPS n= " << id.n << ", peak Mem: " << INFOG(22) << " Mb" << " sym: " << id.sym << endl;
+ }
+
+
+ }
+
+
+
+ ~SolveMUMPS_mpi () {
+ Clean ();
+ id.job = JOB_END;
+ SetVerb () ;
+ mumps_c(&id); /* Terminate instance */
+ /*int ierr = */
+ MPI_Comm_free(&comm);
+ }
+
+
+ void dosolver(K *x,K*b,int N,int trans)
+ {
+ size_t nN=id.n*N;
+ if (verbosity > 1 && mpirank==0) {
+ cout << " -- MUMPS solve, peak Mem : " << INFOG(22) << " Mb, n = "
+ << id.n << " sym =" << id.sym <<" trans = " << trans << endl;
+ }
+ ICNTL(9) = trans == 0; // 1: A x = b, !1 : tA x = b during solve phase
+ id.nrhs = N;
+ // x = b;
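+ // Distributed input: sum the local RHS contributions onto rank 0, since the RHS
+ // is centralized (ICNTL(20)=0); the solution is broadcast back to all ranks below.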
+ if(distributed)
+ {
+ MPI_Reduce( (void *) b,(void *) x , nN , MPI_TYPE<R>::TYPE(),MPI_SUM,0,comm);
+ }
+ else if(mpirank==0) std::copy(b,b+nN,x);
+ id.rhs = (MR *)(void *)(R *)x;
+ id.job = JOB_SOLVE; // performs the solution phase (uses the factors computed previously)
+ SetVerb();
+ mumps_c(&id);
+ Check("MUMPS_mpi dosolver");
+ if(distributed) // send the solution ...
+ MPI_Bcast(reinterpret_cast<void*> (x),nN, MPI_TYPE<R>::TYPE(), 0,comm);
+
+
+ if (verb > 9 && mpirank==0) {
+
+ for(int j=0; j<N; ++j) { KN_<K> B(b+j*id.n,id.n);
+ cout << j <<" b linfty " << B.linfty() << endl;
+ }
+ }
+
+ if (verb > 2) {
+
+ for(int j=0; j<N; ++j) { KN_<K> B(x+j*id.n,id.n);
+ cout << " x " << j <<" linfty " << B.linfty() << endl;
+ }
+ }
+ CopyInfo();
+
+ }
+
+ void fac_init(){
+ to_mumps_mat();
+ } // n, nnz fixed
+ void fac_symbolic(){
+ id.job = JOB_ANA;
+ SetVerb ();
+ mumps_c(&id);
+ Check("MUMPS_mpi Analyse");
+ CopyInfo();
+ }
+ void fac_numeric(){
+ id.job = JOB_FAC;
+ SetVerb () ;
+ mumps_c(&id);
+ Check("MUMPS_mpi Factorize");
+ CopyInfo();
+ }
+ void UpdateState(){
+ if( A.GetReDoNumerics() ) cn++;
+ if( A.GetReDoSymbolic() ) cs++;
+ this->ChangeCodeState(A.n,cs,cn);
+ }
+
+};
+
+
+static void Load_Init()
+{
+ addsolver<SolveMUMPS_mpi<double>>("MUMPS",50,1);
+ addsolver<SolveMUMPS_mpi<Complex>>("MUMPS",50,1);
+ addsolver<SolveMUMPS_mpi<double>>("MUMPSMPI",50,1);
+ addsolver<SolveMUMPS_mpi<Complex>>("MUMPSMPI",50,1);
+ setptrstring(def_solver,"MUMPSMPI");
+}
+LOADFUNC(Load_Init)
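
Usage note (not part of the commit): a minimal FreeFEM script sketch showing how the new plugin is expected to be used, in the spirit of the examples/mpi/MUMPS.edp change above. The mesh, the problem, and the file name are illustrative assumptions; only the plugin name "MUMPS_mpi" and the registered solver names come from this diff.

    // run with MPI: ff-mpirun -np 4 laplace-mumps.edp
    load "MUMPS_mpi"                 // registers the "MUMPS"/"MUMPSMPI" solvers and sets the default sparse solver
    mesh Th = square(50, 50);
    fespace Vh(Th, P1);
    Vh u, v;
    solve Laplace(u, v, solver = sparsesolver)   // sparsesolver now resolves to the MPI MUMPS interface
        = int2d(Th)(dx(u)*dx(v) + dy(u)*dy(v))
        - int2d(Th)(1.*v)
        + on(1, 2, 3, 4, u = 0);
    cout << "u min/max: " << u[].min << " " << u[].max << endl;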