I have written the code below in C. The problem is that I have only one processor, and I am not sure if it will work.

/*#include <stdio.h>
#include <stdlib.h>  required for randomize() and random()
#include <conio.h>*/

#include <stdlib.h>
#include <stdio.h>
#include "mpi.h"
#include <math.h>

 
//int N = 8;

/*typedef struct 
{
	int x;
	int y;
	int Xarray[8];
	int Yarray[8];
	int thetha[8];

} dataelements;

*/

int main (int argc, char **argv)
{
	
	
	
	int N;
	printf("Type in a number \n");
    scanf("%d", &N);
	
	typedef struct 
	{
		int x;
		int y;
		int Xarray[N-1];
		int Yarray[N-1];
		int thetha[N-1];

	} dataelements;
	
	int x[N]; 
	int y[N];
	int thetha[N];
	int R[N];
	dataelements Inpudata[N-1];
	dataelements Data;
	int k = 0;
	int i = 0;
	int j = 0;
	int thethaArray[N];
	
	//initialise x and y
	for (i=0; i < N; i++)
	{
		x[i]=i+1;
		y[i]=i+1;
		thetha[i]=5*i;
	}
	
	//initialising input data
	for(i=0;i<N-1;i++)
	{
		k = 0;
		Inpudata[i].x = x[i];
		for (j= i+1; j< N-1;j++)
		{
			Inpudata[i].Xarray[k] = x[j];
			Inpudata[i].Yarray[k] = y[j];
			Inpudata[i].thetha[k] = thetha[j];
			k = k + 1;
			
		}
		
	}
	
	k = 0;
	for (i = 0 ;i < N; i++)
	{
		for (j= i+1; j< N;j++)
		{
		    thethaArray[k] = cos(6*thetha[i] - thetha[i]);
	        k = k + 1;		
		}	
	}
	
	
	//After finishing the initialisation of the data we continue to MPI sending and recieving
	int my_rank; 
	int size;
	MPI_Status stat;
	
	MPI_Init(&argc, &argv); /*START MPI */
	MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); /*DETERMINE RANK OF THIS PROCESSOR*/
	MPI_Comm_size(MPI_COMM_WORLD, &size); /*DETERMINE TOTAL NUMBER OF PROCESSORS*/
	
	printf("The number of proccessors is %d \n",size);
	
	//defining struct datatype
	MPI_Datatype Particletype; 
	MPI_Datatype type[5] = {MPI_INT, MPI_INT, MPI_INT,MPI_INT,MPI_INT}; 
	int          block[5] = {1, 1,N-1, N-1,N-1}; 
	MPI_Aint     disp[5];
	
	MPI_Address( Inpudata, disp); 
    MPI_Address( Inpudata[0].Xarray, disp+1);
 	MPI_Address( Inpudata[0].Yarray, disp+2); 
 	MPI_Address( Inpudata[0].thetha, disp+3); 
	MPI_Address( Inpudata+1, disp+4); 

    MPI_Type_struct( 5, block, disp, type, &Particletype); 
    MPI_Type_commit( &Particletype); 
    //end of defining struct datatype


    //sending the data to the processors
	k=1;
    if (my_rank==0)
       {
	     for(i=0; i<N-1;i++)
	     {
			Data = Inpudata[i];
			
			if (k < size && size > 1)
			{
		     MPI_Send(&Data, 1, Particletype, k, 1, MPI_COMM_WORLD); //error here look at what condrad did.
	        }
			else if(size > 1)
			{
				k = 1;
		        MPI_Send(&Data, 1, Particletype, k, 1, MPI_COMM_WORLD); //error here look at what condrad did.
			}
			k=k+1;
		 }
       }
	int DX=0;
	int DY=0;
	int r = 0;

if (my_rank > 0 && size > 1)
{
	k=1;
	for(i=0;i<N-1;i++)
	{
		if(k<size && size > 1)
		{
			MPI_Recv(&Data, 1, Particletype, 0, k, MPI_COMM_WORLD, &stat);
		}
		else if(size > 1)
		{
			k = 1;
	        MPI_Recv(&Data, 1, Particletype, 0, k, MPI_COMM_WORLD, &stat); //error here look at what condrad did.
		}
	  
	}
	for(i=0;i<N-1;i++)
	  {
		if(Data.Xarray[i] != NULL)
		{
		DX = Data.x - Data.Xarray[i];
		DY = Data.y - Data.Yarray[i];
		r = (int)(sqrt(DX*DX + DY*DY));
		MPI_Send(r , 1 ,MPI_INT, 0, 1, MPI_COMM_WORLD);
     	}
	  }
}

	int arrayslenght = sum(N-1);


	int max_r = 0;

if (my_rank==0  && size > 1)
{
	for(i=0;i<arrayslenght;i++)//Still recieving
	{
	  MPI_Recv(r, 1, MPI_INT, 0, k, MPI_COMM_WORLD, &stat);
	  if(r > max_r)
	  {
		max_r = r;	
	  }
	  R[i] = r;
	}

	double g[max_r];
	int count[max_r];
	
	//initialising g and count
	for(i=0;i<max_r;i++)
	{
		g[i]=0;
		count[i]=0;
	}

for(i=0;i<arrayslenght;i++)
{
	g[R[i]] = g[R[i]] + thethaArray[i];
	count[R[i]] = count[R[i]] + 1;
}

for(i=0;i<max_r;i++)
{
	g[i] = g[i]/count[i];
		
}
}
	
	return 0;
	
}

/*
 * Triangular number: returns 1 + 2 + ... + input, i.e. the number of
 * unordered pairs among input+1 items.  Returns 0 for input <= 0, matching
 * the original loop (which simply never executed in that case).
 */
int sum(int input)
{
	if (input <= 0)
	{
		return 0;
	}
	/* Closed form replaces the O(n) loop; input*(input+1) is always even,
	 * so the division is exact. */
	return input * (input + 1) / 2;
}

I am new to MPI-style programming, so I am trying to see how parallel programming works, but I have a problem when running with only one processor.
Does anyone know the right command for mpirun -np # <filename>?

This article has been dead for over six months. Start a new discussion instead.