Parallel Programming Examples: OpenMP and MPI Code Snippets

1. OpenMP Parallel Loop Scheduling

#include <stdio.h>
#include <omp.h>

/*
 * Demonstrates OpenMP static loop scheduling: the iteration space is
 * split into chunks of 2 and handed to threads round-robin, so the
 * printed thread/iteration pairs show the schedule(static, 2) mapping.
 *
 * Fix over the original: both scanf calls were unchecked, so a failed
 * read left `thread` uninitialized before omp_set_num_threads (UB).
 * Bad input now falls back to sane defaults.
 */
int main() {
    int n = 16, thread = 4; /* defaults used when input is missing/invalid */
    printf("\nEnter the number of tasks: ");
    if (scanf("%d", &n) != 1 || n < 0) {
        n = 16;
    }
    printf("\nEnter the number of threads: ");
    if (scanf("%d", &thread) != 1 || thread < 1) {
        thread = 4;
    }
    omp_set_num_threads(thread);
    printf("\n--------------------------------------\n");
    /* static,2: iterations handed out in fixed chunks of 2 per thread */
    #pragma omp parallel for schedule(static, 2)
    for (int i = 0; i < n; i++) {
        printf("Thread %d executes iteration %d\n", omp_get_thread_num(), i);
    }
    printf("--------------------------------------\n");
    return 0;
}

Compile & Run:

gcc -fopenmp schedule_demo.c -o schedule_demo
./schedule_demo

2. Parallel Fibonacci with OpenMP Tasks

#include <stdio.h>
#include <omp.h>
#include <time.h>

/*
 * Serial Fibonacci by naive recursion: fib(0)=0, fib(1)=1.
 * Baseline for comparing against the task-parallel version.
 *
 * Fix over the original: the function computed a `long int` sum but was
 * declared to return `int`, truncating results for n >= 47 on platforms
 * with 32-bit int. Widening the return type is backward-compatible for
 * existing callers.
 */
long int ser_fib(long int n) {
    if (n < 2) {
        return n; /* base cases */
    }
    /* deliberately exponential-time; kept simple as a timing baseline */
    return ser_fib(n - 1) + ser_fib(n - 2);
}

/*
 * Parallel Fibonacci using OpenMP tasks: each recursive call is spawned
 * as a child task and taskwait joins both children before the parent
 * sums their results. Intended to be invoked from inside a parallel
 * region under `#pragma omp single` (see main).
 *
 * Fix over the original: declared `int` return truncated the `long int`
 * sum for n >= 47 on 32-bit-int platforms; widened to `long int`.
 */
long int fib(long int n) {
    if (n < 2) return n; /* base cases: fib(0)=0, fib(1)=1 */
    long int x, y;
    /* x and y live in the parent's frame, so child tasks must see them
       as shared; the taskwait below guarantees both are written before
       the parent reads them — no data race. */
    #pragma omp task shared(x)
    x = fib(n - 1);
    #pragma omp task shared(y)
    y = fib(n - 2);
    #pragma omp taskwait
    return x + y;
}

/*
 * Driver: reads n, computes fib(n) with OpenMP tasks, reports elapsed
 * time.
 *
 * Fix over the original: clock() measures CPU time summed across ALL
 * threads, so a parallel run looks slower even when wall-clock time
 * drops. omp_get_wtime() returns wall-clock seconds, which is the
 * correct metric for parallel speedup. The scanf result is also
 * checked so a failed read cannot leave n at a garbage value.
 */
int main() {
    long int n = 10, result;
    double start, end;
    printf("\nEnter the value of n: ");
    if (scanf("%ld", &n) != 1 || n < 0) {
        n = 10; /* fall back to the default on bad input */
    }
    start = omp_get_wtime();
    #pragma omp parallel
    {
        /* exactly one thread starts the recursion; tasks fan out to
           the rest of the team from there */
        #pragma omp single
        result = fib(n);
    }
    end = omp_get_wtime();
    printf("Fibonacci(%ld) = %ld\n", n, result);
    printf("Time used in parallel mode = %f seconds\n", end - start);
    return 0;
}

3. MPI Point-to-Point Communication

#include <stdio.h>
#include <mpi.h>

/*
 * Point-to-point exchange of one int between ranks 0 and 1.
 *
 * Fix over the original: both ranks called MPI_Send before MPI_Recv,
 * which only succeeds while the implementation eagerly buffers the
 * message and deadlocks for larger payloads (both sides block in
 * Send). MPI_Sendrecv performs the paired exchange safely in one call.
 * Ranks other than 0 and 1 (if launched with more processes) no longer
 * print an uninitialized data_recv — they simply take no part.
 */
int main(int argc, char *argv[]) {
    int rank, data_send, data_recv = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    data_send = rank;
    if (rank == 0 || rank == 1) {
        int peer = 1 - rank; /* 0 talks to 1 and vice versa */
        MPI_Sendrecv(&data_send, 1, MPI_INT, peer, 0,
                     &data_recv, 1, MPI_INT, peer, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Process %d received %d\n", rank, data_recv);
    }
    MPI_Finalize();
    return 0;
}

4. MPI Broadcast Operation

#include <stdio.h>
#include <mpi.h>

/*
 * Broadcast demo: rank 0 sets a value and MPI_Bcast copies it into
 * every process's `data`; afterwards all ranks print the same number.
 */
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    /* only the root seeds the payload; everyone else starts at 0 */
    int data = (my_rank == 0) ? 100 : 0;
    MPI_Bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);

    printf("Process %d received data: %d\n", my_rank, data);

    MPI_Finalize();
    return 0;
}

5. MPI Reduce and Allreduce

#include <stdio.h>
#include <mpi.h>

/*
 * Reduction demo: every rank contributes value = rank + 1.
 * MPI_Reduce delivers the sum to rank 0 only; MPI_Allreduce computes
 * the maximum and makes it available on every rank.
 */
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int value = rank + 1;

    int sum = 0;
    MPI_Reduce(&value, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        printf("Sum using Reduce: %d\n", sum); /* valid only at root */
    }

    int max = 0;
    MPI_Allreduce(&value, &max, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    printf("Max using Allreduce (rank %d): %d\n", rank, max);

    MPI_Finalize();
    return 0;
}