| 1 | #ifdef _CIVL
|
|---|
| 2 | #include <civlc.cvh>
|
|---|
| 3 | #endif
|
|---|
| 4 | //http://www.arc.vt.edu/resources/software/cuda/docs/cuda-omp.cu
|
|---|
| 5 |
|
|---|
| 6 | #include <omp.h>
|
|---|
| 7 | #include <cuda.h>
|
|---|
| 8 | #include <stdio.h>
|
|---|
| 9 | #include <stdlib.h>
|
|---|
| 10 |
|
|---|
| 11 |
|
|---|
| 12 | #ifdef _CIVL
|
|---|
| 13 | $input int BLOCKS;
|
|---|
| 14 | $input int BLOCK_B;
|
|---|
| 15 | $assume(1 <= BLOCKS && BLOCKS <= BLOCK_B);
|
|---|
| 16 | $input int THREADS_PER_BLOCK;
|
|---|
| 17 | $input int THREADS_B;
|
|---|
| 18 | $assume(1 <= THREADS_PER_BLOCK && THREADS_PER_BLOCK <= THREADS_B);
|
|---|
| 19 | #else
|
|---|
| 20 | #define BLOCKS 64
|
|---|
| 21 | #define THREADS_PER_BLOCK 128
|
|---|
| 22 | #endif
|
|---|
| 23 |
|
|---|
| 24 | // A kernel that increments each array element by the value b
|
|---|
| 25 |
|
|---|
// Device kernel: add the constant b to every element of g_a.
//
// Expects a 1-D launch in which gridDim.x * blockDim.x equals the array
// length exactly — there is no bounds guard, so the caller must size the
// grid to cover the data with no remainder (main() does this).
__global__ void kernelAddConstant(int *g_a, const int b)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    g_a[idx] = g_a[idx] + b;
}
|
|---|
| 31 |
|
|---|
| 32 | // Check whether each element was incremented by the value b
|
|---|
// Verify on the host that every element was incremented by b,
// i.e. that data[i] == i + b for all n elements.
// Returns 1 when all elements match, 0 at the first mismatch.
int correctResult(int *data, const int n, const int b)
{
    int i = 0;
    while (i < n)
    {
        if (data[i] != i + b)
            return 0;
        ++i;
    }
    return 1;
}
|
|---|
| 40 |
|
|---|
// Host driver: one OpenMP thread per GPU, each incrementing its slice of
// a shared host array by b on its device, then verifying on the host.
int main(int argc, char *argv[])
{
    // Variable which holds number of GPUs
    int num_gpus = 0;

    // Determine the number of CUDA capable GPUs
    cudaGetDeviceCount(&num_gpus);
    if(num_gpus < 1)
    {
        printf("No CUDA Capable GPU(s) Detected \n");
        return 1;
    }

    // Display the CPU and GPU processor specification
    int num_procs = omp_get_num_procs();
    printf("number of host CPUs:\t%d\n", num_procs);
    printf("number of CUDA devices:\t%d\n", num_gpus);
    for(int i = 0; i < num_gpus; i++)
    {
        cudaDeviceProp dprop;
        cudaGetDeviceProperties(&dprop, i);
        printf("\t Device %d is a %s\n", i, dprop.name);
    }

    // Initialize the variables: n elements total, split evenly across the
    // GPUs (n is an exact multiple of num_gpus * THREADS_PER_BLOCK).
    unsigned int n = num_gpus * THREADS_PER_BLOCK * BLOCKS;
    unsigned int nbytes = n * sizeof(int);
    int *a = 0; // pointer to data on the CPU
    int b = 3;  // value by which each array element will be incremented
    a = (int*)malloc(nbytes);

    if(0 == a)
    {
        printf("couldn't allocate CPU memory\n");
        return 1;
    }

    // a[i] == i, so after the kernel each a[i] should equal i + b.
    for(unsigned int i = 0; i < n; i++)
        a[i] = i;

    // Set the number of threads to the number of GPUs on the system,
    // so each OpenMP thread drives exactly one device.
    omp_set_num_threads(num_gpus);

#pragma omp parallel
    {
        unsigned int cpu_thread_id = omp_get_thread_num();
        unsigned int num_cpu_threads = omp_get_num_threads();

        // Assign and check the GPU device for each thread
        int gpu_id = -1;
        cudaSetDevice(cpu_thread_id % num_gpus);
        cudaGetDevice(&gpu_id);

        // %u matches the unsigned thread ids (the original passed
        // unsigned int to %d, a format-specifier mismatch).
        printf("CPU thread %u (of %u) uses CUDA device %d\n", cpu_thread_id, num_cpu_threads, gpu_id);

        // Device buffer owned by this CPU thread
        int *d_a = 0;

        // This thread's slice of the host array
        int *sub_a = a + cpu_thread_id * n / num_cpu_threads;

        unsigned int nbytes_per_kernel = nbytes / num_cpu_threads;
        dim3 gpu_threads = {THREADS_PER_BLOCK, 1, 1};
        dim3 gpu_blocks = {(n / (gpu_threads.x * num_cpu_threads)), 1, 1};

        // Allocate memory on the device
        cudaMalloc((void**)&d_a, nbytes_per_kernel);

        // Initialize the array on the device with zeros
        cudaMemset(d_a, 0, nbytes_per_kernel);

        // Copy data from host to device
        cudaMemcpy(d_a, sub_a, nbytes_per_kernel, cudaMemcpyHostToDevice);

        // Launch the kernel; the grid covers the slice exactly, so the
        // kernel needs no bounds guard.
        kernelAddConstant<<<gpu_blocks, gpu_threads>>>(d_a, b);

        // Copy the result from the device to the host. cudaMemcpy on the
        // default stream synchronizes with the preceding kernel launch.
        cudaMemcpy(sub_a, d_a, nbytes_per_kernel, cudaMemcpyDeviceToHost);

        // Deallocate the memory on the device
        cudaFree(d_a);
    }

    // BUG FIX: the original called cudaGetLastError() twice. The first call
    // (inside the if-condition) clears the sticky error state, so the second
    // call used to fetch the message always returned cudaSuccess and the
    // real error string was never printed. Capture the code exactly once,
    // and keep it in its proper cudaError_t type (the original stored it
    // in an int).
    cudaError_t err = cudaGetLastError();
    if(cudaSuccess != err) {
        printf("%s\n", cudaGetErrorString(err));
    }

    // Check for correctness of the result
    if(correctResult(a, n, b)) {
#ifdef _CIVL
        $assert(($true));
#endif
        printf("Test PASSED\n");
    } else
        printf("Test FAILED\n");

    // Deallocate the CPU memory
    free(a);

    // cudaThreadExit() is deprecated; device cleanup happens at process exit.

    return 0;
}
|
|---|
| 152 |
|
|---|