source: CIVL/examples/cuda/matMult1.cu@beab7f2

Last change on this file since beab7f2 was ea777aa, checked in by Alex Wilton <awilton@…>, 3 years ago

Moved examples, include, build_default.properties, common.xml, and README out from dev.civl.com into the root of the repo.

git-svn-id: svn://vsl.cis.udel.edu/civl/trunk@5704 fb995dde-84ed-4084-dfe6-e5aef3e2452c

// TODO add assertions to check equivalence of implementation results
#ifdef _CIVL
#include <civlc.cvh>
#endif
/***********************************************************************
* FILENAME: MM.cu
* Matrix Multiplication
* Matrix operands have row-major order.
*
* C = A * B
* Multiplies two square matrices (NxN * NxN).
* Matrix values have type double.
*
* A simple CUDA program has a basic workflow:
* 1) Initialize matrix operands as double-precision arrays on host (CPU).
* 2) Copy operands from host memory to GPU memory.
* 3) Apply matrix operation to operands on GPU.
* 4) Copy result from GPU memory to host memory.
*
* CUDA C Programming Guide Version 4.2 (3.2.3, p.22):
* http://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/CUDA_C_Programming_Guide.pdf
*
* MM with linearized matrix operands:
* http://www.hpcwire.com/hpcwire/2008-10-08/compilers_and_more_programming_gpus_today.html
*
*************************************************************************/
// online source: https://www.rcac.purdue.edu/userinfo/resources/carter/compile/MM.cu

#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"

#ifdef _CIVL
$input int N;
$input int TILE_WIDTH;
#else
#define N 1024 /* size of square matrix */
#define TILE_WIDTH 16
#endif

#ifdef _CIVL
$input double A[N*N];
$input double B[N*N];
#endif
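/* Note: under CIVL, the $input variables above (N, TILE_WIDTH, A, B) are
   symbolic, so verification covers every value permitted by the $assume
   constraints in main rather than a single concrete run. */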

/* MM kernel using global (not shared) memory. */
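/* Each thread computes exactly one element of C directly from global
   memory. There is no bounds check, so the launch configuration must
   tile the matrix exactly (width divisible by the block dimensions). */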
__global__
void myMM_global (const double * const A, const double * const B, double *C, int width) {

  /* Get row and column from block and thread IDs. */
  int row = (blockDim.y*blockIdx.y) + threadIdx.y;
  int col = (blockDim.x*blockIdx.x) + threadIdx.x;

  /* Result of the single element this thread computes. */
  double result = 0.0;

  /* Compute one element of the matrix product. */
  for (int i = 0; i < width; ++i)
    result += A[row*width + i] * B[i*width + col];

  /* Store the result of one matrix element in matrix C. */
  C[row*width + col] = result;
}


/* MM kernel using shared memory. */
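/* Tiling scheme: each block computes one TILE_WIDTH x TILE_WIDTH tile of
   C. In each phase the threads of a block cooperatively stage one tile
   of A and one tile of B in shared memory and accumulate from there,
   cutting global-memory reads by a factor of TILE_WIDTH. This assumes
   width is a multiple of TILE_WIDTH. */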
__global__
void myMM_shared (const double * const A, const double * const B, double* C, int width) {
  __shared__ double A_shared[TILE_WIDTH][TILE_WIDTH];
  __shared__ double B_shared[TILE_WIDTH][TILE_WIDTH];

  int bx = blockIdx.x; int by = blockIdx.y;
  int tx = threadIdx.x; int ty = threadIdx.y;

  /* Identify the row and column of the C element to work on. */
  int row = by * TILE_WIDTH + ty;
  int col = bx * TILE_WIDTH + tx;

  double result = 0.0;

  /* Loop over the A and B tiles required to compute the C element. */
  for (int phase = 0; phase < width/TILE_WIDTH; ++phase) {
    /* Cooperatively load the A and B tiles into shared memory. */
    A_shared[ty][tx] = A[row*width + (phase*TILE_WIDTH + tx)];
    B_shared[ty][tx] = B[col + (phase*TILE_WIDTH + ty)*width];
    __syncthreads();

    for (int k = 0; k < TILE_WIDTH; ++k)
      result += A_shared[ty][k] * B_shared[k][tx];
    __syncthreads();
  }
  C[row*width+col] = result;
}


/************************************************************************/
/************************************************************************/
/************************************************************************/

int main (int argc, char** argv) {

#ifdef _CIVL
  $assume(argc == 2);
  $assume(atoi(argv[1]) == 0);
#endif
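  /* The assumptions above confine verification to the path where exactly
     one command-line argument is given and it selects device 0; the
     usage/error branches below are therefore not explored. */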

  /* Set device based on input from command line. */
  if (argc > 1) {
    if (cudaSetDevice(atoi(argv[1])) != cudaSuccess) {
      int num_devices;
      cudaGetDeviceCount(&num_devices);
      fprintf(stderr, "Error initializing device %s, "
              "device value must be 0-%d\n", argv[1], (num_devices-1));
      return 0;
    }
  } else {
    fprintf(stderr, "Usage: %s gpu_device\n", argv[0]);
    return 0;
  }

  /* Declare CPU arrays. */
#ifndef _CIVL
  double A[N*N],B[N*N];
#endif
  double C[N*N]; /* linearized CPU double arrays */
  int r,c;

  /* Declare GPU arrays. */
  double *G_A,*G_B,*G_C; /* linearized GPU double arrays */
  size_t size_a,size_b,size_c; /* element count of each linearized array */
  size_a = size_b = size_c = N*N;

  /* Setup a clock. */
  cudaEvent_t start, stop;
  float CPU_elapsedtime, GPU_global_elapsedtime, GPU_shared_elapsedtime;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
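  /* cudaEventRecord stamps the stream at that point in the sequence of
     GPU work; after cudaEventSynchronize, cudaEventElapsedTime reports
     the time between two stamps in milliseconds. */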
  /* 1) Initialize matrix operands as double-precision arrays on host (CPU). */
#ifndef _CIVL
  for (r=0;r<N;++r)
    for (c=0;c<N;++c) {
      A[r*N+c] = 1.0;
      B[r*N+c] = 1.0;
    }
#endif

/*-----------------------------------------------------------------------*/

  /* MM on a CPU. */
  cudaEventRecord(start,0);
  for (int r = 0; r < N; ++r)
    for (int c = 0; c < N; ++c) {
      C[r*N+c] = 0.0; /* clear the accumulator; C starts uninitialized */
      for (int k = 0; k < N; ++k)
        C[r*N+c] += A[r*N+k] * B[k*N+c];
    }
  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&CPU_elapsedtime,start,stop);
  printf(" speedup\n");
  printf(" -------\n");
  printf("Elapsed time in CPU: %7.1f milliseconds\n", CPU_elapsedtime);
/*-----------------------------------------------------------------------*/

  /* MM on Global Memory of GPGPU. */
  cudaEventRecord(start,0);

  /* 2) Copy operands from CPU memory to GPGPU memory. */
  cudaMalloc((void**)&G_A,size_a*sizeof(double)); /* alloc A in GPGPU */
  cudaMalloc((void**)&G_B,size_b*sizeof(double)); /* alloc B in GPGPU */
  cudaMalloc((void**)&G_C,size_c*sizeof(double)); /* alloc C in GPGPU */
  cudaMemcpy(G_A,A,size_a*sizeof(double),cudaMemcpyHostToDevice);
  cudaMemcpy(G_B,B,size_b*sizeof(double),cudaMemcpyHostToDevice);

  /* 3) Apply matrix operation to operands on GPGPU */
  /* There is no partial final block in this example. */
  dim3 block = {TILE_WIDTH,TILE_WIDTH,1};    /* a 2D block: (16,16,1) */
  dim3 grid = {N/TILE_WIDTH,N/TILE_WIDTH,1}; /* one block per tile: (N/16,N/16,1), i.e. (64,64,1) for N=1024 */
  myMM_global<<< grid,block >>>(G_A,G_B,G_C,N);

  /* 4) Copy result from GPGPU memory to CPU memory. */
  cudaMemcpy(C,G_C,size_c*sizeof(double),cudaMemcpyDeviceToHost);

  /* Deallocate memory on GPGPU. */
  cudaFree(G_A);
  cudaFree(G_B);
  cudaFree(G_C);

  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&GPU_global_elapsedtime,start,stop);
  printf("Elapsed time in GPU (global memory): %7.1f milliseconds %5.1f\n",
         GPU_global_elapsedtime,CPU_elapsedtime/GPU_global_elapsedtime);
//*
  printf("\nGLOBAL MEMORY:\n");
  for (r=0;r<N;++r)
    for (c=0;c<N;++c) {
      printf("%2d,%2d %g\n", r,c,C[r*N+c]);
    }
//*/
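
  /* Toward the TODO at the top of the file: save a copy of the
     global-memory result so the shared-memory result can be checked
     against it below. C_global is a helper introduced for that check. */
  double *C_global = (double*)malloc(size_c*sizeof(double));
  if (C_global != NULL)
    for (r = 0; r < N; ++r)
      for (c = 0; c < N; ++c)
        C_global[r*N+c] = C[r*N+c];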
/*-----------------------------------------------------------------------*/

  /* MM on Shared Memory of GPGPU. */
  cudaEventRecord(start,0);

  /* 2) Copy operands from CPU memory to GPGPU memory. */
  cudaMalloc((void**)&G_A,size_a*sizeof(double)); /* alloc A in GPGPU */
  cudaMalloc((void**)&G_B,size_b*sizeof(double)); /* alloc B in GPGPU */
  cudaMalloc((void**)&G_C,size_c*sizeof(double)); /* alloc C in GPGPU */
  cudaMemcpy(G_A,A,size_a*sizeof(double),cudaMemcpyHostToDevice);
  cudaMemcpy(G_B,B,size_b*sizeof(double),cudaMemcpyHostToDevice);

  /* 3) Apply matrix operation to operands on GPGPU */
  /* There is no partial final block in this example. */
  /* Use the same grid and block from the previous case. */
  myMM_shared<<< grid,block >>>(G_A,G_B,G_C,N);

  /* 4) Copy result from GPGPU memory to CPU memory. */
  cudaMemcpy(C,G_C,size_c*sizeof(double),cudaMemcpyDeviceToHost);

  /* Deallocate memory on GPGPU. */
  cudaFree(G_A);
  cudaFree(G_B);
  cudaFree(G_C);

  cudaEventRecord(stop,0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&GPU_shared_elapsedtime,start,stop);
  printf("Elapsed time in GPU (shared memory): %7.1f milliseconds %5.1f\n",
         GPU_shared_elapsedtime,CPU_elapsedtime/GPU_shared_elapsedtime);
//*
  printf("\nSHARED MEMORY:\n");
  for (r=0;r<N;++r)
    for (c=0;c<N;++c) {
      printf("%2d,%2d %g\n", r,c,C[r*N+c]);
    }
//*/
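
  /* Equivalence check sketched for the TODO at the top of the file. Both
     kernels accumulate over the k index in the same order, so exact
     equality is expected here; a tolerance would be needed if the
     summation orders differed. */
  if (C_global != NULL) {
    for (r = 0; r < N; ++r)
      for (c = 0; c < N; ++c) {
#ifdef _CIVL
        $assert(C[r*N+c] == C_global[r*N+c]);
#else
        if (C[r*N+c] != C_global[r*N+c])
          fprintf(stderr, "GPU results differ at %d,%d\n", r, c);
#endif
      }
    free(C_global);
  }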
/*-----------------------------------------------------------------------*/

  /* Deallocate the clock. */
  cudaEventDestroy(start);
  cudaEventDestroy(stop);

  return 0;
}