source: CIVL/examples/omp/shtns/sht_private.h

/*
 * Copyright (c) 2010-2015 Centre National de la Recherche Scientifique.
 * written by Nathanael Schaeffer (CNRS, ISTerre, Grenoble, France).
 *
 * nathanael.schaeffer@ujf-grenoble.fr
 *
 * This software is governed by the CeCILL license under French law and
 * abiding by the rules of distribution of free software. You can use,
 * modify and/or redistribute the software under the terms of the CeCILL
 * license as circulated by CEA, CNRS and INRIA at the following URL
 * "http://www.cecill.info".
 *
 * The fact that you are presently reading this means that you have had
 * knowledge of the CeCILL license and that you accept its terms.
 *
 */

/********************************************************************
 * SHTns : Spherical Harmonic Transform for numerical simulations. *
 * written by Nathanael Schaeffer / CNRS                            *
 ********************************************************************/

/// \internal \file sht_private.h private data and options.

#include <stdlib.h>
#include <complex.h>
#include <math.h>
// FFTW convention: the derivative d/dx = ik (no minus sign!)
#include "fftw3/fftw3.h"

// config file generated by ./configure
#include "sht_config.h"

#define SHTNS_PRIVATE
#include "shtns.h"

#ifdef _OPENMP
	#include <omp.h>
#endif


/* BEGIN COMPILE-TIME SETTINGS */

/// defines the maximum amount of memory in megabytes that SHTns should use.
#define SHTNS_MAX_MEMORY 2048

/// Minimum performance improvement required for DCT in \ref sht_auto mode. If not attained, we may switch back to Gauss.
#define MIN_PERF_IMPROVE_DCT 1.05

/// Try to enforce at least this accuracy for DCT in sht_auto mode.
#define MIN_ACCURACY_DCT 1.e-8

/// The default \ref opt_polar threshold (0 disabled, 1.e-6 is aggressive, 1.e-10 is safe, 1.e-14 is VERY safe)
#define SHT_DEFAULT_POLAR_OPT 1.e-10

/// The default \ref norm used by shtns_init
#define SHT_DEFAULT_NORM ( sht_orthonormal )
//#define SHT_DEFAULT_NORM ( sht_schmidt | SHT_NO_CS_PHASE )

/// The maximum order of non-linear terms to be resolved by SH transform by default.
/// 1 : no non-linear terms (the value set here), 2 : quadratic non-linear terms, 3 : triadic, ...
/// Must be at least 1.
#define SHT_DEFAULT_NL_ORDER 1

/// minimum NLAT to consider the use of DCT acceleration.
#define SHT_MIN_NLAT_DCT 64

/// time limit for timing individual transforms (in seconds)
#define SHT_TIME_LIMIT 0.2

/* END COMPILE-TIME SETTINGS */

// sht variants (std, ltr)
enum sht_variants { SHT_STD, SHT_LTR, SHT_M, SHT_NVAR };
// sht types (scalar synthesis, scalar analysis, vector synthesis, ...)
enum sht_types { SHT_TYP_SSY, SHT_TYP_SAN, SHT_TYP_VSY, SHT_TYP_VAN,
	SHT_TYP_GSP, SHT_TYP_GTO, SHT_TYP_3SY, SHT_TYP_3AN, SHT_NTYP };

// sht grids
enum sht_grids { GRID_NONE, GRID_GAUSS, GRID_REGULAR, GRID_POLES };

// pointers to the various transform function signatures
typedef void (*pf2l)(shtns_cfg, void*, void*, long int);
typedef void (*pf3l)(shtns_cfg, void*, void*, void*, long int);
typedef void (*pf4l)(shtns_cfg, void*, void*, void*, void*, long int);
typedef void (*pf6l)(shtns_cfg, void*, void*, void*, void*, void*, void*, long int);
typedef void (*pf2ml)(shtns_cfg, int, void*, void*, long int);
typedef void (*pf3ml)(shtns_cfg, int, void*, void*, void*, long int);
typedef void (*pf4ml)(shtns_cfg, int, void*, void*, void*, void*, long int);
typedef void (*pf6ml)(shtns_cfg, int, void*, void*, void*, void*, void*, void*, long int);

/// structure containing useful information about the SHT.
struct shtns_info {		// MUST start with "int nlm;"
/* PUBLIC PART (if modified, shtns.h should be modified accordingly) */
	unsigned int nlm;		///< total number of (l,m) spherical harmonics components.
	unsigned short lmax;		///< maximum degree (lmax) of spherical harmonics.
	unsigned short mmax;		///< maximum order (mmax*mres) of spherical harmonics.
	unsigned short mres;		///< the periodicity along the phi axis.
	unsigned short nphi;		///< number of spatial points in Phi direction (longitude)
	unsigned short nlat;		///< number of spatial points in Theta direction (latitude) ...
	unsigned short nlat_2;		///< ...and half of it (using (shtns.nlat+1)/2 allows odd shtns.nlat.)
	int *lmidx;			///< (virtual) index in SH array of given im (size mmax+1) : LiM(l,im) = lmidx[im] + l
	unsigned short *li;		///< degree l for given mode index (size nlm) : li[lm]
	unsigned short *mi;		///< order m for given mode index (size nlm) : mi[lm]
	double *ct, *st;		///< cos(theta) and sin(theta) arrays (size nlat)
	unsigned int nspat;		///< number of real numbers that must be allocated in a spatial field.
/* END OF PUBLIC PART */

	short fftc_mode;		///< how to perform the complex fft : -1 = no fft; 0 = interleaved/native; 1 = split/transpose.
	unsigned short nthreads;	///< number of threads (OpenMP).
	unsigned short *tm;		///< start theta value for SH (polar optimization : near the poles the Legendre polynomials go to zero for high m's)
	int k_stride_a;			///< stride in theta direction
	int m_stride_a;			///< stride in phi direction (m)
	double *wg;			///< Gauss weights for Gauss-Legendre quadrature.
	double *st_1;			///< 1/sin(theta);

	fftw_plan ifft, fft;		// plans for FFTW.
	fftw_plan ifftc, fftc;

	/* Legendre function generation arrays */
	double *alm;	// coefficient list for Legendre function recurrence (size 2*NLM)
	double *blm;	// coefficient list for modified Legendre function recurrence for analysis (size 2*NLM)
	double *l_2;	// array of size (LMAX+1) containing 1./l(l+1) for increasing integer l.

	void* ftable[SHT_NVAR][SHT_NTYP];	// pointers to transform functions.

	/* MEM matrices */
	double **ylm;		// matrix for inverse transform (synthesis)
	struct DtDp** dylm;	// theta and phi derivative of Ylm matrix
	double **zlm;		// matrix for direct transform (analysis)
	struct DtDp** dzlm;

	int ncplx_fft;		///< number of complex numbers to allocate for the fft : -1 = no fft; 0 = in-place fft (no allocation).

	/* DCT stuff */
	short mtr_dct;			///< m truncation for dct. -1 means no dct at all.
	unsigned short klim;		///< limit to k for non-linear terms (dct)
	fftw_plan idct, dct_m0;		// (I)DCT
	double **ykm_dct;	// matrix for inverse transform (synthesis) using dct.
	struct DtDp** dykm_dct;	// theta and phi derivative of Ykm matrix.
	double *zlm_dct0;	// matrix for direct transform (analysis), only m=0
	double *dzlm_dct0;

	/* other miscellaneous information */
	unsigned char nlorder;	// order of non-linear terms to be resolved by SH transform.
	unsigned char grid;	// store grid type.
	short norm;		// store the normalization of the Spherical Harmonics (enum \ref shtns_norm + \ref SHT_NO_CS_PHASE flag)
	unsigned fftw_plan_mode;
	double Y00_1, Y10_ct, Y11_st;
	shtns_cfg next;		// pointer to the next SHT setup, or NULL (forms a linked list of setups).
	// the end should be aligned on the size of int, to allow the storage of small arrays.
};
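
/* Editor's sketch (an assumption, not part of the original header): the ftable
 * member above pairs a variant (enum sht_variants) with a type (enum sht_types),
 * so a transform call could plausibly be dispatched through it using the pf2l
 * signature declared earlier. SHTns' actual dispatch code lives in the .c sources. */
static inline void example_scalar_synthesis(shtns_cfg shtns, void* Qlm, void* Vr, long int ltr) {
	pf2l fp = (pf2l) shtns->ftable[SHT_STD][SHT_TYP_SSY];	// registered scalar-synthesis routine
	fp(shtns, Qlm, Vr, ltr);				// forward to the selected implementation
}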

// define shortcuts to sizes.
#define NLM shtns->nlm
#define LMAX shtns->lmax
#define NLAT shtns->nlat
#define NLAT_2 shtns->nlat_2
#define NPHI shtns->nphi
#define MMAX shtns->mmax
#define MRES shtns->mres
#define MTR_DCT shtns->mtr_dct
#define SHT_NL_ORDER shtns->nlorder
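// note (editor's addition): these shortcuts expand to member accesses, so they
// assume a variable named `shtns` of type shtns_cfg in the enclosing scope, e.g.
//     int npoints(shtns_cfg shtns) { return NLAT * NPHI; }   // shtns->nlat * shtns->nphi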

// define index in alm/blm matrices
#define ALM_IDX(shtns, im) ( (im)*(2*shtns->lmax - ((im)-1)*shtns->mres) )
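/* Editor's sketch (assumption): alm/blm store the recurrence coefficients for
 * successive orders contiguously, and ALM_IDX gives the offset where the block
 * for order index im begins, so a pointer to that block can be formed directly. */
inline static double* example_alm_block(shtns_cfg shtns, int im) {
	return shtns->alm + ALM_IDX(shtns, im);		// coefficients for order m = im*shtns->mres
}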

// SHT_NORM without CS_PHASE
#define SHT_NORM (shtns->norm & 0x0FF)

#ifndef M_PI
# define M_PI 3.1415926535897932384626433832795
#endif
#ifndef M_PIl
# define M_PIl 3.1415926535897932384626433832795L
#endif

// the threshold for on-the-fly transforms is lower, because they allow some further optimization (degrees l that are not significant are not computed).
#define SHT_L_RESCALE_FLY 1000
// set to a value close to machine accuracy; this speeds up on-the-fly SHTs with very large l (lmax > SHT_L_RESCALE_FLY).
#define SHT_ACCURACY 1.0e-20
// scale factor for extended-range numbers (used in on-the-fly transforms to compute the recurrence)
#define SHT_SCALE_FACTOR 2.9073548971824275622e+135
//#define SHT_SCALE_FACTOR 2.0370359763344860863e+90
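// note (editor's addition): these constants appear to be exact powers of two
// (2^450 and 2^300 respectively), so multiplying by the scale factor changes
// only the exponent of a double, never its mantissa.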


#if _GCC_VEC_ == 0
	#undef _GCC_VEC_
#endif

/* are vector extensions available? */
#if !(defined __SSE2__ || defined __MIC__ || defined __VECTOR4DOUBLE__)
	#undef _GCC_VEC_
#endif
#ifdef __INTEL_COMPILER
	#if __INTEL_COMPILER < 1400
		#undef _GCC_VEC_
		#warning "no vector extensions available! Use gcc 4+ or icc 14+ for best performance."
	#endif
#endif
#ifdef __GNUC__
	#if __GNUC__ < 4
		#undef _GCC_VEC_
		#warning "no vector extensions available! Use gcc 4+ or icc 14+ for best performance."
	#endif
#endif

#if _GCC_VEC_ && __VECTOR4DOUBLE__
	// support Blue Gene/Q QPX vectors
	#define MIN_ALIGNMENT 32
	#define VSIZE 2
	typedef complex double v2d __attribute__((aligned (16)));	// vector that contains a complex number
	typedef double s2d __attribute__((aligned (16)));		// scalar number
	#define VSIZE2 4
	#define _SIMD_NAME_ "qpx"
	typedef vector4double rnd;	// vector of 4 doubles.
	#define vall(x) vec_splats(x)
	#define vread(mem, idx) vec_lda((idx)*32, ((double*)mem))
	#define vstor(mem, idx, v) vec_sta(v, (idx)*32, ((double*)mem))
	inline static double reduce_add(rnd a) {
		a += vec_perm(a, a, vec_gpci(02301));
		a += vec_perm(a, a, vec_gpci(01032));
		return( a[0] );
	}
	inline static v2d v2d_reduce(rnd a, rnd b) {
		a = vec_perm(a, b, vec_gpci(00426)) + vec_perm(a, b, vec_gpci(01537));
		a += vec_perm(a, a, vec_gpci(02301));
		return a[0] + I*a[1];
	}
	//#define v2d_reduce(a, b) ( reduce_add(a) +I* reduce_add(b) )

	#define S2D_STORE(mem, idx, ev, od) \
		vstor(mem, idx, ev+od); \
		vstor((double*)mem + NLAT-VSIZE2 - (idx)*VSIZE2, 0, vec_perm(ev-od, ev-od, vec_gpci(03210)));
	#define S2D_CSTORE(mem, idx, er, or, ei, oi) { \
		rnd aa = vec_perm(ei+oi, ei+oi, vec_gpci(01032)) + (er+or); \
		rnd bb = (er + or) - vec_perm(ei+oi, ei+oi, vec_gpci(01032)); \
		vstor(mem, idx, vec_perm(bb, aa, vec_gpci(00527))); \
		vstor(((double*)mem) + (NPHI-2*im)*NLAT, idx, vec_perm(aa, bb, vec_gpci(00527))); \
		aa = vec_perm(er-or, er-or, vec_gpci(01032)) + (ei-oi); \
		bb = vec_perm(er-or, er-or, vec_gpci(01032)) - (ei-oi); \
		vstor(((double*)mem) + NLAT, -(idx+1), vec_perm(bb, aa, vec_gpci(02705))); \
		vstor(((double*)mem) + (NPHI+1-2*im)*NLAT, -(idx+1), vec_perm(aa, bb, vec_gpci(02705))); }
	// TODO: S2D_CSTORE2 has not been tested and is probably wrong...
	#define S2D_CSTORE2(mem, idx, er, or, ei, oi) { \
		rnd aa = vec_perm(er+or, ei+oi, vec_gpci(00415)); \
		rnd bb = vec_perm(er+or, ei+oi, vec_gpci(02637)); \
		vstor(mem, idx*2, aa); \
		vstor(mem, idx*2+1, bb); \
		aa = vec_perm(er-or, ei-oi, vec_gpci(00415)); \
		bb = vec_perm(er-or, ei-oi, vec_gpci(02637)); \
		vstor(mem, NLAT_2-1-idx*2, aa); \
		vstor(mem, NLAT_2-2-idx*2, bb); }

	#define vdup(x) (x)

	#define vlo(a) (a[0])

	#define vcplx_real(a) creal(a)
	#define vcplx_imag(a) cimag(a)

	/*inline static void* VMALLOC(size_t s) {
		void* ptr;
		posix_memalign(&ptr, MIN_ALIGNMENT, s);
		return ptr;
	}*/
	#define VMALLOC(s) malloc(s)
	#define VFREE(s) free(s)
#endif

#if _GCC_VEC_ && __MIC__
	// these values must be adjusted for the larger vectors of the MIC
	#undef SHT_L_RESCALE_FLY
	#undef SHT_ACCURACY
	#define SHT_L_RESCALE_FLY 1800
	#define SHT_ACCURACY 1.0e-40

	#define MIN_ALIGNMENT 64
	#define VSIZE 2
	typedef complex double v2d __attribute__((aligned (16)));	// vector that contains a complex number
	typedef double s2d __attribute__((aligned (16)));		// scalar number
	#define VSIZE2 8
	#include <immintrin.h>
	#define _SIMD_NAME_ "mic"
	typedef double rnd __attribute__ ((vector_size (VSIZE2*8)));	// vector of 8 doubles.

	typedef union { rnd i; double v[8]; } vec_rnd;
	#define vall(x) ((rnd) _mm512_set1_pd(x))
	inline static rnd vread(double *mem, int idx) {		// unaligned load.
		rnd t;
		t = (rnd)_mm512_loadunpacklo_pd( t, (mem) + (idx)*VSIZE2 );
		t = (rnd)_mm512_loadunpackhi_pd( t, (mem) + (idx)*VSIZE2 + 64 );
		return t;
	}
	#define reduce_add(a) _mm512_reduce_add_pd(a)
	#define v2d_reduce(a, b) ( _mm512_reduce_add_pd(a) +I* _mm512_reduce_add_pd(b) )
	inline static void vstor(double *mem, int idx, rnd v) {		// unaligned store.
		_mm512_packstorelo_pd((mem) + (idx)*VSIZE2, v);
		_mm512_packstorehi_pd((mem) + (idx)*VSIZE2 + 64, v);
	}

	// could be simplified with scatter
	#define S2D_STORE(mem, idx, ev, od) \
		_mm512_store_pd( (double*)mem + (idx)*VSIZE2, ev+od); \
		_mm512_store_pd( (double*)mem + NLAT_2-VSIZE2 - (idx)*VSIZE2, \
			_mm512_castsi512_pd(_mm512_shuffle_epi32(_mm512_permute4f128_epi32(_mm512_castpd_si512(ev-od), _MM_PERM_ABCD),_MM_PERM_BADC)));

	// could be simplified with scatter
	#define S2D_CSTORE(mem, idx, er, or, ei, oi) { \
		rnd aa = (rnd)_mm512_castsi512_pd(_mm512_shuffle_epi32(_mm512_castpd_si512(ei+oi), _MM_PERM_BADC)); \
		rnd bb = (er + or) - aa; \
		aa += er + or; \
		_mm512_store_pd( (double*)mem + (idx)*VSIZE2, _mm512_mask_mov_pd(bb, 170, aa) ); \
		_mm512_store_pd( (double*)mem + (NPHI-VSIZE2*im)*NLAT_2 + (idx)*VSIZE2, _mm512_mask_mov_pd(aa, 170, bb) ); \
		aa = (rnd)_mm512_castsi512_pd(_mm512_shuffle_epi32( _mm512_castpd_si512(er-or), _MM_PERM_BADC )); \
		bb = aa - (ei - oi); \
		aa += ei - oi; \
		_mm512_store_pd( (double*)mem + NLAT_2-VSIZE2 - (idx)*VSIZE2, \
			_mm512_castsi512_pd(_mm512_shuffle_epi32(_mm512_permute4f128_epi32(_mm512_castpd_si512(_mm512_mask_mov_pd(bb,170,aa)), _MM_PERM_ABCD),_MM_PERM_BADC))); \
		_mm512_store_pd( (double*)mem + (NPHI+1-2*im)*NLAT_2-VSIZE2 - (idx)*VSIZE2, \
			_mm512_castsi512_pd(_mm512_shuffle_epi32(_mm512_permute4f128_epi32(_mm512_castpd_si512(_mm512_mask_mov_pd(bb,170,aa)), _MM_PERM_ABCD),_MM_PERM_BADC))); }

	#define vdup(x) (x)

	#define vlo(a) ((vec_rnd)a).v[0]

	#define vcplx_real(a) creal(a)
	#define vcplx_imag(a) cimag(a)

	#define VMALLOC(s) _mm_malloc(s, MIN_ALIGNMENT)
	#define VFREE(s) _mm_free(s)
#endif


#if _GCC_VEC_ && __SSE2__
	#define MIN_ALIGNMENT 16
	#define VSIZE 2
	typedef double s2d __attribute__ ((vector_size (8*VSIZE)));	// vector that should behave like a real scalar for complex number multiplication.
	typedef double v2d __attribute__ ((vector_size (8*VSIZE)));	// vector that contains a complex number
	#ifdef __AVX__
		#define VSIZE2 4
		#include <immintrin.h>
		#define _SIMD_NAME_ "avx"
		typedef double rnd __attribute__ ((vector_size (VSIZE2*8)));	// vector of 4 doubles.
		#define vall(x) ((rnd) _mm256_set1_pd(x))
		#define vread(mem, idx) ((rnd)_mm256_loadu_pd( ((double*)mem) + (idx)*4 ))
		#define vstor(mem, idx, v) _mm256_storeu_pd( ((double*)mem) + (idx)*4 , v)
		inline static double reduce_add(rnd a) {
			v2d t = (v2d)_mm256_castpd256_pd128(a) + (v2d)_mm256_extractf128_pd(a,1);
			return _mm_cvtsd_f64(t) + _mm_cvtsd_f64(_mm_unpackhi_pd(t,t));
		}
		inline static v2d v2d_reduce(rnd a, rnd b) {
			a = _mm256_hadd_pd(a, b);
			return (v2d)_mm256_castpd256_pd128(a) + (v2d)_mm256_extractf128_pd(a,1);
		}
		#define S2D_STORE(mem, idx, ev, od) \
			_mm256_storeu_pd(((double*)mem) + (idx)*4, ev+od); \
			((s2d*)mem)[NLAT_2-1 - (idx)*2] = _mm256_castpd256_pd128(_mm256_shuffle_pd(ev-od, ev-od, 5)); \
			((s2d*)mem)[NLAT_2-2 - (idx)*2] = _mm256_extractf128_pd(_mm256_shuffle_pd(ev-od, ev-od, 5), 1);
		#define S2D_CSTORE(mem, idx, er, or, ei, oi) { \
			rnd aa = (rnd)_mm256_shuffle_pd(ei+oi,ei+oi,5) + (er + or); rnd bb = (er + or) - (rnd)_mm256_shuffle_pd(ei+oi,ei+oi,5); \
			_mm256_storeu_pd(((double*)mem) + (idx)*4, _mm256_shuffle_pd(bb, aa, 10 )); \
			_mm256_storeu_pd(((double*)mem) + (NPHI-2*im)*NLAT + (idx)*4, _mm256_shuffle_pd(aa, bb, 10 )); \
			aa = (rnd)_mm256_shuffle_pd(er-or,er-or,5) + (ei - oi); bb = (rnd)_mm256_shuffle_pd(er-or,er-or,5) - (ei - oi); \
			((s2d*)mem)[NLAT_2-1 -(idx)*2] = _mm256_castpd256_pd128(_mm256_shuffle_pd(bb, aa, 10 )); \
			((s2d*)mem)[NLAT_2-2 -(idx)*2] = _mm256_extractf128_pd(_mm256_shuffle_pd(bb, aa, 10 ), 1); \
			((s2d*)mem)[(NPHI+1-2*im)*NLAT_2 -1 -(idx)*2] = _mm256_castpd256_pd128(_mm256_shuffle_pd(aa, bb, 10 )); \
			((s2d*)mem)[(NPHI+1-2*im)*NLAT_2 -2 -(idx)*2] = _mm256_extractf128_pd(_mm256_shuffle_pd(aa, bb, 10 ), 1); }
		#define S2D_CSTORE2(mem, idx, er, or, ei, oi) { \
			rnd aa = (rnd)_mm256_unpacklo_pd(er+or, ei+oi); rnd bb = (rnd)_mm256_unpackhi_pd(er+or, ei+oi); \
			((s2d*)mem)[(idx)*4] = _mm256_castpd256_pd128(aa); \
			((s2d*)mem)[(idx)*4+1] = _mm256_castpd256_pd128(bb); \
			((s2d*)mem)[(idx)*4+2] = _mm256_extractf128_pd(aa, 1); \
			((s2d*)mem)[(idx)*4+3] = _mm256_extractf128_pd(bb, 1); \
			aa = (rnd)_mm256_unpacklo_pd(er-or, ei-oi); bb = (rnd)_mm256_unpackhi_pd(er-or, ei-oi); \
			((s2d*)mem)[NLAT-1-(idx)*4] = _mm256_castpd256_pd128(aa); \
			((s2d*)mem)[NLAT-2-(idx)*4] = _mm256_castpd256_pd128(bb); \
			((s2d*)mem)[NLAT-3-(idx)*4] = _mm256_extractf128_pd(aa, 1); \
			((s2d*)mem)[NLAT-4-(idx)*4] = _mm256_extractf128_pd(bb, 1); }
	#else
		#define VSIZE2 2
		typedef double rnd __attribute__ ((vector_size (VSIZE2*8)));	// vector of 2 doubles.
		#ifdef __SSE3__
			#include <pmmintrin.h>
			#define _SIMD_NAME_ "sse3"
			inline static v2d v2d_reduce(v2d a, v2d b) {
				return _mm_hadd_pd(a,b);
			}
		#else
			#include <emmintrin.h>
			#define _SIMD_NAME_ "sse2"
			inline static v2d v2d_reduce(v2d a, v2d b) {
				v2d c = _mm_unpacklo_pd(a, b);	b = _mm_unpackhi_pd(a, b);
				return b + c;
			}
		#endif
		#define reduce_add(a) ( _mm_cvtsd_f64(a) + _mm_cvtsd_f64(_mm_unpackhi_pd(a,a)) )
		#define vall(x) ((rnd) _mm_set1_pd(x))
		#define vread(mem, idx) ((s2d*)mem)[idx]
		#define vstor(mem, idx, v) ((s2d*)mem)[idx] = v
		#define S2D_STORE(mem, idx, ev, od) ((s2d*)mem)[idx] = ev+od; ((s2d*)mem)[NLAT_2-1 - (idx)] = vxchg(ev-od);
		#define S2D_CSTORE(mem, idx, er, or, ei, oi) { \
			rnd aa = vxchg(ei + oi) + (er + or); rnd bb = (er + or) - vxchg(ei + oi); \
			((s2d*)mem)[idx] = _mm_shuffle_pd(bb, aa, 2 ); \
			((s2d*)mem)[(NPHI-2*im)*NLAT_2 + (idx)] = _mm_shuffle_pd(aa, bb, 2 ); \
			aa = vxchg(er - or) + (ei - oi); bb = vxchg(er - or) - (ei - oi); \
			((s2d*)mem)[NLAT_2-1 -(idx)] = _mm_shuffle_pd(bb, aa, 2 ); \
			((s2d*)mem)[(NPHI+1-2*im)*NLAT_2 -1 -(idx)] = _mm_shuffle_pd(aa, bb, 2 ); }
		#define S2D_CSTORE2(mem, idx, er, or, ei, oi) { \
			((s2d*)mem)[(idx)*2] = _mm_unpacklo_pd(er+or, ei+oi); \
			((s2d*)mem)[(idx)*2+1] = _mm_unpackhi_pd(er+or, ei+oi); \
			((s2d*)mem)[NLAT-1-(idx)*2] = _mm_unpacklo_pd(er-or, ei-oi); \
			((s2d*)mem)[NLAT-2-(idx)*2] = _mm_unpackhi_pd(er-or, ei-oi); }
	#endif
	#ifdef __SSE3__
		#define addi(a,b) _mm_addsub_pd(a, _mm_shuffle_pd(b,b,1))	// a + I*b
		#define subadd(a,b) _mm_addsub_pd(a, b)				// [al-bl, ah+bh]
		//#define CMUL(a,b) _mm_addsub_pd(_mm_shuffle_pd(a,a,0)*b, _mm_shuffle_pd(a,a,3)*_mm_shuffle_pd(b,b,1))
	#else
		#define addi(a,b) ( (a) + (_mm_shuffle_pd(b,b,1) * _mm_set_pd(1.0, -1.0)) )	// a + I*b [note: _mm_set_pd(imag, real) ]
		#define subadd(a,b) ( (a) + (b) * _mm_set_pd(1.0, -1.0) )			// [al-bl, ah+bh]
	#endif

	// build mask (-0, -0) to change sign of both hi and lo values using xorpd
	#define SIGN_MASK_2 _mm_castsi128_pd(_mm_slli_epi64(_mm_cmpeq_epi16(_mm_set1_epi64x(0), _mm_set1_epi64x(0)), 63))
	// build mask (0, -0) to change sign of hi value using xorpd (used in CFFT_TO_2REAL)
	#define SIGN_MASK_HI _mm_unpackhi_pd(vdup(0.0), SIGN_MASK_2 )
	// build mask (-0, 0) to change sign of lo value using xorpd
	#define SIGN_MASK_LO _mm_unpackhi_pd(SIGN_MASK_2, vdup(0.0) )

	// vset(lo, hi) takes two doubles and packs them into a vector
	#define vset(lo, hi) _mm_set_pd(hi, lo)
	// vdup(x) takes a double and duplicates it into a vector of 2 doubles.
	#define vdup(x) ((s2d)_mm_set1_pd(x))
	// vxchg(a) exchanges the hi and lo components of vector a
	#define vxchg(a) ((v2d)_mm_shuffle_pd(a,a,1))
	#define vlo_to_cplx(a) _mm_unpacklo_pd(a, vdup(0.0))
	#define vhi_to_cplx(a) _mm_unpackhi_pd(a, vdup(0.0))
	#define vcplx_real(a) vlo_to_dbl(a)
	#define vcplx_imag(a) vhi_to_dbl(a)
	#ifdef __clang__
		// allows compilation with clang (llvm)
		#define vlo(a) (a)[0]
		#define vlo_to_dbl(a) (a)[0]
		#define vhi_to_dbl(a) (a)[1]
	#else
		// gcc extensions
		#ifdef __AVX__
			#define vlo(a) _mm_cvtsd_f64(_mm256_castpd256_pd128(a))
		#else
			#define vlo(a) _mm_cvtsd_f64(a)
		#endif
		#define vlo_to_dbl(a) _mm_cvtsd_f64(a)
		#define vhi_to_dbl(a) _mm_cvtsd_f64(_mm_unpackhi_pd(a,a))
	#endif
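
	/* Editor's sketch (illustrative, not in the original): with v2d packing a
	 * complex number as (real, imag), addi(a,b) computes a + I*b, i.e. the
	 * lanes (re_a - im_b, im_a + re_b). */
	inline static v2d example_addi(v2d a, v2d b) {
		return addi(a, b);	// a + I*b
	}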

	// Allocate memory aligned on 16 bytes for SSE2 (fftw_malloc works only if fftw was compiled with --enable-sse2)
	// on 64-bit systems, malloc should be 16-byte aligned anyway.
	#define VMALLOC(s) ( (sizeof(void*) >= 8) ? malloc(s) : _mm_malloc(s, MIN_ALIGNMENT) )
	#define VFREE(s) ( (sizeof(void*) >= 8) ? free(s) : _mm_free(s) )
#endif



#ifndef _GCC_VEC_
	#define MIN_ALIGNMENT 16
	#define VSIZE 1
	#define VSIZE2 1
	#define _SIMD_NAME_ "scalar"
	typedef double s2d;
	typedef complex double v2d;
	typedef double rnd;
	#define vread(mem, idx) ((double*)mem)[idx]
	#define vstor(mem, idx, v) ((double*)mem)[idx] = v;
	#define reduce_add(a) (a)
	#define v2d_reduce(a,b) ((a) +I*(b))
	#define vlo(a) (a)
	#define vall(x) (x)
	#define vdup(x) (x)
	#define vxchg(x) (x)
	#define addi(a,b) ((a) + I*(b))
	#define vlo_to_dbl(a) (a)
	#define vhi_to_dbl(a) (a)
	#define vcplx_real(a) creal(a)
	#define vcplx_imag(a) cimag(a)

	// allocate memory aligned for FFTW. On 64-bit systems, malloc should be 16-byte aligned anyway.
	#define VMALLOC(s) ( (sizeof(void*) >= 8) ? malloc(s) : fftw_malloc(s) )
	#define VFREE(s) ( (sizeof(void*) >= 8) ? free(s) : fftw_free(s) )
#endif


#define SSE __attribute__((aligned (MIN_ALIGNMENT)))

/// align pointer on MIN_ALIGNMENT (must be a power of 2)
#define PTR_ALIGN(p) ((((size_t)(p)) + (MIN_ALIGNMENT-1)) & (~((size_t)(MIN_ALIGNMENT-1))))
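
/* Editor's sketch (hypothetical helper, not in the original): typical use of
 * PTR_ALIGN, over-allocating by MIN_ALIGNMENT so the rounded-up pointer still
 * lies inside the allocated block. */
static inline double* example_aligned_alloc(size_t n, void** raw_out) {
	void* raw = VMALLOC(n*sizeof(double) + MIN_ALIGNMENT);	// extra room for alignment
	*raw_out = raw;						// caller passes this to VFREE later
	return (double*) PTR_ALIGN(raw);			// first aligned address in the block
}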


struct DtDp {		// theta and phi derivatives stored together.
	double t, p;
};

#define GLUE2(a,b) a##b
#define GLUE3(a,b,c) a##b##c
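// e.g. GLUE2(SH_to_spat_, fly) pastes its arguments into the single token
// SH_to_spat_fly (an illustrative name; the actual generated names live in the
// .c sources); used to stamp out families of transform functions.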

// verbose printing
#if SHT_VERBOSE > 1
	#define PRINT_VERB(msg) printf(msg)
#else
	#define PRINT_VERB(msg) (0)
#endif
