Vector Optimized Library of Kernels  2.5.0
Architecture-tuned implementations of math kernels
volk_32fc_x2_conjugate_dot_prod_32fc.h
Go to the documentation of this file.
1 /* -*- c++ -*- */
2 /*
3  * Copyright 2012, 2014 Free Software Foundation, Inc.
4  *
5  * This file is part of GNU Radio
6  *
7  * GNU Radio is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 3, or (at your option)
10  * any later version.
11  *
12  * GNU Radio is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with GNU Radio; see the file COPYING. If not, write to
19  * the Free Software Foundation, Inc., 51 Franklin Street,
20  * Boston, MA 02110-1301, USA.
21  */
22 
59 #ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
60 #define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H
61 
62 
63 #include <volk/volk_complex.h>
64 
65 
66 #ifdef LV_HAVE_GENERIC
67 
69  const lv_32fc_t* input,
70  const lv_32fc_t* taps,
71  unsigned int num_points)
72 {
73 
74  const unsigned int num_bytes = num_points * 8;
75 
76  float* res = (float*)result;
77  float* in = (float*)input;
78  float* tp = (float*)taps;
79  unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
80 
81  float sum0[2] = { 0, 0 };
82  float sum1[2] = { 0, 0 };
83  unsigned int i = 0;
84 
85  for (i = 0; i < n_2_ccomplex_blocks; ++i) {
86  sum0[0] += in[0] * tp[0] + in[1] * tp[1];
87  sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
88  sum1[0] += in[2] * tp[2] + in[3] * tp[3];
89  sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];
90 
91  in += 4;
92  tp += 4;
93  }
94 
95  res[0] = sum0[0] + sum1[0];
96  res[1] = sum0[1] + sum1[1];
97 
98  if (num_bytes >> 3 & 1) {
99  *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
100  }
101 }
102 
103 #endif /*LV_HAVE_GENERIC*/
104 
105 #ifdef LV_HAVE_AVX
106 
107 #include <immintrin.h>
108 
110  const lv_32fc_t* input,
111  const lv_32fc_t* taps,
112  unsigned int num_points)
113 {
114  // Partial sums for indices i, i+1, i+2 and i+3.
115  __m256 sum_a_mult_b_real = _mm256_setzero_ps();
116  __m256 sum_a_mult_b_imag = _mm256_setzero_ps();
117 
118  for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
119  /* Four complex elements a time are processed.
120  * (ar + j⋅ai)*conj(br + j⋅bi) =
121  * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
122  */
123 
124  /* Load input and taps, split and duplicate real und imaginary parts of taps.
125  * a: | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
126  * b: | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
127  * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
128  * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
129  */
130  __m256 a = _mm256_loadu_ps((const float*)&input[i]);
131  __m256 b = _mm256_loadu_ps((const float*)&taps[i]);
132  __m256 b_real = _mm256_moveldup_ps(b);
133  __m256 b_imag = _mm256_movehdup_ps(b);
134 
135  // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
136  sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
137  // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
138  sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
139  }
140 
141  // Swap position of −ar⋅bi and ai⋅bi.
142  sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
143  // | ai⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains four such partial sums.
144  __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
145  /* Sum the four partial sums: Add high half of vector sum to the low one, i.e.
146  * s1 + s3 and s0 + s2 …
147  */
148  sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
149  // … and now (s0 + s2) + (s1 + s3)
150  sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
151  // Store result.
152  __m128 lower = _mm256_extractf128_ps(sum, 0);
153  _mm_storel_pi((__m64*)result, lower);
154 
155  // Handle the last elements if num_points mod 4 is bigger than 0.
156  for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
157  *result += lv_cmake(lv_creal(input[i]) * lv_creal(taps[i]) +
158  lv_cimag(input[i]) * lv_cimag(taps[i]),
159  lv_cimag(input[i]) * lv_creal(taps[i]) -
160  lv_creal(input[i]) * lv_cimag(taps[i]));
161  }
162 }
163 
164 #endif /* LV_HAVE_AVX */
165 
166 #ifdef LV_HAVE_SSE3
167 
168 #include <pmmintrin.h>
169 #include <xmmintrin.h>
170 
172  const lv_32fc_t* input,
173  const lv_32fc_t* taps,
174  unsigned int num_points)
175 {
176  // Partial sums for indices i and i+1.
177  __m128 sum_a_mult_b_real = _mm_setzero_ps();
178  __m128 sum_a_mult_b_imag = _mm_setzero_ps();
179 
180  for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
181  /* Two complex elements a time are processed.
182  * (ar + j⋅ai)*conj(br + j⋅bi) =
183  * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
184  */
185 
186  /* Load input and taps, split and duplicate real und imaginary parts of taps.
187  * a: | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
188  * b: | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
189  * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
190  * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
191  */
192  __m128 a = _mm_loadu_ps((const float*)&input[i]);
193  __m128 b = _mm_loadu_ps((const float*)&taps[i]);
194  __m128 b_real = _mm_moveldup_ps(b);
195  __m128 b_imag = _mm_movehdup_ps(b);
196 
197  // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
198  sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
199  // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
200  sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
201  }
202 
203  // Swap position of −ar⋅bi and ai⋅bi.
204  sum_a_mult_b_imag =
205  _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
206  // | ai⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains two such partial sums.
207  __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
208  // Sum the two partial sums.
209  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
210  // Store result.
211  _mm_storel_pi((__m64*)result, sum);
212 
213  // Handle the last element if num_points mod 2 is 1.
214  if (num_points & 1u) {
215  *result += lv_cmake(
216  lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
217  lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
218  lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
219  lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
220  }
221 }
222 
223 #endif /*LV_HAVE_SSE3*/
224 
225 #ifdef LV_HAVE_NEON
226 #include <arm_neon.h>
/* Conjugate dot product, NEON: *result = sum_i input[i] * conj(taps[i]).
 * Computes conj(sum_i taps[i] * conj(input[i])) — mathematically identical —
 * so a single final vector conjugation yields the requested result.
 */
static inline void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t* result,
                                                             const lv_32fc_t* input,
                                                             const lv_32fc_t* taps,
                                                             unsigned int num_points)
{

    unsigned int quarter_points = num_points / 4;
    unsigned int number;

    /* const-correct: both streams are only read. */
    const lv_32fc_t* a_ptr = taps;
    const lv_32fc_t* b_ptr = input;
    // for 2-lane vectors, 1st lane holds the real part,
    // 2nd lane holds the imaginary part
    float32x4x2_t a_val, b_val, accumulator;
    float32x4x2_t tmp_imag;
    accumulator.val[0] = vdupq_n_f32(0);
    accumulator.val[1] = vdupq_n_f32(0);

    for (number = 0; number < quarter_points; ++number) {
        a_val = vld2q_f32((const float*)a_ptr); // a0r|a1r|a2r|a3r || a0i|a1i|a2i|a3i
        b_val = vld2q_f32((const float*)b_ptr); // b0r|b1r|b2r|b3r || b0i|b1i|b2i|b3i
        __VOLK_PREFETCH(a_ptr + 8);
        __VOLK_PREFETCH(b_ptr + 8);

        // do the first multiply
        tmp_imag.val[1] = vmulq_f32(a_val.val[1], b_val.val[0]);
        tmp_imag.val[0] = vmulq_f32(a_val.val[0], b_val.val[0]);

        // use multiply accumulate/subtract to get result
        tmp_imag.val[1] = vmlsq_f32(tmp_imag.val[1], a_val.val[0], b_val.val[1]);
        tmp_imag.val[0] = vmlaq_f32(tmp_imag.val[0], a_val.val[1], b_val.val[1]);

        accumulator.val[0] = vaddq_f32(accumulator.val[0], tmp_imag.val[0]);
        accumulator.val[1] = vaddq_f32(accumulator.val[1], tmp_imag.val[1]);

        // increment pointers
        a_ptr += 4;
        b_ptr += 4;
    }
    // Horizontal reduction of the four per-lane partial sums.
    lv_32fc_t accum_result[4];
    vst2q_f32((float*)accum_result, accumulator);
    *result = accum_result[0] + accum_result[1] + accum_result[2] + accum_result[3];

    // tail case: remaining 0..3 samples, accumulated as taps * conj(input)
    for (number = quarter_points * 4; number < num_points; ++number) {
        *result += (*a_ptr++) * lv_conj(*b_ptr++);
    }
    // Undo the swapped-operand trick: conj(taps·conj(input)) = input·conj(taps).
    *result = lv_conj(*result);
}
276 #endif /*LV_HAVE_NEON*/
277 
278 #endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_u_H*/
279 
280 #ifndef INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
281 #define INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H
282 
283 #include <stdio.h>
284 #include <volk/volk_common.h>
285 #include <volk/volk_complex.h>
286 
287 
288 #ifdef LV_HAVE_AVX
289 #include <immintrin.h>
290 
292  const lv_32fc_t* input,
293  const lv_32fc_t* taps,
294  unsigned int num_points)
295 {
296  // Partial sums for indices i, i+1, i+2 and i+3.
297  __m256 sum_a_mult_b_real = _mm256_setzero_ps();
298  __m256 sum_a_mult_b_imag = _mm256_setzero_ps();
299 
300  for (long unsigned i = 0; i < (num_points & ~3u); i += 4) {
301  /* Four complex elements a time are processed.
302  * (ar + j⋅ai)*conj(br + j⋅bi) =
303  * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
304  */
305 
306  /* Load input and taps, split and duplicate real und imaginary parts of taps.
307  * a: | ai,i+3 | ar,i+3 | … | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
308  * b: | bi,i+3 | br,i+3 | … | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
309  * b_real: | br,i+3 | br,i+3 | … | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
310  * b_imag: | bi,i+3 | bi,i+3 | … | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
311  */
312  __m256 a = _mm256_load_ps((const float*)&input[i]);
313  __m256 b = _mm256_load_ps((const float*)&taps[i]);
314  __m256 b_real = _mm256_moveldup_ps(b);
315  __m256 b_imag = _mm256_movehdup_ps(b);
316 
317  // Add | ai⋅br,i+3 | ar⋅br,i+3 | … | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
318  sum_a_mult_b_real = _mm256_add_ps(sum_a_mult_b_real, _mm256_mul_ps(a, b_real));
319  // Add | ai⋅bi,i+3 | −ar⋅bi,i+3 | … | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
320  sum_a_mult_b_imag = _mm256_addsub_ps(sum_a_mult_b_imag, _mm256_mul_ps(a, b_imag));
321  }
322 
323  // Swap position of −ar⋅bi and ai⋅bi.
324  sum_a_mult_b_imag = _mm256_permute_ps(sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
325  // | ai⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains four such partial sums.
326  __m256 sum = _mm256_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
327  /* Sum the four partial sums: Add high half of vector sum to the low one, i.e.
328  * s1 + s3 and s0 + s2 …
329  */
330  sum = _mm256_add_ps(sum, _mm256_permute2f128_ps(sum, sum, 0x01));
331  // … and now (s0 + s2) + (s1 + s3)
332  sum = _mm256_add_ps(sum, _mm256_permute_ps(sum, _MM_SHUFFLE(1, 0, 3, 2)));
333  // Store result.
334  __m128 lower = _mm256_extractf128_ps(sum, 0);
335  _mm_storel_pi((__m64*)result, lower);
336 
337  // Handle the last elements if num_points mod 4 is bigger than 0.
338  for (long unsigned i = num_points & ~3u; i < num_points; ++i) {
339  *result += lv_cmake(lv_creal(input[i]) * lv_creal(taps[i]) +
340  lv_cimag(input[i]) * lv_cimag(taps[i]),
341  lv_cimag(input[i]) * lv_creal(taps[i]) -
342  lv_creal(input[i]) * lv_cimag(taps[i]));
343  }
344 }
345 #endif /* LV_HAVE_AVX */
346 
347 #ifdef LV_HAVE_SSE3
348 
349 #include <pmmintrin.h>
350 #include <xmmintrin.h>
351 
353  const lv_32fc_t* input,
354  const lv_32fc_t* taps,
355  unsigned int num_points)
356 {
357  // Partial sums for indices i and i+1.
358  __m128 sum_a_mult_b_real = _mm_setzero_ps();
359  __m128 sum_a_mult_b_imag = _mm_setzero_ps();
360 
361  for (long unsigned i = 0; i < (num_points & ~1u); i += 2) {
362  /* Two complex elements a time are processed.
363  * (ar + j⋅ai)*conj(br + j⋅bi) =
364  * ar⋅br + ai⋅bi + j⋅(ai⋅br − ar⋅bi)
365  */
366 
367  /* Load input and taps, split and duplicate real und imaginary parts of taps.
368  * a: | ai,i+1 | ar,i+1 | ai,i+0 | ar,i+0 |
369  * b: | bi,i+1 | br,i+1 | bi,i+0 | br,i+0 |
370  * b_real: | br,i+1 | br,i+1 | br,i+0 | br,i+0 |
371  * b_imag: | bi,i+1 | bi,i+1 | bi,i+0 | bi,i+0 |
372  */
373  __m128 a = _mm_load_ps((const float*)&input[i]);
374  __m128 b = _mm_load_ps((const float*)&taps[i]);
375  __m128 b_real = _mm_moveldup_ps(b);
376  __m128 b_imag = _mm_movehdup_ps(b);
377 
378  // Add | ai⋅br,i+1 | ar⋅br,i+1 | ai⋅br,i+0 | ar⋅br,i+0 | to partial sum.
379  sum_a_mult_b_real = _mm_add_ps(sum_a_mult_b_real, _mm_mul_ps(a, b_real));
380  // Add | ai⋅bi,i+1 | −ar⋅bi,i+1 | ai⋅bi,i+0 | −ar⋅bi,i+0 | to partial sum.
381  sum_a_mult_b_imag = _mm_addsub_ps(sum_a_mult_b_imag, _mm_mul_ps(a, b_imag));
382  }
383 
384  // Swap position of −ar⋅bi and ai⋅bi.
385  sum_a_mult_b_imag =
386  _mm_shuffle_ps(sum_a_mult_b_imag, sum_a_mult_b_imag, _MM_SHUFFLE(2, 3, 0, 1));
387  // | ai⋅br + ai⋅bi | ai⋅br − ar⋅bi |, sum contains two such partial sums.
388  __m128 sum = _mm_add_ps(sum_a_mult_b_real, sum_a_mult_b_imag);
389  // Sum the two partial sums.
390  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 0, 3, 2)));
391  // Store result.
392  _mm_storel_pi((__m64*)result, sum);
393 
394  // Handle the last element if num_points mod 2 is 1.
395  if (num_points & 1u) {
396  *result += lv_cmake(
397  lv_creal(input[num_points - 1]) * lv_creal(taps[num_points - 1]) +
398  lv_cimag(input[num_points - 1]) * lv_cimag(taps[num_points - 1]),
399  lv_cimag(input[num_points - 1]) * lv_creal(taps[num_points - 1]) -
400  lv_creal(input[num_points - 1]) * lv_cimag(taps[num_points - 1]));
401  }
402 }
403 
404 #endif /*LV_HAVE_SSE3*/
405 
406 
407 #ifdef LV_HAVE_GENERIC
408 
409 
411  const lv_32fc_t* input,
412  const lv_32fc_t* taps,
413  unsigned int num_points)
414 {
415 
416  const unsigned int num_bytes = num_points * 8;
417 
418  float* res = (float*)result;
419  float* in = (float*)input;
420  float* tp = (float*)taps;
421  unsigned int n_2_ccomplex_blocks = num_bytes >> 4;
422 
423  float sum0[2] = { 0, 0 };
424  float sum1[2] = { 0, 0 };
425  unsigned int i = 0;
426 
427  for (i = 0; i < n_2_ccomplex_blocks; ++i) {
428  sum0[0] += in[0] * tp[0] + in[1] * tp[1];
429  sum0[1] += (-in[0] * tp[1]) + in[1] * tp[0];
430  sum1[0] += in[2] * tp[2] + in[3] * tp[3];
431  sum1[1] += (-in[2] * tp[3]) + in[3] * tp[2];
432 
433  in += 4;
434  tp += 4;
435  }
436 
437  res[0] = sum0[0] + sum1[0];
438  res[1] = sum0[1] + sum1[1];
439 
440  if (num_bytes >> 3 & 1) {
441  *result += input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]);
442  }
443 }
444 
445 #endif /*LV_HAVE_GENERIC*/
446 
447 
448 #if LV_HAVE_SSE && LV_HAVE_64
449 
450 static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse(lv_32fc_t* result,
451  const lv_32fc_t* input,
452  const lv_32fc_t* taps,
453  unsigned int num_points)
454 {
455 
456  const unsigned int num_bytes = num_points * 8;
457 
459  static const uint32_t conjugator[4] = {
460  0x00000000, 0x80000000, 0x00000000, 0x80000000
461  };
462 
464  "# ccomplex_conjugate_dotprod_generic (float* result, const float *input,\n\t"
465  "# const float *taps, unsigned num_bytes)\n\t"
466  "# float sum0 = 0;\n\t"
467  "# float sum1 = 0;\n\t"
468  "# float sum2 = 0;\n\t"
469  "# float sum3 = 0;\n\t"
470  "# do {\n\t"
471  "# sum0 += input[0] * taps[0] - input[1] * taps[1];\n\t"
472  "# sum1 += input[0] * taps[1] + input[1] * taps[0];\n\t"
473  "# sum2 += input[2] * taps[2] - input[3] * taps[3];\n\t"
474  "# sum3 += input[2] * taps[3] + input[3] * taps[2];\n\t"
475  "# input += 4;\n\t"
476  "# taps += 4; \n\t"
477  "# } while (--n_2_ccomplex_blocks != 0);\n\t"
478  "# result[0] = sum0 + sum2;\n\t"
479  "# result[1] = sum1 + sum3;\n\t"
480  "# TODO: prefetch and better scheduling\n\t"
481  " xor %%r9, %%r9\n\t"
482  " xor %%r10, %%r10\n\t"
483  " movq %[conjugator], %%r9\n\t"
484  " movq %%rcx, %%rax\n\t"
485  " movaps 0(%%r9), %%xmm8\n\t"
486  " movq %%rcx, %%r8\n\t"
487  " movq %[rsi], %%r9\n\t"
488  " movq %[rdx], %%r10\n\t"
489  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
490  " movaps 0(%%r9), %%xmm0\n\t"
491  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
492  " movups 0(%%r10), %%xmm2\n\t"
493  " shr $5, %%rax # rax = n_2_ccomplex_blocks / 2\n\t"
494  " shr $4, %%r8\n\t"
495  " xorps %%xmm8, %%xmm2\n\t"
496  " jmp .%=L1_test\n\t"
497  " # 4 taps / loop\n\t"
498  " # something like ?? cycles / loop\n\t"
499  ".%=Loop1: \n\t"
500  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
501  "# movaps (%%r9), %%xmmA\n\t"
502  "# movaps (%%r10), %%xmmB\n\t"
503  "# movaps %%xmmA, %%xmmZ\n\t"
504  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
505  "# mulps %%xmmB, %%xmmA\n\t"
506  "# mulps %%xmmZ, %%xmmB\n\t"
507  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
508  "# xorps %%xmmPN, %%xmmA\n\t"
509  "# movaps %%xmmA, %%xmmZ\n\t"
510  "# unpcklps %%xmmB, %%xmmA\n\t"
511  "# unpckhps %%xmmB, %%xmmZ\n\t"
512  "# movaps %%xmmZ, %%xmmY\n\t"
513  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
514  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
515  "# addps %%xmmZ, %%xmmA\n\t"
516  "# addps %%xmmA, %%xmmC\n\t"
517  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
518  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
519  " movaps 16(%%r9), %%xmm1\n\t"
520  " movaps %%xmm0, %%xmm4\n\t"
521  " mulps %%xmm2, %%xmm0\n\t"
522  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
523  " movaps 16(%%r10), %%xmm3\n\t"
524  " movaps %%xmm1, %%xmm5\n\t"
525  " xorps %%xmm8, %%xmm3\n\t"
526  " addps %%xmm0, %%xmm6\n\t"
527  " mulps %%xmm3, %%xmm1\n\t"
528  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
529  " addps %%xmm1, %%xmm6\n\t"
530  " mulps %%xmm4, %%xmm2\n\t"
531  " movaps 32(%%r9), %%xmm0\n\t"
532  " addps %%xmm2, %%xmm7\n\t"
533  " mulps %%xmm5, %%xmm3\n\t"
534  " add $32, %%r9\n\t"
535  " movaps 32(%%r10), %%xmm2\n\t"
536  " addps %%xmm3, %%xmm7\n\t"
537  " add $32, %%r10\n\t"
538  " xorps %%xmm8, %%xmm2\n\t"
539  ".%=L1_test:\n\t"
540  " dec %%rax\n\t"
541  " jge .%=Loop1\n\t"
542  " # We've handled the bulk of multiplies up to here.\n\t"
543  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
544  " # If so, we've got 2 more taps to do.\n\t"
545  " and $1, %%r8\n\t"
546  " je .%=Leven\n\t"
547  " # The count was odd, do 2 more taps.\n\t"
548  " # Note that we've already got mm0/mm2 preloaded\n\t"
549  " # from the main loop.\n\t"
550  " movaps %%xmm0, %%xmm4\n\t"
551  " mulps %%xmm2, %%xmm0\n\t"
552  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
553  " addps %%xmm0, %%xmm6\n\t"
554  " mulps %%xmm4, %%xmm2\n\t"
555  " addps %%xmm2, %%xmm7\n\t"
556  ".%=Leven:\n\t"
557  " # neg inversor\n\t"
558  " xorps %%xmm1, %%xmm1\n\t"
559  " mov $0x80000000, %%r9\n\t"
560  " movd %%r9, %%xmm1\n\t"
561  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
562  " # pfpnacc\n\t"
563  " xorps %%xmm1, %%xmm6\n\t"
564  " movaps %%xmm6, %%xmm2\n\t"
565  " unpcklps %%xmm7, %%xmm6\n\t"
566  " unpckhps %%xmm7, %%xmm2\n\t"
567  " movaps %%xmm2, %%xmm3\n\t"
568  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
569  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
570  " addps %%xmm2, %%xmm6\n\t"
571  " # xmm6 = r1 i2 r3 i4\n\t"
572  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
573  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
574  " movlps %%xmm6, (%[rdi]) # store low 2x32 bits (complex) "
575  "to memory\n\t"
576  :
577  : [rsi] "r"(input),
578  [rdx] "r"(taps),
579  "c"(num_bytes),
580  [rdi] "r"(result),
581  [conjugator] "r"(conjugator)
582  : "rax", "r8", "r9", "r10");
583 
584  int getem = num_bytes % 16;
585 
586  for (; getem > 0; getem -= 8) {
587  *result += (input[(num_bytes >> 3) - 1] * lv_conj(taps[(num_bytes >> 3) - 1]));
588  }
589 }
590 #endif
591 
592 #if LV_HAVE_SSE && LV_HAVE_32
593 static inline void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse_32(lv_32fc_t* result,
594  const lv_32fc_t* input,
595  const lv_32fc_t* taps,
596  unsigned int num_points)
597 {
598 
599  const unsigned int num_bytes = num_points * 8;
600 
602  static const uint32_t conjugator[4] = {
603  0x00000000, 0x80000000, 0x00000000, 0x80000000
604  };
605 
606  int bound = num_bytes >> 4;
607  int leftovers = num_bytes % 16;
608 
610  " #pushl %%ebp\n\t"
611  " #movl %%esp, %%ebp\n\t"
612  " #movl 12(%%ebp), %%eax # input\n\t"
613  " #movl 16(%%ebp), %%edx # taps\n\t"
614  " #movl 20(%%ebp), %%ecx # n_bytes\n\t"
615  " movaps 0(%[conjugator]), %%xmm1\n\t"
616  " xorps %%xmm6, %%xmm6 # zero accumulators\n\t"
617  " movaps 0(%[eax]), %%xmm0\n\t"
618  " xorps %%xmm7, %%xmm7 # zero accumulators\n\t"
619  " movaps 0(%[edx]), %%xmm2\n\t"
620  " movl %[ecx], (%[out])\n\t"
621  " shrl $5, %[ecx] # ecx = n_2_ccomplex_blocks / 2\n\t"
622 
623  " xorps %%xmm1, %%xmm2\n\t"
624  " jmp .%=L1_test\n\t"
625  " # 4 taps / loop\n\t"
626  " # something like ?? cycles / loop\n\t"
627  ".%=Loop1: \n\t"
628  "# complex prod: C += A * B, w/ temp Z & Y (or B), xmmPN=$0x8000000080000000\n\t"
629  "# movaps (%[eax]), %%xmmA\n\t"
630  "# movaps (%[edx]), %%xmmB\n\t"
631  "# movaps %%xmmA, %%xmmZ\n\t"
632  "# shufps $0xb1, %%xmmZ, %%xmmZ # swap internals\n\t"
633  "# mulps %%xmmB, %%xmmA\n\t"
634  "# mulps %%xmmZ, %%xmmB\n\t"
635  "# # SSE replacement for: pfpnacc %%xmmB, %%xmmA\n\t"
636  "# xorps %%xmmPN, %%xmmA\n\t"
637  "# movaps %%xmmA, %%xmmZ\n\t"
638  "# unpcklps %%xmmB, %%xmmA\n\t"
639  "# unpckhps %%xmmB, %%xmmZ\n\t"
640  "# movaps %%xmmZ, %%xmmY\n\t"
641  "# shufps $0x44, %%xmmA, %%xmmZ # b01000100\n\t"
642  "# shufps $0xee, %%xmmY, %%xmmA # b11101110\n\t"
643  "# addps %%xmmZ, %%xmmA\n\t"
644  "# addps %%xmmA, %%xmmC\n\t"
645  "# A=xmm0, B=xmm2, Z=xmm4\n\t"
646  "# A'=xmm1, B'=xmm3, Z'=xmm5\n\t"
647  " movaps 16(%[edx]), %%xmm3\n\t"
648  " movaps %%xmm0, %%xmm4\n\t"
649  " xorps %%xmm1, %%xmm3\n\t"
650  " mulps %%xmm2, %%xmm0\n\t"
651  " movaps 16(%[eax]), %%xmm1\n\t"
652  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
653  " movaps %%xmm1, %%xmm5\n\t"
654  " addps %%xmm0, %%xmm6\n\t"
655  " mulps %%xmm3, %%xmm1\n\t"
656  " shufps $0xb1, %%xmm5, %%xmm5 # swap internals\n\t"
657  " addps %%xmm1, %%xmm6\n\t"
658  " movaps 0(%[conjugator]), %%xmm1\n\t"
659  " mulps %%xmm4, %%xmm2\n\t"
660  " movaps 32(%[eax]), %%xmm0\n\t"
661  " addps %%xmm2, %%xmm7\n\t"
662  " mulps %%xmm5, %%xmm3\n\t"
663  " addl $32, %[eax]\n\t"
664  " movaps 32(%[edx]), %%xmm2\n\t"
665  " addps %%xmm3, %%xmm7\n\t"
666  " xorps %%xmm1, %%xmm2\n\t"
667  " addl $32, %[edx]\n\t"
668  ".%=L1_test:\n\t"
669  " decl %[ecx]\n\t"
670  " jge .%=Loop1\n\t"
671  " # We've handled the bulk of multiplies up to here.\n\t"
672  " # Let's sse if original n_2_ccomplex_blocks was odd.\n\t"
673  " # If so, we've got 2 more taps to do.\n\t"
674  " movl 0(%[out]), %[ecx] # n_2_ccomplex_blocks\n\t"
675  " shrl $4, %[ecx]\n\t"
676  " andl $1, %[ecx]\n\t"
677  " je .%=Leven\n\t"
678  " # The count was odd, do 2 more taps.\n\t"
679  " # Note that we've already got mm0/mm2 preloaded\n\t"
680  " # from the main loop.\n\t"
681  " movaps %%xmm0, %%xmm4\n\t"
682  " mulps %%xmm2, %%xmm0\n\t"
683  " shufps $0xb1, %%xmm4, %%xmm4 # swap internals\n\t"
684  " addps %%xmm0, %%xmm6\n\t"
685  " mulps %%xmm4, %%xmm2\n\t"
686  " addps %%xmm2, %%xmm7\n\t"
687  ".%=Leven:\n\t"
688  " # neg inversor\n\t"
689  " #movl 8(%%ebp), %[eax] \n\t"
690  " xorps %%xmm1, %%xmm1\n\t"
691  " movl $0x80000000, (%[out])\n\t"
692  " movss (%[out]), %%xmm1\n\t"
693  " shufps $0x11, %%xmm1, %%xmm1 # b00010001 # 0 -0 0 -0\n\t"
694  " # pfpnacc\n\t"
695  " xorps %%xmm1, %%xmm6\n\t"
696  " movaps %%xmm6, %%xmm2\n\t"
697  " unpcklps %%xmm7, %%xmm6\n\t"
698  " unpckhps %%xmm7, %%xmm2\n\t"
699  " movaps %%xmm2, %%xmm3\n\t"
700  " shufps $0x44, %%xmm6, %%xmm2 # b01000100\n\t"
701  " shufps $0xee, %%xmm3, %%xmm6 # b11101110\n\t"
702  " addps %%xmm2, %%xmm6\n\t"
703  " # xmm6 = r1 i2 r3 i4\n\t"
704  " #movl 8(%%ebp), %[eax] # @result\n\t"
705  " movhlps %%xmm6, %%xmm4 # xmm4 = r3 i4 ?? ??\n\t"
706  " addps %%xmm4, %%xmm6 # xmm6 = r1+r3 i2+i4 ?? ??\n\t"
707  " movlps %%xmm6, (%[out]) # store low 2x32 bits (complex) "
708  "to memory\n\t"
709  " #popl %%ebp\n\t"
710  :
711  : [eax] "r"(input),
712  [edx] "r"(taps),
713  [ecx] "r"(num_bytes),
714  [out] "r"(result),
715  [conjugator] "r"(conjugator));
716 
717  for (; leftovers > 0; leftovers -= 8) {
718  *result += (input[(bound << 1)] * lv_conj(taps[(bound << 1)]));
719  }
720 }
721 #endif /*LV_HAVE_SSE*/
722 
723 
724 #endif /*INCLUDED_volk_32fc_x2_conjugate_dot_prod_32fc_a_H*/
static void volk_32fc_x2_conjugate_dot_prod_32fc_a_sse3(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:352
static void volk_32fc_x2_conjugate_dot_prod_32fc_a_generic(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:410
static void volk_32fc_x2_conjugate_dot_prod_32fc_a_avx(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:291
static void volk_32fc_x2_conjugate_dot_prod_32fc_generic(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:68
static void volk_32fc_x2_conjugate_dot_prod_32fc_u_avx(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:109
static void volk_32fc_x2_conjugate_dot_prod_32fc_u_sse3(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:171
static void volk_32fc_x2_conjugate_dot_prod_32fc_neon(lv_32fc_t *result, const lv_32fc_t *input, const lv_32fc_t *taps, unsigned int num_points)
Definition: volk_32fc_x2_conjugate_dot_prod_32fc.h:227
#define __VOLK_VOLATILE
Definition: volk_common.h:64
#define __VOLK_PREFETCH(addr)
Definition: volk_common.h:62
#define __VOLK_ASM
Definition: volk_common.h:63
#define __VOLK_ATTR_ALIGNED(x)
Definition: volk_common.h:56
#define lv_cimag(x)
Definition: volk_complex.h:89
#define lv_conj(x)
Definition: volk_complex.h:91
#define lv_cmake(r, i)
Definition: volk_complex.h:68
#define lv_creal(x)
Definition: volk_complex.h:87
float complex lv_32fc_t
Definition: volk_complex.h:65
for i
Definition: volk_config_fixed.tmpl.h:25