1 ;******************************************************************************
2 ;* Core video DSP functions
3 ;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
5 ;* This file is part of FFmpeg.
7 ;* FFmpeg is free software; you can redistribute it and/or
8 ;* modify it under the terms of the GNU Lesser General Public
9 ;* License as published by the Free Software Foundation; either
10 ;* version 2.1 of the License, or (at your option) any later version.
12 ;* FFmpeg is distributed in the hope that it will be useful,
13 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 ;* Lesser General Public License for more details.
17 ;* You should have received a copy of the GNU Lesser General Public
18 ;* License along with FFmpeg; if not, write to the Free Software
19 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 ;******************************************************************************
22 %include "libavutil/x86/x86util.asm"
26 ; slow vertical extension loop function. Works with variable-width, and
27 ; does per-line reading/writing of source data
29 %macro V_COPY_ROW 2 ; type (top/body/bottom), h
; Copy %2 rows of wmp bytes from srcq to dstq, one mmsize vector at a time.
; srcq/dstq point at the END of a row and wq holds a negative byte count,
; so wq climbs toward zero as the copy proceeds.
; NOTE(review): this chunk of the file is missing interior lines -- the loop
; labels, conditional jumps, the %endif for the %ifidn below, and %endmacro
; are not visible here; comments on control flow are therefore hedged.
31 mov wq, r7mp ; (re)load w from its stack slot each row (r7mp = wmp)
33 movu m0, [srcq+wq] ; m0 = read(mmsize) -- unaligned vector load
34 movu [dstq+wq], m0 ; write(m0, mmsize)
35 add wq, mmsize ; advance one vector; negative w moves toward 0
36 cmp wq, -mmsize ; loop while at least one full vector remains
38 movu m0, [srcq-mmsize] ; tail: re-copy the final mmsize bytes of the row,
39 movu [dstq-mmsize], m0 ; overlapping the previous store to cover any remainder
40 %ifidn %1, body ; only the body copy advances the source pointer;
41 add srcq, src_strideq ; src += src_stride
43 add dstq, dst_strideq ; dst += dst_stride (NOTE(review): the %endif closing the %ifidn is among the missing lines)
44 dec %2 ; } while (--$h)
50 ; | | <- top is copied from first line in body of source
52 ; | | <- body is copied verbatim (line-by-line) from source
54 ; | | <- bottom is copied from last line in body of source
; Variable-width vertical edge extension: replicates the first body row
; upward start_y times, copies the body verbatim, then replicates the last
; body row downward bh times.
; NOTE(review): the two cglobal prototypes below are alternatives for
; different ABIs/arches; the surrounding %if/%else/%endif scaffolding and
; the first prototype's continuation line are not visible in this chunk.
57 cglobal emu_edge_vvar, 7, 8, 1, dst, dst_stride, src, src_stride, \
60 cglobal emu_edge_vvar, 1, 6, 1, dst, src, start_y, end_y, bh, w
61 %define src_strideq r3mp ; in this variant the strides stay in stack slots
62 %define dst_strideq r1mp
68 sub bhq, end_yq ; bh -= end_y (rows to replicate below the body)
69 sub end_yq, start_yq ; end_y -= start_y (rows in the body proper)
70 add srcq, r7mp ; point src at the end of its row (r7mp = wmp)
71 add dstq, r7mp ; point dst at the end of its row (r7mp = wmp)
72 neg r7mp ; store -w so rows are addressed with negative offsets
73 test start_yq, start_yq ; if (start_y) { -- any rows to extend above?
75 V_COPY_ROW top, start_yq ; replicate the first body row start_y times
77 V_COPY_ROW body, end_yq ; copy the body line-by-line
78 test bhq, bhq ; if (bh) { -- any rows to extend below?
80 sub srcq, src_strideq ; step src back onto the last body row
81 V_COPY_ROW bottom, bhq ; replicate the last body row bh times
; Variable-width horizontal edge extension: for each of h rows, fills
; n_words*2 bytes of dst with the single edge pixel found at dst[start_x].
95 cglobal emu_edge_hvar, 5, 6, 1, dst, dst_stride, start_x, n_words, h, w
96 lea dstq, [dstq+n_wordsq*2] ; dst += n_words*2: point at the END of the fill region
98 lea start_xq, [start_xq+n_wordsq*2] ; keep the edge byte addressable from the moved dst
; NOTE(review): the following lines are alternative byte-splat paths for
; different SIMD levels (AVX2 broadcast vs. scalar imul vs. SSE/MMX
; shuffles); the %if cpuflag()/%else/%endif lines are not visible here.
101 vpbroadcastb m0, [dstq+start_xq] ; AVX2: broadcast the edge byte to every lane
102 mov wq, n_wordsq ; initialize w (words remaining, walked toward 0)
104 movzx wd, byte [dstq+start_xq] ; scalar path: w = edge byte, zero-extended
105 imul wd, 0x01010101 ; replicate the byte into all 4 bytes of wd
107 mov wq, n_wordsq ; initialize w
109 pshufd m0, m0, q0000 ; splat low dword across the xmm register
111 punpckldq m0, m0 ; splat low dword across the mmx register
115 movu [dstq+wq*2], m0 ; write mmsize bytes of the splatted pixel
116 add wq, mmsize/2 ; advance mmsize/2 words toward zero
117 cmp wq, -(mmsize/2) ; loop while at least mmsize/2 words remain
119 movu [dstq-mmsize], m0 ; tail: overlapping store covers the remainder
120 add dstq, dst_strideq ; dst += dst_stride
121 dec hq ; } while (--h)
134 %if HAVE_AVX2_EXTERNAL ; NOTE(review): guarded body and matching %endif are not visible in this chunk
139 ; macro to read/write a horizontal number of pixels (%2) to/from registers
140 ; on sse, - fills xmm0-15 for consecutive sets of 16 pixels
141 ; - if (%2 & 8) fills 8 bytes into xmm$next
142 ; - if (%2 & 4) fills 4 bytes into xmm$next
143 ; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
144 ; on mmx, - fills mm0-7 for consecutive sets of 8 pixels
145 ; - if (%2 & 4) fills 4 bytes into mm$next
146 ; - if (%2 & 3) fills 1, 2 or 4 bytes in eax
147 ; writing data out is in the same way
; READ_NUM_BYTES type, n: load a fixed row of %2 bytes from srcq into a
; sequence of xmm/mmx registers plus (for a sub-4-byte tail) val.
; NOTE(review): this chunk is missing interior lines -- the %rep over full
; vectors and several %if/%else/%endif guard lines are not visible, so the
; surviving loads below belong to alternative size/ISA branches.
148 %macro READ_NUM_BYTES 2
149 %assign %%off 0 ; offset in source buffer
150 %assign %%mmx_idx 0 ; mmx register index
151 %assign %%xmm_idx 0 ; xmm register index
155 movu xmm %+ %%xmm_idx, [srcq+%%off] ; full 16-byte chunk into next xmm
156 %assign %%xmm_idx %%xmm_idx+1
158 movu mm %+ %%mmx_idx, [srcq+%%off] ; full 8-byte chunk into next mmx reg
159 %assign %%mmx_idx %%mmx_idx+1
161 %assign %%off %%off+mmsize
166 %if %2 > 16 && (%2-%%off) > 8
167 movu xmm %+ %%xmm_idx, [srcq+%2-16] ; overlapping load covers the 8..15-byte tail
168 %assign %%xmm_idx %%xmm_idx+1
171 movq mm %+ %%mmx_idx, [srcq+%%off] ; exact 8-byte chunk
172 %assign %%mmx_idx %%mmx_idx+1
173 %assign %%off %%off+8
175 %endif ; (%2-%%off) >= 8
179 %if %2 > 8 && (%2-%%off) > 4
180 movq mm %+ %%mmx_idx, [srcq+%2-8] ; overlapping 8-byte load covers a 4..7-byte tail
183 movd mm %+ %%mmx_idx, [srcq+%%off] ; exact 4-byte chunk
184 %assign %%off %%off+4
186 %assign %%mmx_idx %%mmx_idx+1
187 %endif ; (%2-%%off) >= 4
191 movd mm %+ %%mmx_idx, [srcq+%2-4] ; overlapping 4-byte load for a short tail
192 %elif (%2-%%off) == 1
193 mov valb, [srcq+%2-1] ; 1-byte tail into val (low byte)
194 %elif (%2-%%off) == 2
195 mov valw, [srcq+%2-2] ; 2-byte tail into val (low word)
197 mov valb, [srcq+%2-1] ; 3-byte tail: last byte ...
199 mov valw, [srcq+%2-3] ; ... plus an overlapping word read
201 %endif ; (%2-%%off) >= 1
202 %endmacro ; READ_NUM_BYTES
; WRITE_NUM_BYTES type, n: store the registers filled by READ_NUM_BYTES
; back out to dstq, mirroring its chunking exactly (same register order,
; same overlapping-tail trick).
; NOTE(review): as with READ_NUM_BYTES, the %rep and several %if/%else
; guard lines are missing from this chunk; the stores below belong to
; alternative size/ISA branches.
204 %macro WRITE_NUM_BYTES 2
205 %assign %%off 0 ; offset in destination buffer
206 %assign %%mmx_idx 0 ; mmx register index
207 %assign %%xmm_idx 0 ; xmm register index
211 movu [dstq+%%off], xmm %+ %%xmm_idx ; full 16-byte chunk from next xmm
212 %assign %%xmm_idx %%xmm_idx+1
214 movu [dstq+%%off], mm %+ %%mmx_idx ; full 8-byte chunk from next mmx reg
215 %assign %%mmx_idx %%mmx_idx+1
217 %assign %%off %%off+mmsize
222 %if %2 > 16 && (%2-%%off) > 8
223 movu [dstq+%2-16], xmm %+ %%xmm_idx ; overlapping store covers the 8..15-byte tail
224 %assign %%xmm_idx %%xmm_idx+1
227 movq [dstq+%%off], mm %+ %%mmx_idx ; exact 8-byte chunk
228 %assign %%mmx_idx %%mmx_idx+1
229 %assign %%off %%off+8
231 %endif ; (%2-%%off) >= 8
235 %if %2 > 8 && (%2-%%off) > 4
236 movq [dstq+%2-8], mm %+ %%mmx_idx ; overlapping 8-byte store covers a 4..7-byte tail
239 movd [dstq+%%off], mm %+ %%mmx_idx ; exact 4-byte chunk
240 %assign %%off %%off+4
242 %assign %%mmx_idx %%mmx_idx+1
243 %endif ; (%2-%%off) >= 4
247 movd [dstq+%2-4], mm %+ %%mmx_idx ; overlapping 4-byte store for a short tail
248 %elif (%2-%%off) == 1
249 mov [dstq+%2-1], valb ; 1-byte tail from val (low byte)
250 %elif (%2-%%off) == 2
251 mov [dstq+%2-2], valw ; 2-byte tail from val (low word)
253 mov [dstq+%2-3], valw ; 3-byte tail: overlapping word ...
255 mov [dstq+%2-1], valb ; ... plus the final byte
260 %endif ; (%2-%%off) >= 1
261 %endmacro ; WRITE_NUM_BYTES
263 ; vertical top/bottom extend and body copy fast loops
264 ; these are function pointers to set-width line copy functions, i.e.
265 ; they read a fixed number of pixels into set registers, and write
266 ; those out into the destination buffer
; VERTICAL_EXTEND min, max: emit one emu_edge_vfix<n> function per width n
; in the macro's range; each is the fixed-width analogue of emu_edge_vvar.
; NOTE(review): the %rep driving %%n, the %if ARCH/%else/%endif lines
; around the four alternative cglobal prototypes, the loop labels, and the
; continuation line of the third prototype are not visible in this chunk.
267 %macro VERTICAL_EXTEND 2
272 cglobal emu_edge_vfix %+ %%n, 6, 8, 0, dst, dst_stride, src, src_stride, \
273 start_y, end_y, val, bh
274 mov bhq, r6mp ; load bh from its stack slot (r6mp = bhmp)
276 cglobal emu_edge_vfix %+ %%n, 0, 6, 0, val, dst, src, start_y, end_y, bh
282 %define dst_strideq r1mp ; strides stay in stack slots in this variant
283 %define src_strideq r3mp
287 cglobal emu_edge_vfix %+ %%n, 7, 7, 1, dst, dst_stride, src, src_stride, \
290 cglobal emu_edge_vfix %+ %%n, 1, 5, 1, dst, src, start_y, end_y, bh
295 %define dst_strideq r1mp
296 %define src_strideq r3mp
299 ; FIXME move this to c wrapper?
300 sub bhq, end_yq ; bh -= end_y (rows to replicate below the body)
301 sub end_yq, start_yq ; end_y -= start_y (rows in the body proper)
303 ; extend pixels above body
304 test start_yq, start_yq ; if (start_y) { -- any rows to extend above?
306 READ_NUM_BYTES top, %%n ; load first body row into registers once
308 WRITE_NUM_BYTES top, %%n ; replay it for each extended row
309 add dstq, dst_strideq ; dst += dst_stride
310 dec start_yq ; } while (--start_y)
315 READ_NUM_BYTES body, %%n ; body: reload each source row ...
316 WRITE_NUM_BYTES body, %%n ; ... and write it out verbatim
317 add dstq, dst_strideq ; dst += dst_stride
318 add srcq, src_strideq ; src += src_stride
319 dec end_yq ; } while (--end_y)
323 test bhq, bhq ; if (bh) { -- any rows to extend below?
325 sub srcq, src_strideq ; step src back onto the last body row
326 READ_NUM_BYTES bottom, %%n ; load last body row into registers once
328 WRITE_NUM_BYTES bottom, %%n ; replay it for each extended row
329 add dstq, dst_strideq ; dst += dst_stride
330 dec bhq ; } while (--bh)
337 %endmacro ; VERTICAL_EXTEND
; Instantiate the fixed-width vertical extenders for several width ranges.
; NOTE(review): the INIT_MMX/INIT_XMM (SIMD-width selection) lines that
; normally sit between these invocations are not visible in this chunk,
; which is presumably why the 16..22 range appears twice -- TODO confirm
; against the full file.
340 VERTICAL_EXTEND 1, 15
342 VERTICAL_EXTEND 16, 22
346 VERTICAL_EXTEND 16, 22
348 ; left/right (horizontal) fast extend functions
349 ; these are essentially identical to the vertical extend ones above,
350 ; just left/right separated because number of pixels to extend is
351 ; obviously not the same on both sides.
; READ_V_PIXEL n, src: splat a single edge byte so it can be written n
; bytes wide by WRITE_V_PIXEL.
; NOTE(review): most of this macro's body (the byte load and the SIMD
; splat branches guarded by the %if whose %endif survives below) is
; missing from this chunk; only the scalar replication line is visible.
353 %macro READ_V_PIXEL 2
358 imul vald, 0x01010101 ; replicate the edge byte into all 4 bytes of vald
365 %endif ; mmsize == 16
368 %endmacro ; READ_V_PIXEL
; WRITE_V_PIXEL n, dst: store %1 bytes of the splatted edge pixel to %2,
; in descending chunk sizes (vector, 8, 4, then a short tail).
; NOTE(review): most store instructions and the %if/%rep lines pairing
; with the surviving %assign/%endif lines below are missing from this
; chunk; offsets are tracked in %%off exactly as in WRITE_NUM_BYTES.
370 %macro WRITE_V_PIXEL 2
377 %assign %%off %%off+mmsize ; consumed one full vector store
382 %if %1 > 16 && %1-%%off > 8
387 %assign %%off %%off+8 ; consumed an 8-byte store
389 %endif ; %1-%%off >= 8
390 %endif ; mmsize == 16
393 %if %1 > 8 && %1-%%off > 4
398 %assign %%off %%off+4 ; consumed a 4-byte store
400 %endif ; %1-%%off >= 4
406 %assign %%off %%off+4
413 movd [%2+%%off-2], m0 ; overlapping store for a 2-byte tail -- TODO confirm against full file
417 %endif ; (%1-%%off)/2
418 %endmacro ; WRITE_V_PIXEL
; Fixed-width horizontal edge extension: each of bh rows gets %%n bytes
; filled with the edge pixel at dst[start_x].
; NOTE(review): the enclosing %macro (H_EXTEND) header, the %rep that the
; surviving %endrep below closes, the %if around the two alternative
; cglobal prototypes, and the loop label are not visible in this chunk.
424 cglobal emu_edge_hfix %+ %%n, 4, 4, 1, dst, dst_stride, start_x, bh
426 cglobal emu_edge_hfix %+ %%n, 4, 5, 1, dst, dst_stride, start_x, bh, val
429 READ_V_PIXEL %%n, [dstq+start_xq] ; splat the edge pixel into registers
430 WRITE_V_PIXEL %%n, dstq ; write %%n bytes of it to this row
431 add dstq, dst_strideq ; dst += dst_stride
432 dec bhq ; } while (--bh)
436 %endrep ; 1+(%2-%1)/2
448 %if HAVE_AVX2_EXTERNAL ; NOTE(review): guarded body and matching %endif are not visible in this chunk
; prefetch(buf, stride, h): software-prefetch h rows starting at buf.
; NOTE(review): the %macro PREFETCH_FN header that this cglobal belongs
; to, its loop body, and RET are not visible here; only the prototype and
; one instantiation (using the prefetcht0 instruction) survive.
454 cglobal prefetch, 3, 3, 0, buf, stride, h
464 PREFETCH_FN prefetcht0