string: Cleanup memset
Remove unnecessary code for unused ZVA sizes.
For zero memsets it's faster to use DC ZVA for >= 160 bytes.
Add a define which allows skipping the ZVA size test when the
ZVA size is known to be 64 - reading dczid_el0 may be expensive.

Change-Id: Ie577c5f263b1446a1c0030c62b3f1f4a2bebfd05
Wilco Dijkstra authored and adithya2306 committed Sep 27, 2020
1 parent d15eb64 commit 7a8c222
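
For context: the fast path this commit adds is gated on DCZID_EL0, where bits [3:0] (BS) encode the DC ZVA block size as 4 << BS bytes and bit 4 (DZP) prohibits DC ZVA when set. Masking the register with 31 and comparing against 4, as the new assembly does, checks both conditions (DZP clear and block size exactly 64 bytes) with a single compare. A minimal C sketch of that decode, illustrative only and AArch64-specific (DCZID_EL0 is readable from user space):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t dczid;
    /* DCZID_EL0: bits [3:0] = BS (log2 of block size in 4-byte words),
       bit 4 = DZP (1 means DC ZVA is prohibited). */
    __asm__("mrs %0, dczid_el0" : "=r"(dczid));

    unsigned bs = (unsigned)(dczid & 15);        /* block-size field */
    unsigned dzp = (unsigned)(dczid >> 4) & 1;   /* prohibit bit */
    unsigned zva_bytes = 4u << bs;               /* BS=4 -> 64 bytes */

    /* Same test as "and zva_val, zva_val, 31; cmp zva_val, 4" below:
       true only when DZP == 0 and the ZVA block is 64 bytes. */
    int zva64_ok = (dczid & 31) == 4;

    printf("DZP=%u BS=%u -> ZVA block %u bytes; 64-byte DC ZVA path: %s\n",
           dzp, bs, zva_bytes, zva64_ok ? "yes" : "no");
    return 0;
}

Builds targeting a core whose ZVA size is known to be 64 bytes can define SKIP_ZVA_CHECK (for example by passing -DSKIP_ZVA_CHECK when assembling) to drop the mrs/cmp sequence entirely, since reading dczid_el0 may be expensive on some cores.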
Showing 1 changed file with 31 additions and 143 deletions.
174 changes: 31 additions & 143 deletions libc/arch-arm64/generic/bionic/memset.S
@@ -1,60 +1,13 @@
-/* Copyright (c) 2012-2013, Linaro Limited
-   All rights reserved.
-
-   Redistribution and use in source and binary forms, with or without
-   modification, are permitted provided that the following conditions are met:
-       * Redistributions of source code must retain the above copyright
-         notice, this list of conditions and the following disclaimer.
-       * Redistributions in binary form must reproduce the above copyright
-         notice, this list of conditions and the following disclaimer in the
-         documentation and/or other materials provided with the distribution.
-       * Neither the name of the Linaro nor the
-         names of its contributors may be used to endorse or promote products
-         derived from this software without specific prior written permission.
-
-   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
-
 /*
- * Copyright (c) 2015 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
+ * memset - fill memory with a constant byte
  *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (c) 2012-2020, Arm Limited.
+ * SPDX-License-Identifier: MIT
  */

 /* Assumptions:
  *
- * ARMv8-a, AArch64, unaligned accesses
+ * ARMv8-a, AArch64, Advanced SIMD, unaligned accesses.
  *
  */

@@ -76,12 +29,7 @@
 #define count	x2
 #define dst	x3
 #define dstend	x4
-#define tmp1	x5
-#define tmp1w	w5
-#define tmp2	x6
-#define tmp2w	w6
-#define zva_len	x7
-#define zva_lenw	w7
+#define zva_val	x5

 #define L(l) .L ## l

@@ -144,108 +92,48 @@ L(set96):
 	stp	q0, q0, [dstend, -32]
 	ret

-	.p2align 3
-	nop
+	.p2align 4
 L(set_long):
 	and	valw, valw, 255
 	bic	dst, dstin, 15
 	str	q0, [dstin]
-	cmp	count, 256
-	ccmp	valw, 0, 0, cs
-	b.eq	L(try_zva)
-L(no_zva):
-	sub	count, dstend, dst	/* Count is 16 too large.  */
-	add	dst, dst, 16
-	sub	count, count, 64 + 16	/* Adjust count and bias for loop.  */
-1:	stp	q0, q0, [dst], 64
-	stp	q0, q0, [dst, -32]
-L(tail64):
-	subs	count, count, 64
-	b.hi	1b
-2:	stp	q0, q0, [dstend, -64]
-	stp	q0, q0, [dstend, -32]
-	ret
-
-	.p2align 3
-L(try_zva):
-	mrs	tmp1, dczid_el0
-	tbnz	tmp1w, 4, L(no_zva)
-	and	tmp1w, tmp1w, 15
-	cmp	tmp1w, 4	/* ZVA size is 64 bytes.  */
-	b.ne	L(zva_128)
-
-	/* Write the first and last 64 byte aligned block using stp rather
-	   than using DC ZVA.  This is faster on some cores.
-	 */
-L(zva_64):
+	cmp	count, 160
+	ccmp	valw, 0, 0, hs
+	b.ne	L(no_zva)
+
+#ifndef SKIP_ZVA_CHECK
+	mrs	zva_val, dczid_el0
+	and	zva_val, zva_val, 31
+	cmp	zva_val, 4	/* ZVA size is 64 bytes.  */
+	b.ne	L(no_zva)
+#endif
 	str	q0, [dst, 16]
 	stp	q0, q0, [dst, 32]
 	bic	dst, dst, 63
-	stp	q0, q0, [dst, 64]
-	stp	q0, q0, [dst, 96]
-	sub	count, dstend, dst	/* Count is now 128 too large.  */
-	sub	count, count, 128+64+64	/* Adjust count and bias for loop.  */
-	add	dst, dst, 128
-	nop
-1:	dc	zva, dst
+	sub	count, dstend, dst	/* Count is now 64 too large.  */
+	sub	count, count, 128	/* Adjust count and bias for loop.  */
+
+	.p2align 4
+L(zva_loop):
 	add	dst, dst, 64
+	dc	zva, dst
 	subs	count, count, 64
-	b.hi	1b
-	stp	q0, q0, [dst, 0]
-	stp	q0, q0, [dst, 32]
+	b.hi	L(zva_loop)
 	stp	q0, q0, [dstend, -64]
 	stp	q0, q0, [dstend, -32]
 	ret

-	.p2align 3
-L(zva_128):
-	cmp	tmp1w, 5	/* ZVA size is 128 bytes.  */
-	b.ne	L(zva_other)
-
-	str	q0, [dst, 16]
+L(no_zva):
+	sub	count, dstend, dst	/* Count is 16 too large.  */
+	sub	dst, dst, 16	/* Dst is biased by -32.  */
+	sub	count, count, 64 + 16	/* Adjust count and bias for loop.  */
+L(no_zva_loop):
 	stp	q0, q0, [dst, 32]
-	stp	q0, q0, [dst, 64]
-	stp	q0, q0, [dst, 96]
-	bic	dst, dst, 127
-	sub	count, dstend, dst	/* Count is now 128 too large.  */
-	sub	count, count, 128+128	/* Adjust count and bias for loop.  */
-	add	dst, dst, 128
-1:	dc	zva, dst
-	add	dst, dst, 128
-	subs	count, count, 128
-	b.hi	1b
-	stp	q0, q0, [dstend, -128]
-	stp	q0, q0, [dstend, -96]
+	stp	q0, q0, [dst, 64]!
+	subs	count, count, 64
+	b.hi	L(no_zva_loop)
 	stp	q0, q0, [dstend, -64]
 	stp	q0, q0, [dstend, -32]
 	ret

-L(zva_other):
-	mov	tmp2w, 4
-	lsl	zva_lenw, tmp2w, tmp1w
-	add	tmp1, zva_len, 64	/* Max alignment bytes written.  */
-	cmp	count, tmp1
-	blo	L(no_zva)
-
-	sub	tmp2, zva_len, 1
-	add	tmp1, dst, zva_len
-	add	dst, dst, 16
-	subs	count, tmp1, dst	/* Actual alignment bytes to write.  */
-	bic	tmp1, tmp1, tmp2	/* Aligned dc zva start address.  */
-	beq	2f
-1:	stp	q0, q0, [dst], 64
-	stp	q0, q0, [dst, -32]
-	subs	count, count, 64
-	b.hi	1b
-2:	mov	dst, tmp1
-	sub	count, dstend, tmp1	/* Remaining bytes to write.  */
-	subs	count, count, zva_len
-	b.lo	4f
-3:	dc	zva, dst
-	add	dst, dst, zva_len
-	subs	count, count, zva_len
-	b.hs	3b
-4:	add	count, count, zva_len
-	b	L(tail64)
-
 END(memset)
