 libcutils/Android.mk                  |   4
 libcutils/arch-arm/memset32.S         |   7
 libcutils/arch-arm64/android_memset.S | 211
 libcutils/tests/Android.mk            |  32
 libcutils/tests/MemsetTest.cpp        | 181
 5 files changed, 426 insertions(+), 9 deletions(-)
diff --git a/libcutils/Android.mk b/libcutils/Android.mk
index c0faed4f5..933a77bdf 100644
--- a/libcutils/Android.mk
+++ b/libcutils/Android.mk
@@ -134,6 +134,9 @@ LOCAL_SRC_FILES := $(commonSources) \
 LOCAL_SRC_FILES_arm += \
         arch-arm/memset32.S \
 
+LOCAL_SRC_FILES_arm64 += \
+        arch-arm64/android_memset.S \
+
 LOCAL_SRC_FILES_mips += \
         arch-mips/android_memset.c \
 
@@ -146,6 +149,7 @@ LOCAL_SRC_FILES_x86_64 += \
         arch-x86_64/android_memset32_SSE2-atom.S \
 
 LOCAL_CFLAGS_arm += -DHAVE_MEMSET16 -DHAVE_MEMSET32
+LOCAL_CFLAGS_arm64 += -DHAVE_MEMSET16 -DHAVE_MEMSET32
 LOCAL_CFLAGS_mips += -DHAVE_MEMSET16 -DHAVE_MEMSET32
 LOCAL_CFLAGS_x86 += -DHAVE_MEMSET16 -DHAVE_MEMSET32
 LOCAL_CFLAGS_x86_64 += -DHAVE_MEMSET16 -DHAVE_MEMSET32
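
The -DHAVE_MEMSET16/-DHAVE_MEMSET32 flags signal that an optimized assembly implementation is linked in for that architecture, so libcutils can skip its generic C fallback. A minimal sketch of that selection pattern, assuming the fallback is guarded roughly as below (the guard style and loop body are illustrative, not copied from memory.c in this tree):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical fallback sketch: with -DHAVE_MEMSET16 on the command line
 * (as the arm64 branch above now adds), this generic version would not be
 * compiled and the assembly android_memset16 would be used instead.
 * The third argument is a length in bytes, matching the tests below. */
#if !defined(HAVE_MEMSET16)
extern "C" void android_memset16(uint16_t* dst, uint16_t value, size_t size) {
  size >>= 1;                     /* bytes -> halfword count */
  while (size-- != 0) {
    *dst++ = value;
  }
}
#endif
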
diff --git a/libcutils/arch-arm/memset32.S b/libcutils/arch-arm/memset32.S
index 469726563..6efab9f93 100644
--- a/libcutils/arch-arm/memset32.S
+++ b/libcutils/arch-arm/memset32.S
@@ -51,8 +51,10 @@ android_memset16:
 
 android_memset32:
         .fnstart
-        .save {lr}
+        .cfi_startproc
         str     lr, [sp, #-4]!
+        .cfi_def_cfa_offset 4
+        .cfi_rel_offset lr, 0
 
         /* align the destination to a cache-line */
         mov     r12, r1
@@ -89,5 +91,8 @@ android_memset32:
         strmih  lr, [r0], #2
 
         ldr     lr, [sp], #4
+        .cfi_def_cfa_offset 0
+        .cfi_restore lr
         bx      lr
+        .cfi_endproc
         .fnend
diff --git a/libcutils/arch-arm64/android_memset.S b/libcutils/arch-arm64/android_memset.S
new file mode 100644
index 000000000..9a83a6876
--- /dev/null
+++ b/libcutils/arch-arm64/android_memset.S
@@ -0,0 +1,211 @@
+/* Copyright (c) 2012, Linaro Limited
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in the
+         documentation and/or other materials provided with the distribution.
+       * Neither the name of the Linaro nor the
+         names of its contributors may be used to endorse or promote products
+         derived from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+/* By default we assume that the DC instruction can be used to zero
+   data blocks more efficiently.  In some circumstances this might be
+   unsafe, for example in an asymmetric multiprocessor environment with
+   different DC clear lengths (neither the upper nor lower lengths are
+   safe to use). */
+
+#define dst        x0
+#define count      x2
+#define tmp1       x3
+#define tmp1w      w3
+#define tmp2       x4
+#define tmp2w      w4
+#define zva_len_x  x5
+#define zva_len    w5
+#define zva_bits_x x6
+
+#define A_l        x1
+#define A_lw       w1
+#define tmp3w      w9
+
+#define ENTRY(f) \
+  .text; \
+  .globl f; \
+  .align 0; \
+  .type f, %function; \
+  f: \
+  .cfi_startproc \
+
+#define END(f) \
+  .cfi_endproc; \
+  .size f, .-f; \
+
+ENTRY(android_memset16)
+        ands    A_lw, A_lw, #0xffff
+        b.eq    .Lzero_mem
+        orr     A_lw, A_lw, A_lw, lsl #16
+        b       .Lexpand_to_64
+END(android_memset16)
+
+ENTRY(android_memset32)
+        cmp     A_lw, #0
+        b.eq    .Lzero_mem
+.Lexpand_to_64:
+        orr     A_l, A_l, A_l, lsl #32
+.Ltail_maybe_long:
+        cmp     count, #64
+        b.ge    .Lnot_short
+.Ltail_maybe_tiny:
+        cmp     count, #15
+        b.le    .Ltail15tiny
+.Ltail63:
+        ands    tmp1, count, #0x30
+        b.eq    .Ltail15
+        add     dst, dst, tmp1
+        cmp     tmp1w, #0x20
+        b.eq    1f
+        b.lt    2f
+        stp     A_l, A_l, [dst, #-48]
+1:
+        stp     A_l, A_l, [dst, #-32]
+2:
+        stp     A_l, A_l, [dst, #-16]
+
+.Ltail15:
+        and     count, count, #15
+        add     dst, dst, count
+        stp     A_l, A_l, [dst, #-16]   /* Repeat some/all of last store. */
+        ret
+
+.Ltail15tiny:
+        /* Set up to 15 bytes.  Does not assume earlier memory
+           being set. */
+        tbz     count, #3, 1f
+        str     A_l, [dst], #8
+1:
+        tbz     count, #2, 1f
+        str     A_lw, [dst], #4
+1:
+        tbz     count, #1, 1f
+        strh    A_lw, [dst], #2
+1:
+        ret
+
+        /* Critical loop.  Start at a new cache line boundary.  Assuming
+         * 64 bytes per line, this ensures the entire loop is in one line. */
+        .p2align 6
+.Lnot_short:
+        neg     tmp2, dst
+        ands    tmp2, tmp2, #15
+        b.eq    2f
+        /* Bring DST to 128-bit (16-byte) alignment.  We know that there's
+         * more than that to set, so we simply store 16 bytes and advance by
+         * the amount required to reach alignment. */
+        sub     count, count, tmp2
+        stp     A_l, A_l, [dst]
+        add     dst, dst, tmp2
+        /* There may be less than 63 bytes to go now. */
+        cmp     count, #63
+        b.le    .Ltail63
+2:
+        sub     dst, dst, #16   /* Pre-bias. */
+        sub     count, count, #64
+1:
+        stp     A_l, A_l, [dst, #16]
+        stp     A_l, A_l, [dst, #32]
+        stp     A_l, A_l, [dst, #48]
+        stp     A_l, A_l, [dst, #64]!
+        subs    count, count, #64
+        b.ge    1b
+        tst     count, #0x3f
+        add     dst, dst, #16
+        b.ne    .Ltail63
+        ret
+
+        /* For zeroing memory, check to see if we can use the ZVA feature to
+         * zero entire 'cache' lines. */
+.Lzero_mem:
+        mov     A_l, #0
+        cmp     count, #63
+        b.le    .Ltail_maybe_tiny
+        neg     tmp2, dst
+        ands    tmp2, tmp2, #15
+        b.eq    1f
+        sub     count, count, tmp2
+        stp     A_l, A_l, [dst]
+        add     dst, dst, tmp2
+        cmp     count, #63
+        b.le    .Ltail63
+1:
+        /* For zeroing small amounts of memory, it's not worth setting up
+         * the line-clear code. */
+        cmp     count, #128
+        b.lt    .Lnot_short
+        mrs     tmp1, dczid_el0
+        tbnz    tmp1, #4, .Lnot_short
+        mov     tmp3w, #4
+        and     zva_len, tmp1w, #15     /* Safety: other bits reserved. */
+        lsl     zva_len, tmp3w, zva_len
+
+.Lzero_by_line:
+        /* Compute how far we need to go to become suitably aligned.  We're
+         * already at quad-word alignment. */
+        cmp     count, zva_len_x
+        b.lt    .Lnot_short     /* Not enough to reach alignment. */
+        sub     zva_bits_x, zva_len_x, #1
+        neg     tmp2, dst
+        ands    tmp2, tmp2, zva_bits_x
+        b.eq    1f              /* Already aligned. */
+        /* Not aligned, check that there's enough to copy after alignment. */
+        sub     tmp1, count, tmp2
+        cmp     tmp1, #64
+        ccmp    tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
+        b.lt    .Lnot_short
+        /* We know that there's at least 64 bytes to zero and that it's safe
+         * to overrun by 64 bytes. */
+        mov     count, tmp1
+2:
+        stp     A_l, A_l, [dst]
+        stp     A_l, A_l, [dst, #16]
+        stp     A_l, A_l, [dst, #32]
+        subs    tmp2, tmp2, #64
+        stp     A_l, A_l, [dst, #48]
+        add     dst, dst, #64
+        b.ge    2b
+        /* We've overrun a bit, so adjust dst downwards. */
+        add     dst, dst, tmp2
+1:
+        sub     count, count, zva_len_x
+3:
+        dc      zva, dst
+        add     dst, dst, zva_len_x
+        subs    count, count, zva_len_x
+        b.ge    3b
+        ands    count, count, zva_bits_x
+        b.ne    .Ltail_maybe_long
+        ret
+END(android_memset32)
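
The zero-fill path above keys off DCZID_EL0: bit 4 (DZP) says whether DC ZVA is permitted, and bits [3:0] encode the block size as log2 of the number of 4-byte words, hence the lsl by zva_len with tmp3w = 4. The C++ fragment below is a rough sketch of that control flow for readers who prefer C to assembly; it is illustrative only (the helper name is mine, and it omits the store-ahead/overrun tricks of the assembly), assuming an AArch64 toolchain with GNU-style inline asm:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

// Sketch of the DC ZVA zeroing strategy used by android_memset.S above.
static void zero_with_dc_zva(void* dst, size_t count) {
#if defined(__aarch64__)
  uint64_t dczid;
  asm("mrs %0, dczid_el0" : "=r"(dczid));
  // DZP (bit 4) set means DC ZVA is prohibited; fall back to plain memset.
  if (dczid & (1u << 4)) {
    memset(dst, 0, count);
    return;
  }
  // Block size in bytes: 4 << BS, where BS is dczid[3:0].
  size_t block = size_t(4) << (dczid & 0xf);
  uintptr_t p = reinterpret_cast<uintptr_t>(dst);
  uintptr_t end = p + count;
  // Zero up to the first block boundary the ordinary way.
  uintptr_t aligned = (p + block - 1) & ~(block - 1);
  if (aligned > end) aligned = end;
  memset(reinterpret_cast<void*>(p), 0, aligned - p);
  p = aligned;
  // Clear one whole block per DC ZVA instruction.
  while (end - p >= block) {
    asm volatile("dc zva, %0" : : "r"(p) : "memory");
    p += block;
  }
  // Tail that does not fill a whole block.
  memset(reinterpret_cast<void*>(p), 0, end - p);
#else
  memset(dst, 0, count);
#endif
}
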
diff --git a/libcutils/tests/Android.mk b/libcutils/tests/Android.mk
index d3e07f80e..8e6531074 100644
--- a/libcutils/tests/Android.mk
+++ b/libcutils/tests/Android.mk
@@ -13,20 +13,36 @@
 # limitations under the License.
 
 LOCAL_PATH := $(call my-dir)
-include $(CLEAR_VARS)
 
 test_src_files := \
+    MemsetTest.cpp \
     PropertiesTest.cpp \
 
-shared_libraries := \
+include $(CLEAR_VARS)
+LOCAL_MODULE := libcutils_test
+LOCAL_SRC_FILES := $(test_src_files)
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    liblog \
     libutils \
-    liblog
 
-static_libraries := \
-    libcutils
+LOCAL_MULTILIB := both
+LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
+LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)64
+include $(BUILD_NATIVE_TEST)
 
-LOCAL_SHARED_LIBRARIES := $(shared_libraries)
-LOCAL_STATIC_LIBRARIES := $(static_libraries)
+include $(CLEAR_VARS)
+LOCAL_MODULE := libcutils_test_static
+LOCAL_FORCE_STATIC_EXECUTABLE := true
 LOCAL_SRC_FILES := $(test_src_files)
-LOCAL_MODULE := libcutils_test
+LOCAL_STATIC_LIBRARIES := \
+    libc \
+    libcutils \
+    liblog \
+    libstlport_static \
+    libutils \
+
+LOCAL_MULTILIB := both
+LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
+LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)64
 include $(BUILD_NATIVE_TEST)
diff --git a/libcutils/tests/MemsetTest.cpp b/libcutils/tests/MemsetTest.cpp
new file mode 100644
index 000000000..45efc519c
--- /dev/null
+++ b/libcutils/tests/MemsetTest.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <cutils/memory.h>
+#include <gtest/gtest.h>
+
+#define FENCEPOST_LENGTH 8
+
+#define MAX_TEST_SIZE (64*1024)
+// Choose values that have no repeating byte values.
+#define MEMSET16_PATTERN 0xb139
+#define MEMSET32_PATTERN 0x48193a27
+
+enum test_e {
+  MEMSET16 = 0,
+  MEMSET32,
+};
+
+static int g_memset16_aligns[][2] = {
+  { 2, 0 },
+  { 4, 0 },
+  { 8, 0 },
+  { 16, 0 },
+  { 32, 0 },
+  { 64, 0 },
+  { 128, 0 },
+
+  { 4, 2 },
+
+  { 8, 2 },
+  { 8, 4 },
+  { 8, 6 },
+
+  { 128, 2 },
+  { 128, 4 },
+  { 128, 6 },
+  { 128, 8 },
+  { 128, 10 },
+  { 128, 12 },
+  { 128, 14 },
+  { 128, 16 },
+};
+
+static int g_memset32_aligns[][2] = {
+  { 4, 0 },
+  { 8, 0 },
+  { 16, 0 },
+  { 32, 0 },
+  { 64, 0 },
+  { 128, 0 },
+
+  { 8, 4 },
+
+  { 128, 4 },
+  { 128, 8 },
+  { 128, 12 },
+  { 128, 16 },
+};
+
+static size_t GetIncrement(size_t len, size_t min_incr) {
+  if (len >= 4096) {
+    return 1024;
+  } else if (len >= 1024) {
+    return 256;
+  }
+  return min_incr;
+}
+
+// Return a pointer into the current buffer with the specified alignment.
+static void *GetAlignedPtr(void *orig_ptr, int alignment, int or_mask) {
+  uint64_t ptr = reinterpret_cast<uint64_t>(orig_ptr);
+  if (alignment > 0) {
+    // When setting the alignment, set it to exactly the alignment chosen.
+    // The pointer returned will be guaranteed not to be aligned to anything
+    // more than that.
+    ptr += alignment - (ptr & (alignment - 1));
+    ptr |= alignment | or_mask;
+  }
+
+  return reinterpret_cast<void*>(ptr);
+}
+
+static void SetFencepost(uint8_t *buffer) {
+  for (int i = 0; i < FENCEPOST_LENGTH; i += 2) {
+    buffer[i] = 0xde;
+    buffer[i+1] = 0xad;
+  }
+}
+
+static void VerifyFencepost(uint8_t *buffer) {
+  for (int i = 0; i < FENCEPOST_LENGTH; i += 2) {
+    if (buffer[i] != 0xde || buffer[i+1] != 0xad) {
+      uint8_t expected_value;
+      if (buffer[i] == 0xde) {
+        i++;
+        expected_value = 0xad;
+      } else {
+        expected_value = 0xde;
+      }
+      ASSERT_EQ(expected_value, buffer[i]);
+    }
+  }
+}
+
+void RunMemsetTests(test_e test_type, uint32_t value, int align[][2], size_t num_aligns) {
+  size_t min_incr = 4;
+  if (test_type == MEMSET16) {
+    min_incr = 2;
+    value |= value << 16;
+  }
+  uint32_t* expected_buf = new uint32_t[MAX_TEST_SIZE/sizeof(uint32_t)];
+  for (size_t i = 0; i < MAX_TEST_SIZE/sizeof(uint32_t); i++) {
+    expected_buf[i] = value;
+  }
+
+  // Allocate one large buffer with lots of extra space so that we can
+  // guarantee that all possible alignments will fit.
+  uint8_t *buf = new uint8_t[3*MAX_TEST_SIZE];
+  uint8_t *buf_align;
+  for (size_t i = 0; i < num_aligns; i++) {
+    size_t incr = min_incr;
+    for (size_t len = incr; len <= MAX_TEST_SIZE; len += incr) {
+      incr = GetIncrement(len, min_incr);
+
+      buf_align = reinterpret_cast<uint8_t*>(GetAlignedPtr(
+          buf+FENCEPOST_LENGTH, align[i][0], align[i][1]));
+
+      SetFencepost(&buf_align[-FENCEPOST_LENGTH]);
+      SetFencepost(&buf_align[len]);
+
+      memset(buf_align, 0xff, len);
+      if (test_type == MEMSET16) {
+        android_memset16(reinterpret_cast<uint16_t*>(buf_align), value, len);
+      } else {
+        android_memset32(reinterpret_cast<uint32_t*>(buf_align), value, len);
+      }
+      ASSERT_EQ(0, memcmp(expected_buf, buf_align, len))
+          << "Failed size " << len << " align " << align[i][0] << " " << align[i][1] << "\n";
+
+      VerifyFencepost(&buf_align[-FENCEPOST_LENGTH]);
+      VerifyFencepost(&buf_align[len]);
+    }
+  }
+  delete[] expected_buf;
+  delete[] buf;
+}
+
+TEST(libcutils, android_memset16_non_zero) {
+  RunMemsetTests(MEMSET16, MEMSET16_PATTERN, g_memset16_aligns, sizeof(g_memset16_aligns)/sizeof(int[2]));
+}
+
+TEST(libcutils, android_memset16_zero) {
+  RunMemsetTests(MEMSET16, 0, g_memset16_aligns, sizeof(g_memset16_aligns)/sizeof(int[2]));
+}
+
+TEST(libcutils, android_memset32_non_zero) {
+  RunMemsetTests(MEMSET32, MEMSET32_PATTERN, g_memset32_aligns, sizeof(g_memset32_aligns)/sizeof(int[2]));
+}
+
+TEST(libcutils, android_memset32_zero) {
+  RunMemsetTests(MEMSET32, 0, g_memset32_aligns, sizeof(g_memset32_aligns)/sizeof(int[2]));
+}
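
For reference, the API exercised by these tests is declared in <cutils/memory.h>, and, as MemsetTest.cpp shows, the third argument is a length in bytes; the tests only exercise destinations aligned to at least the element size. A small caller sketch (the framebuffer framing is just an example use case, not taken from this commit):

#include <stdint.h>
#include <stdlib.h>

#include <cutils/memory.h>

int main() {
  const size_t kPixels = 320 * 240;

  // Fill a 16bpp (RGB565-style) buffer with a solid value.
  // Note the length argument is in BYTES, as in MemsetTest.cpp.
  uint16_t* fb16 = static_cast<uint16_t*>(malloc(kPixels * sizeof(uint16_t)));
  android_memset16(fb16, 0xb139, kPixels * sizeof(uint16_t));

  // Same idea for a 32bpp buffer.
  uint32_t* fb32 = static_cast<uint32_t*>(malloc(kPixels * sizeof(uint32_t)));
  android_memset32(fb32, 0x48193a27, kPixels * sizeof(uint32_t));

  free(fb16);
  free(fb32);
  return 0;
}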