arm: fix multiarch memcpy for negative len [BZ #25620]

Message ID 20200609120003.GA8412@anisyan
State Committed
Commit beea361050728138b82c57dda0c4810402d342b9
Series arm: fix multiarch memcpy for negative len [BZ #25620]

Commit Message

Alexander Anisimov June 9, 2020, noon UTC
Hi,

Some time ago Evgeniy sent a patch [1] to fix the behavior of memcpy and memmove
when a negative len is passed to them. That patch covers only the common ARM
implementation (sysdeps/arm).

Now I've prepared the same fix for the multiarch memcpy implementation
(sysdeps/arm/armv7/multiarch/memcpy_impl.S).
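
For context, a minimal sketch of the problem (a hypothetical caller, not code
from glibc or from this patch): a negative length is implicitly converted to
size_t at the call site, so memcpy receives an enormous count, and the old
signed branch conditions then misjudged it.

  #include <string.h>

  /* Hypothetical caller, for illustration only.  */
  void
  copy_prefix (char *dst, const char *src, int len)
  {
    /* If len is negative (say -3), the implicit conversion to size_t
       wraps it to a value near SIZE_MAX.  The old assembly compared
       that count with signed conditions (bge/blt/bmi/bpl) and took
       the wrong code paths.  */
    memcpy (dst, src, len);
  }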

All test cases pass, including string/tst-memmove-overflow.c by Florian Weimer.

Together with [1], this patch fully eliminates the vulnerability CVE-2020-6096.
As far as I know, Yan Zhu has already started fixing it [BZ #25620] for multiarch [2],
and I would appreciate it if Yan Zhu finalized his version. However, this issue is
important for our project, which is why I am offering our own fix.

Signed-off-by: Evgeny Eremin <e.eremin@omprussia.ru>
Signed-off-by: Konstantin Karasev <k.karasev@omprussia.ru>
Signed-off-by: Anton Rybakov <a.rybakov@omprussia.ru>
Signed-off-by: Ildar Kamaletdinov <i.kamaletdinov@omprussia.ru>

[1] https://sourceware.org/pipermail/libc-alpha/2020-June/114702.html
[2] https://sourceware.org/pipermail/libc-alpha/2020-April/112671.html

---
--

Alexander Anisimov
Software Engineer
Open Mobile Platform
https://omprussia.ru
  

Comments

Alexander Anisimov June 18, 2020, 8:20 p.m. UTC | #1
Hi,

We are eager to finalize the Open Mobile Platform contribution to close CVE-2020-6096 [BZ #25620] in glibc,
but the legal process on the FSF side seems to have been on hold for more than a week with no feedback.
Could we somehow assist or help to speed up the process?

Best regards,
Alexander Anisimov
Software Engineer,
Open Mobile Platform.
  

Patch

diff --git a/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
index 2de17263..802c310f 100644
--- a/sysdeps/arm/armv7/multiarch/memcpy_impl.S
+++ b/sysdeps/arm/armv7/multiarch/memcpy_impl.S
@@ -268,7 +268,7 @@  ENTRY(memcpy)
 
 	mov	dst, dstin	/* Preserve dstin, we need to return it.  */
 	cmp	count, #64
-	bge	.Lcpy_not_short
+	bhs	.Lcpy_not_short
 	/* Deal with small copies quickly by dropping straight into the
 	   exit block.  */
 
@@ -351,10 +351,10 @@  ENTRY(memcpy)
 
 1:
 	subs	tmp2, count, #64	/* Use tmp2 for count.  */
-	blt	.Ltail63aligned
+	blo	.Ltail63aligned
 
 	cmp	tmp2, #512
-	bge	.Lcpy_body_long
+	bhs	.Lcpy_body_long
 
 .Lcpy_body_medium:			/* Count in tmp2.  */
 #ifdef USE_VFP
@@ -378,7 +378,7 @@  ENTRY(memcpy)
 	add	src, src, #64
 	vstr	d1, [dst, #56]
 	add	dst, dst, #64
-	bge	1b
+	bhs	1b
 	tst	tmp2, #0x3f
 	beq	.Ldone
 
@@ -412,7 +412,7 @@  ENTRY(memcpy)
 	ldrd	A_l, A_h, [src, #64]!
 	strd	A_l, A_h, [dst, #64]!
 	subs	tmp2, tmp2, #64
-	bge	1b
+	bhs	1b
 	tst	tmp2, #0x3f
 	bne	1f
 	ldr	tmp2,[sp], #FRAME_SIZE
@@ -482,7 +482,7 @@  ENTRY(memcpy)
 	add	src, src, #32
 
 	subs	tmp2, tmp2, #prefetch_lines * 64 * 2
-	blt	2f
+	blo	2f
 1:
 	cpy_line_vfp	d3, 0
 	cpy_line_vfp	d4, 64
@@ -494,7 +494,7 @@  ENTRY(memcpy)
 	add	dst, dst, #2 * 64
 	add	src, src, #2 * 64
 	subs	tmp2, tmp2, #prefetch_lines * 64
-	bge	1b
+	bhs	1b
 
 2:
 	cpy_tail_vfp	d3, 0
@@ -615,8 +615,8 @@  ENTRY(memcpy)
 1:
 	pld	[src, #(3 * 64)]
 	subs	count, count, #64
-	ldrmi	tmp2, [sp], #FRAME_SIZE
-	bmi	.Ltail63unaligned
+	ldrlo	tmp2, [sp], #FRAME_SIZE
+	blo	.Ltail63unaligned
 	pld	[src, #(4 * 64)]
 
 #ifdef USE_NEON
@@ -633,7 +633,7 @@  ENTRY(memcpy)
 	neon_load_multi d0-d3, src
 	neon_load_multi d4-d7, src
 	subs	count, count, #64
-	bmi	2f
+	blo	2f
 1:
 	pld	[src, #(4 * 64)]
 	neon_store_multi d0-d3, dst
@@ -641,7 +641,7 @@  ENTRY(memcpy)
 	neon_store_multi d4-d7, dst
 	neon_load_multi d4-d7, src
 	subs	count, count, #64
-	bpl	1b
+	bhs	1b
 2:
 	neon_store_multi d0-d3, dst
 	neon_store_multi d4-d7, dst
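
Note on the condition codes (explanatory, not part of the patch): the length is a
size_t, so it must be compared as an unsigned value. The old bge/blt/bmi/bpl branches
test the signed flags, so a count with the top bit set (i.e. a negative len converted
to size_t) compared as "less than 64" and the copy took the wrong path; bhs/blo test
the carry flag and give the correct unsigned comparison. A minimal C sketch of the
difference (illustrative only, not glibc code):

  #include <stdio.h>
  #include <stdint.h>

  int
  main (void)
  {
    size_t count = (size_t) -8;   /* a "negative" len after conversion */

    /* Unsigned view (what bhs/blo test): count is huge, not small.  */
    printf ("unsigned: count < 64 -> %d\n", count < 64);

    /* Signed view (what bge/blt tested): the same bits look like -8,
       which is "less than 64", so the old code fell into the short
       copy path.  */
    printf ("signed:   (intptr_t) count < 64 -> %d\n", (intptr_t) count < 64);

    return 0;
  }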