author    Jeff Vander Stoep <jeffv@google.com>  2022-12-08 09:42:43 +0100
committer Jeff Vander Stoep <jeffv@google.com>  2022-12-08 09:48:48 +0100
commit    61e66a44bc92c925ada24e2765572e5de1b827fe (patch)
tree      f076aa0469c4503eed1329b45d8a42af241ffb3b /tests
parent    a8cac4dbb79bd0c8317dc7e8442cd02578df10ca (diff)
download  bytes-61e66a44bc92c925ada24e2765572e5de1b827fe.tar.gz
Upgrade bytes to 1.3.0
This project was upgraded with external_updater.
Usage: tools/external_updater/updater.sh update rust/crates/bytes
For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md

Test: TreeHugger
Change-Id: Ifc6693cab6343ed5bcd70b718bb4b99c447783ea
Diffstat (limited to 'tests')
-rw-r--r--  tests/test_bytes.rs            | 175
-rw-r--r--  tests/test_bytes_odd_alloc.rs  |  32
-rw-r--r--  tests/test_bytes_vec_alloc.rs  | 142
-rw-r--r--  tests/test_chain.rs            |  22
4 files changed, 323 insertions(+), 48 deletions(-)
diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs
index 402017b..3481f8d 100644
--- a/tests/test_bytes.rs
+++ b/tests/test_bytes.rs
@@ -4,8 +4,8 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::usize;
-const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
-const SHORT: &'static [u8] = b"hello world";
+const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb";
+const SHORT: &[u8] = b"hello world";
fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}
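The first hunk drops the redundant `'static` lifetime on the two consts. A minimal sketch of why both spellings are the same type (a language rule, not anything specific to this crate):

```rust
// In a `const` item the reference lifetime defaults to `'static`
// (RFC 1623), so writing it out is redundant and trips clippy's
// `redundant_static_lifetimes` lint.
const EXPLICIT: &'static [u8] = b"hello";
const ELIDED: &[u8] = b"hello"; // same type: &'static [u8]

fn main() {
    assert_eq!(EXPLICIT, ELIDED);
}
```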
@@ -412,8 +412,8 @@ fn freeze_after_split_off() {
fn fns_defined_for_bytes_mut() {
let mut bytes = BytesMut::from(&b"hello world"[..]);
- bytes.as_ptr();
- bytes.as_mut_ptr();
+ let _ = bytes.as_ptr();
+ let _ = bytes.as_mut_ptr();
// Iterator
let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
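The `let _ =` wrappers above discard the returned pointers explicitly, presumably because the accessors gained a `#[must_use]`-style annotation in this release. A small illustration with a hypothetical stand-in function:

```rust
// Hypothetical stand-in for an accessor such as `as_ptr`:
#[must_use]
fn as_ptr_like() -> *const u8 {
    std::ptr::null()
}

fn main() {
    // as_ptr_like();      // would warn: unused return value of a #[must_use] fn
    let _ = as_ptr_like(); // explicit discard, no warning
}
```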
@@ -444,7 +444,7 @@ fn reserve_growth() {
let _ = bytes.split();
bytes.reserve(65);
- assert_eq!(bytes.capacity(), 128);
+ assert_eq!(bytes.capacity(), 117);
}
#[test]
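The expected capacity after `reserve(65)` drops from 128 to 117 in the hunk above; the exact figure is an implementation detail of how 1.3.0 reuses the buffer shared with the split-off half. A sketch that asserts only the documented contract rather than a magic number, assuming the test setup elided from the hunk (a 64-byte buffer with `b"hello world"` written):

```rust
use bytes::{BufMut, BytesMut};

fn main() {
    let mut bytes = BytesMut::with_capacity(64);
    bytes.put_slice(b"hello world");
    let _ = bytes.split();
    bytes.reserve(65);
    // reserve(n) only guarantees room for n more bytes; the precise
    // capacity (117 here, 128 before 1.3.0) is not part of the API.
    assert!(bytes.capacity() - bytes.len() >= 65);
}
```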
@@ -517,6 +517,34 @@ fn reserve_in_arc_unique_doubles() {
}
#[test]
+fn reserve_in_arc_unique_does_not_overallocate_after_split() {
+ let mut bytes = BytesMut::from(LONG);
+ let orig_capacity = bytes.capacity();
+ drop(bytes.split_off(LONG.len() / 2));
+
+ // now bytes is Arc and refcount == 1
+
+ let new_capacity = bytes.capacity();
+ bytes.reserve(orig_capacity - new_capacity);
+ assert_eq!(bytes.capacity(), orig_capacity);
+}
+
+#[test]
+fn reserve_in_arc_unique_does_not_overallocate_after_multiple_splits() {
+ let mut bytes = BytesMut::from(LONG);
+ let orig_capacity = bytes.capacity();
+ for _ in 0..10 {
+ drop(bytes.split_off(LONG.len() / 2));
+
+ // now bytes is Arc and refcount == 1
+
+ let new_capacity = bytes.capacity();
+ bytes.reserve(orig_capacity - new_capacity);
+ }
+ assert_eq!(bytes.capacity(), orig_capacity);
+}
+
+#[test]
fn reserve_in_arc_nonunique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
let _copy = bytes.split();
@@ -528,6 +556,25 @@ fn reserve_in_arc_nonunique_does_not_overallocate() {
assert_eq!(2001, bytes.capacity());
}
+/// This function tests `BytesMut::reserve_inner`, where `BytesMut` holds
+/// a unique reference to the shared vector and decides to reuse it by
+/// reallocating the `Vec`.
+#[test]
+fn reserve_shared_reuse() {
+ let mut bytes = BytesMut::with_capacity(1000);
+ bytes.put_slice(b"Hello, World!");
+ drop(bytes.split());
+
+ bytes.put_slice(b"!123ex123,sadchELLO,_wORLD!");
+ // Use split_off so that v.capacity() - self.cap != off
+ drop(bytes.split_off(9));
+ assert_eq!(&*bytes, b"!123ex123");
+
+ bytes.reserve(2000);
+ assert_eq!(&*bytes, b"!123ex123");
+ assert_eq!(bytes.capacity(), 2009);
+}
+
#[test]
fn extend_mut() {
let mut bytes = BytesMut::with_capacity(0);
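The comment about `v.capacity() - self.cap != off` refers to the view's offset into the shared allocation. A compact sketch of how splitting leaves the remaining handle at a nonzero offset (using `split_to`, the front-half mirror of `split_off`):

```rust
use bytes::{BufMut, BytesMut};

fn main() {
    let mut b = BytesMut::with_capacity(32);
    b.put_slice(b"0123456789");
    // After split_to(4), `b` still views the same allocation but starts
    // at offset 4, which is exactly the state reserve_inner must handle.
    let front = b.split_to(4);
    assert_eq!(&*front, b"0123");
    assert_eq!(&*b, b"456789");
}
```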
@@ -546,6 +593,13 @@ fn extend_from_slice_mut() {
}
#[test]
+fn extend_mut_from_bytes() {
+ let mut bytes = BytesMut::with_capacity(0);
+ bytes.extend([Bytes::from(LONG)]);
+ assert_eq!(*bytes, LONG[..]);
+}
+
+#[test]
fn extend_mut_without_size_hint() {
let mut bytes = BytesMut::with_capacity(0);
let mut long_iter = LONG.iter();
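The new `extend_mut_from_bytes` test relies on `BytesMut` accepting `Bytes` items through `Extend`. A minimal usage sketch:

```rust
use bytes::{Bytes, BytesMut};

fn main() {
    let mut buf = BytesMut::new();
    // Each Bytes chunk is appended in order, growing the buffer as needed.
    buf.extend([Bytes::from_static(b"hello "), Bytes::from_static(b"world")]);
    assert_eq!(&*buf, b"hello world");
}
```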
@@ -875,7 +929,7 @@ fn from_iter_no_size_hint() {
fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
let slice = &(bytes.as_ref()[start..end]);
- let sub = bytes.slice_ref(&slice);
+ let sub = bytes.slice_ref(slice);
assert_eq!(&sub[..], expected);
}
@@ -895,7 +949,7 @@ fn slice_ref_empty() {
let bytes = Bytes::from(&b""[..]);
let slice = &(bytes.as_ref()[0..0]);
- let sub = bytes.slice_ref(&slice);
+ let sub = bytes.slice_ref(slice);
assert_eq!(&sub[..], b"");
}
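Both `slice_ref` hunks drop a needless borrow: `slice` is already a `&[u8]`, so `&slice` produced a `&&[u8]` that only compiled through auto-deref (clippy's `needless_borrow`). The call as the signature expects it:

```rust
use bytes::Bytes;

fn main() {
    let bytes = Bytes::from_static(b"hello world");
    let slice = &bytes[0..5]; // slice: &[u8]
    // slice_ref(&self, subset: &[u8]) -> Bytes, so no extra `&` is needed.
    let sub = bytes.slice_ref(slice);
    assert_eq!(&sub[..], b"hello");
}
```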
@@ -1003,3 +1057,110 @@ fn box_slice_empty() {
let b = Bytes::from(empty);
assert!(b.is_empty());
}
+
+#[test]
+fn bytes_into_vec() {
+ // Test kind == KIND_VEC
+ let content = b"helloworld";
+
+ let mut bytes = BytesMut::new();
+ bytes.put_slice(content);
+
+ let vec: Vec<u8> = bytes.into();
+ assert_eq!(&vec, content);
+
+ // Test kind == KIND_ARC, shared.is_unique() == True
+ let mut bytes = BytesMut::new();
+ bytes.put_slice(b"abcdewe23");
+ bytes.put_slice(content);
+
+ // Overwrite the bytes to make sure only one reference to the underlying
+ // Vec exists.
+ bytes = bytes.split_off(9);
+
+ let vec: Vec<u8> = bytes.into();
+ assert_eq!(&vec, content);
+
+ // Test kind == KIND_ARC, shared.is_unique() == False
+ let prefix = b"abcdewe23";
+
+ let mut bytes = BytesMut::new();
+ bytes.put_slice(prefix);
+ bytes.put_slice(content);
+
+ let vec: Vec<u8> = bytes.split_off(prefix.len()).into();
+ assert_eq!(&vec, content);
+
+ let vec: Vec<u8> = bytes.into();
+ assert_eq!(&vec, prefix);
+}
+
+#[test]
+fn test_bytes_into_vec() {
+ // Test STATIC_VTABLE.to_vec
+ let bs = b"1b23exfcz3r";
+ let vec: Vec<u8> = Bytes::from_static(bs).into();
+ assert_eq!(&*vec, bs);
+
+ // Test bytes_mut.SHARED_VTABLE.to_vec impl
+ eprintln!("1");
+ let mut bytes_mut: BytesMut = bs[..].into();
+
+ // Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE
+ eprintln!("2");
+ drop(bytes_mut.split_off(bs.len()));
+
+ eprintln!("3");
+ let b1 = bytes_mut.freeze();
+ eprintln!("4");
+ let b2 = b1.clone();
+
+ eprintln!("{:#?}", (&*b1).as_ptr());
+
+ // shared.is_unique() = False
+ eprintln!("5");
+ assert_eq!(&*Vec::from(b2), bs);
+
+ // shared.is_unique() = True
+ eprintln!("6");
+ assert_eq!(&*Vec::from(b1), bs);
+
+ // Test bytes_mut.SHARED_VTABLE.to_vec impl where offset != 0
+ let mut bytes_mut1: BytesMut = bs[..].into();
+ let bytes_mut2 = bytes_mut1.split_off(9);
+
+ let b1 = bytes_mut1.freeze();
+ let b2 = bytes_mut2.freeze();
+
+ assert_eq!(Vec::from(b2), bs[9..]);
+ assert_eq!(Vec::from(b1), bs[..9]);
+}
+
+#[test]
+fn test_bytes_into_vec_promotable_even() {
+ let vec = vec![33u8; 1024];
+
+ // Test cases where kind == KIND_VEC
+ let b1 = Bytes::from(vec.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 1
+ let b1 = Bytes::from(vec.clone());
+ drop(b1.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 2
+ let b1 = Bytes::from(vec.clone());
+ let b2 = b1.clone();
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+ assert_eq!(Vec::from(b2), vec);
+
+ // Test cases where offset != 0
+ let mut b1 = Bytes::from(vec.clone());
+ let b2 = b1.split_off(20);
+
+ assert_eq!(Vec::from(b2), vec[20..]);
+ assert_eq!(Vec::from(b1), vec[..20]);
+}
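The comments in these new tests name internal representation tags (`KIND_VEC`, `KIND_ARC`, `SHARED_VTABLE`). Those are not public API, but the observable pattern they describe can be sketched: a `Bytes` built from a `Vec` starts out vector-backed, cloning promotes it to a shared, reference-counted representation, and the `Vec<u8>` conversion must recover the contents from either state.

```rust
use bytes::Bytes;

fn main() {
    let b1 = Bytes::from(vec![1u8, 2, 3]); // vector-backed (KIND_VEC)
    let b2 = b1.clone();                   // promoted to shared (KIND_ARC), ref_cnt == 2
    drop(b2);                              // ref_cnt back to 1
    let v: Vec<u8> = b1.into();            // recovers the buffer either way
    assert_eq!(v, [1, 2, 3]);
}
```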
diff --git a/tests/test_bytes_odd_alloc.rs b/tests/test_bytes_odd_alloc.rs
index 04ba7c2..27ed877 100644
--- a/tests/test_bytes_odd_alloc.rs
+++ b/tests/test_bytes_odd_alloc.rs
@@ -24,8 +24,7 @@ unsafe impl GlobalAlloc for Odd {
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
- let ptr = ptr.offset(1);
- ptr
+ ptr.offset(1)
} else {
ptr
}
@@ -67,3 +66,32 @@ fn test_bytes_clone_drop() {
let b1 = Bytes::from(vec);
let _b2 = b1.clone();
}
+
+#[test]
+fn test_bytes_into_vec() {
+ let vec = vec![33u8; 1024];
+
+ // Test cases where kind == KIND_VEC
+ let b1 = Bytes::from(vec.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 1
+ let b1 = Bytes::from(vec.clone());
+ drop(b1.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 2
+ let b1 = Bytes::from(vec.clone());
+ let b2 = b1.clone();
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+ assert_eq!(Vec::from(b2), vec);
+
+ // Test cases where offset != 0
+ let mut b1 = Bytes::from(vec.clone());
+ let b2 = b1.split_off(20);
+
+ assert_eq!(Vec::from(b2), vec[20..]);
+ assert_eq!(Vec::from(b1), vec[..20]);
+}
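Why run these conversions under an allocator that hands out odd addresses at all: `Bytes` distinguishes its representations partly through the data pointer itself (hence the separate "promotable even" paths named in test_bytes.rs above), so 1-aligned allocations exercise the case where the low pointer bit is already occupied. A hedged sketch of the idea, not the crate's actual internals:

```rust
// With the Odd allocator above, `ptr as usize & 1` is always 1 for the
// returned buffers, so an implementation cannot assume that bit is free
// for tagging and must take a pointer-value-aware code path.
fn low_bit(ptr: *const u8) -> usize {
    ptr as usize & 1
}

fn main() {
    let odd = 0x1003 as *const u8; // illustrative odd address
    assert_eq!(low_bit(odd), 1);
}
```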
diff --git a/tests/test_bytes_vec_alloc.rs b/tests/test_bytes_vec_alloc.rs
index 418a9cd..107e56e 100644
--- a/tests/test_bytes_vec_alloc.rs
+++ b/tests/test_bytes_vec_alloc.rs
@@ -1,61 +1,87 @@
use std::alloc::{GlobalAlloc, Layout, System};
-use std::{mem, ptr};
+use std::ptr::null_mut;
+use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use bytes::{Buf, Bytes};
#[global_allocator]
-static LEDGER: Ledger = Ledger;
+static LEDGER: Ledger = Ledger::new();
-struct Ledger;
+const LEDGER_LENGTH: usize = 2048;
-const USIZE_SIZE: usize = mem::size_of::<usize>();
+struct Ledger {
+ alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
+}
-unsafe impl GlobalAlloc for Ledger {
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- if layout.align() == 1 && layout.size() > 0 {
- // Allocate extra space to stash a record of
- // how much space there was.
- let orig_size = layout.size();
- let size = orig_size + USIZE_SIZE;
- let new_layout = match Layout::from_size_align(size, 1) {
- Ok(layout) => layout,
- Err(_err) => return ptr::null_mut(),
- };
- let ptr = System.alloc(new_layout);
- if !ptr.is_null() {
- (ptr as *mut usize).write(orig_size);
- let ptr = ptr.offset(USIZE_SIZE as isize);
- ptr
- } else {
- ptr
+impl Ledger {
+ const fn new() -> Self {
+ const ELEM: (AtomicPtr<u8>, AtomicUsize) =
+ (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
+ let alloc_table = [ELEM; LEDGER_LENGTH];
+
+ Self { alloc_table }
+ }
+
+    /// Iterate over our table until we find an open entry, then insert into it.
+ fn insert(&self, ptr: *mut u8, size: usize) {
+ for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // SeqCst is good enough here; we don't care about perf, we just want to be correct!
+ if entry_ptr
+ .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok()
+ {
+ entry_size.store(size, Ordering::SeqCst);
+ break;
}
- } else {
- System.alloc(layout)
}
}
- unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- if layout.align() == 1 && layout.size() > 0 {
- let off_ptr = (ptr as *mut usize).offset(-1);
- let orig_size = off_ptr.read();
- if orig_size != layout.size() {
- panic!(
- "bad dealloc: alloc size was {}, dealloc size is {}",
- orig_size,
- layout.size()
- );
+ fn remove(&self, ptr: *mut u8) -> usize {
+ for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // Set the value to something that will never be deallocated, so that
+            // we don't have any chance of a race condition.
+            //
+            // Don't worry, LEDGER_LENGTH is really large to compensate for us not reclaiming space.
+ if entry_ptr
+ .compare_exchange(
+ ptr,
+ invalid_ptr(usize::MAX),
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ )
+ .is_ok()
+ {
+ return entry_size.load(Ordering::SeqCst);
}
+ }
+
+ panic!("Couldn't find a matching entry for {:x?}", ptr);
+ }
+}
+
+unsafe impl GlobalAlloc for Ledger {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ let size = layout.size();
+ let ptr = System.alloc(layout);
+ self.insert(ptr, size);
+ ptr
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ let orig_size = self.remove(ptr);
- let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
- Ok(layout) => layout,
- Err(_err) => std::process::abort(),
- };
- System.dealloc(off_ptr as *mut u8, new_layout);
+ if orig_size != layout.size() {
+ panic!(
+ "bad dealloc: alloc size was {}, dealloc size is {}",
+ orig_size,
+ layout.size()
+ );
} else {
System.dealloc(ptr, layout);
}
}
}
+
#[test]
fn test_bytes_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
@@ -77,3 +103,41 @@ fn test_bytes_truncate_and_advance() {
bytes.advance(1);
drop(bytes);
}
+
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+ let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
+ debug_assert_eq!(ptr as usize, addr);
+ ptr.cast::<T>()
+}
+
+#[test]
+fn test_bytes_into_vec() {
+ let vec = vec![33u8; 1024];
+
+ // Test cases where kind == KIND_VEC
+ let b1 = Bytes::from(vec.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 1
+ let b1 = Bytes::from(vec.clone());
+ drop(b1.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 2
+ let b1 = Bytes::from(vec.clone());
+ let b2 = b1.clone();
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+ assert_eq!(Vec::from(b2), vec);
+
+ // Test cases where offset != 0
+ let mut b1 = Bytes::from(vec.clone());
+ let b2 = b1.split_off(20);
+
+ assert_eq!(Vec::from(b2), vec[20..]);
+ assert_eq!(Vec::from(b1), vec[..20]);
+}
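The rewritten `Ledger` replaces the old header-stashing trick (prepending the size to each 1-aligned allocation) with a lock-free side table: `insert` claims a slot via compare-exchange, and `remove` retires it with the `invalid_ptr(usize::MAX)` sentinel so a slot is never reused. The same record-and-verify pattern in miniature, with illustrative names (this sketch just counts live allocations rather than tracking sizes):

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative wrapper allocator: delegates to System while keeping a
// count of live allocations that tests could assert on.
struct Counting;

static LIVE: AtomicUsize = AtomicUsize::new(0);

unsafe impl GlobalAlloc for Counting {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = System.alloc(layout);
        if !ptr.is_null() {
            LIVE.fetch_add(1, Ordering::SeqCst);
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        LIVE.fetch_sub(1, Ordering::SeqCst);
        System.dealloc(ptr, layout);
    }
}
```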
diff --git a/tests/test_chain.rs b/tests/test_chain.rs
index affaf7a..cfda6b8 100644
--- a/tests/test_chain.rs
+++ b/tests/test_chain.rs
@@ -134,6 +134,28 @@ fn vectored_read() {
}
#[test]
+fn chain_growing_buffer() {
+ let mut buff = [' ' as u8; 10];
+ let mut vec = b"wassup".to_vec();
+
+    // The trailing empty Vec is needed to exercise overflow handling: remaining_mut()
+    // for a Vec is isize::MAX - len, but for a chain it can be as large as usize::MAX.
+    let mut chained = (&mut buff[..]).chain_mut(&mut vec).chain_mut(Vec::new());
+
+ chained.put_slice(b"hey there123123");
+
+ assert_eq!(&buff, b"hey there1");
+ assert_eq!(&vec, b"wassup23123");
+}
+
+#[test]
+fn chain_overflow_remaining_mut() {
+ let mut chained = Vec::<u8>::new().chain_mut(Vec::new()).chain_mut(Vec::new());
+
+ assert_eq!(chained.remaining_mut(), usize::MAX);
+ chained.put_slice(&[0; 256]);
+ assert_eq!(chained.remaining_mut(), usize::MAX);
+}
+
+#[test]
fn chain_get_bytes() {
let mut ab = Bytes::copy_from_slice(b"ab");
let mut cd = Bytes::copy_from_slice(b"cd");
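For reference, the saturation that the new `chain_overflow_remaining_mut` test checks: `remaining_mut()` for a `Vec<u8>` is `isize::MAX - len`, and a chain combines its parts with saturating addition, so three empty `Vec`s cap at `usize::MAX` instead of wrapping. A minimal check of the lower bound:

```rust
use bytes::BufMut;

fn main() {
    // Two chained Vecs already exceed isize::MAX of writable headroom;
    // the chain's remaining_mut() saturates rather than overflowing.
    let chained = Vec::<u8>::new().chain_mut(Vec::new());
    assert!(chained.remaining_mut() >= isize::MAX as usize);
}
```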