Diffstat (limited to 'src/bytes_mut.rs')
 src/bytes_mut.rs | 124
 1 file changed, 74 insertions(+), 50 deletions(-)
diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs
index dc4e4b1..4d0585e 100644
--- a/src/bytes_mut.rs
+++ b/src/bytes_mut.rs
@@ -1,15 +1,22 @@
-use core::{cmp, fmt, hash, isize, slice, usize};
+use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
-use core::iter::{FromIterator, Iterator};
+use core::{cmp, fmt, hash, isize, slice, usize};
-use alloc::{vec::Vec, string::String, boxed::Box, borrow::{Borrow, BorrowMut}};
+use alloc::{
+ borrow::{Borrow, BorrowMut},
+ boxed::Box,
+ string::String,
+ vec::Vec,
+};
-use crate::{Bytes, Buf, BufMut};
-use crate::bytes::Vtable;
use crate::buf::IntoIter;
+use crate::bytes::Vtable;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::{Buf, BufMut, Bytes};
/// A unique reference to a contiguous slice of memory.
///
@@ -107,8 +114,7 @@ impl BytesMut {
/// Creates a new `BytesMut` with the specified capacity.
///
/// The returned `BytesMut` will be able to hold at least `capacity` bytes
- /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
- /// then `BytesMut` will not allocate.
+ /// without reallocating.
///
/// It is important to note that this function does not specify the length
/// of the returned `BytesMut`, but only the capacity.
@@ -233,7 +239,9 @@ impl BytesMut {
let (off, _) = self.get_vec_pos();
let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
mem::forget(self);
- vec.into()
+ let mut b: Bytes = vec.into();
+ b.advance(off);
+ b
}
} else {
debug_assert_eq!(self.kind(), KIND_ARC);
@@ -242,9 +250,7 @@ impl BytesMut {
let len = self.len;
let data = AtomicPtr::new(self.data as _);
mem::forget(self);
- unsafe {
- Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
- }
+ unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
}
}
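
The `advance(off)` above is the substantive change in `freeze`: converting the backing `Vec` straight into `Bytes` would silently rewind a split-off view to the start of its allocation. A sketch of the invariant the fix preserves, using only the public API:

    use bytes::{Buf, BytesMut};

    let mut buf = BytesMut::from(&b"hello world"[..]);
    buf.advance(6);                    // the view now starts at an offset into the Vec
    let frozen = buf.freeze();
    assert_eq!(&frozen[..], b"world"); // the offset survives; it is not reset
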
@@ -386,7 +392,9 @@ impl BytesMut {
/// [`split_off`]: #method.split_off
pub fn truncate(&mut self, len: usize) {
if len <= self.len() {
- unsafe { self.set_len(len); }
+ unsafe {
+ self.set_len(len);
+ }
}
}
@@ -567,7 +575,8 @@ impl BytesMut {
self.cap += off;
} else {
// No space - allocate more
- let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+ let mut v =
+ ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
v.reserve(additional);
// Update the info
@@ -583,7 +592,6 @@ impl BytesMut {
debug_assert_eq!(kind, KIND_ARC);
let shared: *mut Shared = self.data as _;
-
// Reserving involves abandoning the currently shared buffer and
// allocating a new vector with the requested capacity.
//
@@ -627,9 +635,7 @@ impl BytesMut {
// check.
let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
- new_cap = cmp::max(
- cmp::max(double, new_cap),
- original_capacity);
+ new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
} else {
new_cap = cmp::max(new_cap, original_capacity);
}
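
The amortized-growth rule reads cleanly in isolation: grow to at least double the current capacity, but never below the requested size or the original capacity. A hypothetical standalone helper (not crate API) showing the same arithmetic:

    fn grow(current: usize, requested: usize, original_capacity: usize) -> usize {
        // `checked_shl` guards the doubling against overflow, as in the hunk above.
        let double = current.checked_shl(1).unwrap_or(requested);
        core::cmp::max(core::cmp::max(double, requested), original_capacity)
    }

    assert_eq!(grow(100, 150, 64), 200);  // doubling dominates
    assert_eq!(grow(100, 300, 64), 300);  // an oversized request dominates
    assert_eq!(grow(100, 150, 256), 256); // the original capacity acts as a floor
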
@@ -678,14 +684,12 @@ impl BytesMut {
// Reserved above
debug_assert!(dst.len() >= cnt);
- ptr::copy_nonoverlapping(
- extend.as_ptr(),
- dst.as_mut_ptr() as *mut u8,
- cnt);
-
+ ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
}
- unsafe { self.advance_mut(cnt); }
+ unsafe {
+ self.advance_mut(cnt);
+ }
}
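
This hunk is the tail of `extend_from_slice`: after reserving, a single `copy_nonoverlapping` followed by an unsafe length bump. From the caller's side the operation stays entirely safe:

    use bytes::BytesMut;

    let mut buf = BytesMut::with_capacity(2);
    buf.extend_from_slice(b"abc"); // reserves beyond the initial 2 bytes, then copies
    assert_eq!(&buf[..], b"abc");
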
/// Absorbs a `BytesMut` that was previously split off.
@@ -750,16 +754,12 @@ impl BytesMut {
#[inline]
fn as_slice(&self) -> &[u8] {
- unsafe {
- slice::from_raw_parts(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
}
#[inline]
fn as_slice_mut(&mut self) -> &mut [u8] {
- unsafe {
- slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
}
unsafe fn set_start(&mut self, start: usize) {
@@ -788,7 +788,7 @@ impl BytesMut {
// on 64 bit systems and will only happen on 32 bit systems
// when shifting past 134,217,727 bytes. As such, we don't
// worry too much about performance here.
- self.promote_to_shared(/*ref_count = */1);
+ self.promote_to_shared(/*ref_count = */ 1);
}
}
@@ -820,10 +820,10 @@ impl BytesMut {
}
let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
- if ptr == other.ptr.as_ptr() &&
- self.kind() == KIND_ARC &&
- other.kind() == KIND_ARC &&
- self.data == other.data
+ if ptr == other.ptr.as_ptr()
+ && self.kind() == KIND_ARC
+ && other.kind() == KIND_ARC
+ && self.data == other.data
{
// Contiguous blocks, just combine directly
self.len += other.len;
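
The rewritten condition gates `unsplit`'s fast path: when both halves are adjacent and still point into the same shared allocation, recombining is a pure length update. Observable from the public API (a sketch, assuming the bytes 0.5 surface):

    use bytes::BytesMut;

    let mut buf = BytesMut::from(&b"hello world"[..]);
    let tail = buf.split_off(5); // both halves keep sharing one allocation
    buf.unsplit(tail);           // adjacent halves: recombined without copying
    assert_eq!(&buf[..], b"hello world");
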
@@ -884,7 +884,7 @@ impl BytesMut {
increment_shared(self.data);
ptr::read(self)
} else {
- self.promote_to_shared(/*ref_count = */2);
+ self.promote_to_shared(/*ref_count = */ 2);
ptr::read(self)
}
}
@@ -952,7 +952,9 @@ impl Buf for BytesMut {
cnt,
self.remaining(),
);
- unsafe { self.set_start(cnt); }
+ unsafe {
+ self.set_start(cnt);
+ }
}
fn to_bytes(&mut self) -> crate::Bytes {
@@ -969,7 +971,12 @@ impl BufMut for BytesMut {
#[inline]
unsafe fn advance_mut(&mut self, cnt: usize) {
let new_len = self.len() + cnt;
- assert!(new_len <= self.cap, "new_len = {}; capacity = {}", new_len, self.cap);
+ assert!(
+ new_len <= self.cap,
+ "new_len = {}; capacity = {}",
+ new_len,
+ self.cap
+ );
self.len = new_len;
}
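
The expanded assert message documents the `advance_mut` contract: the caller must have initialized `cnt` bytes and must stay within capacity. A minimal sketch of correct usage, assuming the bytes 0.5 `BufMut` surface (`bytes_mut` returns `&mut [MaybeUninit<u8>]` there; later majors renamed it `chunk_mut`):

    use bytes::{BufMut, BytesMut};
    use core::mem::MaybeUninit;

    let mut buf = BytesMut::with_capacity(4);
    unsafe {
        buf.bytes_mut()[0] = MaybeUninit::new(b'x'); // initialize first...
        buf.advance_mut(1); // ...then commit; exceeding capacity trips the assert
    }
    assert_eq!(&buf[..], b"x");
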
@@ -984,7 +991,10 @@ impl BufMut for BytesMut {
// Specialize these methods so they can skip checking `remaining_mut`
// and `advance_mut`.
- fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized {
+ fn put<T: crate::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
while src.has_remaining() {
let s = src.bytes();
let l = s.len();
@@ -1063,8 +1073,7 @@ impl Ord for BytesMut {
}
}
-impl Eq for BytesMut {
-}
+impl Eq for BytesMut {}
impl Default for BytesMut {
#[inline]
@@ -1074,7 +1083,10 @@ impl Default for BytesMut {
}
impl hash::Hash for BytesMut {
- fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
let s: &[u8] = self.as_ref();
s.hash(state);
}
@@ -1134,7 +1146,10 @@ impl<'a> IntoIterator for &'a BytesMut {
}
impl Extend<u8> for BytesMut {
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = u8>,
+ {
let iter = iter.into_iter();
let (lower, _) = iter.size_hint();
@@ -1151,7 +1166,10 @@ impl Extend<u8> for BytesMut {
}
impl<'a> Extend<&'a u8> for BytesMut {
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = &'a u8>,
+ {
self.extend(iter.into_iter().map(|b| *b))
}
}
@@ -1229,7 +1247,10 @@ impl Shared {
fn original_capacity_to_repr(cap: usize) -> usize {
let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
- cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
+ cmp::min(
+ width,
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+ )
}
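
The clamped width computation maps an original capacity to a small repr stored in the pointer's tag bits. A standalone sketch of the arithmetic; the concrete constants (MIN_ORIGINAL_CAPACITY_WIDTH = 10, MAX_ORIGINAL_CAPACITY_WIDTH = 17) are assumptions here, so check them against the crate source:

    const PTR_WIDTH: usize = 8 * core::mem::size_of::<usize>();
    const MIN_W: usize = 10; // assumed MIN_ORIGINAL_CAPACITY_WIDTH
    const MAX_W: usize = 17; // assumed MAX_ORIGINAL_CAPACITY_WIDTH

    fn repr(cap: usize) -> usize {
        // Significant bits of `cap` above the minimum width, clamped to the max.
        let width = PTR_WIDTH - ((cap >> MIN_W).leading_zeros() as usize);
        core::cmp::min(width, MAX_W - MIN_W)
    }

    assert_eq!(repr(512), 0);     // below the minimum width: stored as 0
    assert_eq!(repr(4096), 3);    // 4096 >> 10 == 4, which needs 3 bits
    assert_eq!(repr(1 << 20), 7); // clamped at MAX_W - MIN_W
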
fn original_capacity_from_repr(repr: usize) -> usize {
@@ -1397,7 +1418,8 @@ impl PartialOrd<BytesMut> for String {
}
impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
- where BytesMut: PartialEq<T>
+where
+ BytesMut: PartialEq<T>,
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
@@ -1405,7 +1427,8 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
}
impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
- where BytesMut: PartialOrd<T>
+where
+ BytesMut: PartialOrd<T>,
{
fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
self.partial_cmp(*other)
@@ -1480,8 +1503,9 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By
}
unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
- let shared = (*data.get_mut()) as *mut Shared;
- release_shared(shared as *mut Shared);
+ data.with_mut(|shared| {
+ release_shared(*shared as *mut Shared);
+ });
}
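
`AtomicPtr::get_mut` does not exist on loom's mock atomics, which is why the drop glue now goes through the `AtomicMut` helper imported near the top (under `#[allow(unused)]`, since only some build configurations touch it). A plausible shape for the non-loom shim; this is an assumption about the crate's internals, not verbatim source:

    /// Hypothetical sketch of the non-loom `AtomicMut` shim.
    pub(crate) trait AtomicMut<T> {
        fn with_mut<F, R>(&mut self, f: F) -> R
        where
            F: FnOnce(&mut *mut T) -> R;
    }

    impl<T> AtomicMut<T> for core::sync::atomic::AtomicPtr<T> {
        fn with_mut<F, R>(&mut self, f: F) -> R
        where
            F: FnOnce(&mut *mut T) -> R,
        {
            f(self.get_mut()) // off-loom, this is just the std accessor
        }
    }
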
// compile-fails
@@ -1519,11 +1543,11 @@ fn _split_must_use() {}
// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
- use std::sync::Arc;
+ use loom::sync::Arc;
use loom::thread;
- use crate::Bytes;
use super::BytesMut;
+ use crate::Bytes;
#[test]
fn bytes_mut_cloning_frozen() {