@@ -19,300 +19,7 @@ use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "runtime")]
use crate::client::connect::{Connect, Connected, Destination};

- #[derive(Debug)]
- pub struct MockCursor {
-     vec: Vec<u8>,
-     pos: usize,
- }
-
- impl MockCursor {
-     pub fn wrap(vec: Vec<u8>) -> MockCursor {
-         MockCursor {
-             vec: vec,
-             pos: 0,
-         }
-     }
- }
-
- impl ::std::ops::Deref for MockCursor {
-     type Target = [u8];
-
-     fn deref(&self) -> &[u8] {
-         &self.vec
-     }
- }
-
- impl AsRef<[u8]> for MockCursor {
-     fn as_ref(&self) -> &[u8] {
-         &self.vec
-     }
- }
-
- impl<S: AsRef<[u8]>> PartialEq<S> for MockCursor {
-     fn eq(&self, other: &S) -> bool {
-         self.vec == other.as_ref()
-     }
- }
-
- impl Write for MockCursor {
-     fn write(&mut self, data: &[u8]) -> io::Result<usize> {
-         trace!("MockCursor::write; len={}", data.len());
-         self.vec.extend(data);
-         Ok(data.len())
-     }
-
-     fn flush(&mut self) -> io::Result<()> {
-         Ok(())
-     }
- }
-
- impl Read for MockCursor {
-     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-         (&self.vec[self.pos..]).read(buf).map(|n| {
-             trace!("MockCursor::read; len={}", n);
-             self.pos += n;
-             if self.pos == self.vec.len() {
-                 trace!("MockCursor::read to end, clearing");
-                 self.pos = 0;
-                 self.vec.clear();
-             }
-             n
-         })
-     }
- }
-
- const READ_VECS_CNT: usize = 64;
-
- #[derive(Debug)]
- pub struct AsyncIo<T> {
-     blocked: bool,
-     bytes_until_block: usize,
-     error: Option<io::Error>,
-     flushed: bool,
-     inner: T,
-     max_read_vecs: usize,
-     num_writes: usize,
-     panic: bool,
-     park_tasks: bool,
-     task: Option<Task>,
- }
-
- impl<T> AsyncIo<T> {
-     pub fn new(inner: T, bytes: usize) -> AsyncIo<T> {
-         AsyncIo {
-             blocked: false,
-             bytes_until_block: bytes,
-             error: None,
-             flushed: false,
-             inner: inner,
-             max_read_vecs: READ_VECS_CNT,
-             num_writes: 0,
-             panic: false,
-             park_tasks: false,
-             task: None,
-         }
-     }
-
-     pub fn block_in(&mut self, bytes: usize) {
-         self.bytes_until_block = bytes;
-
-         if let Some(task) = self.task.take() {
-             task.notify();
-         }
-     }
-
-     pub fn error(&mut self, err: io::Error) {
-         self.error = Some(err);
-     }
-
-     #[cfg(feature = "nightly")]
-     pub fn panic(&mut self) {
-         self.panic = true;
-     }
-
-     pub fn max_read_vecs(&mut self, cnt: usize) {
-         assert!(cnt <= READ_VECS_CNT);
-         self.max_read_vecs = cnt;
-     }
-
-     #[cfg(feature = "runtime")]
-     pub fn park_tasks(&mut self, enabled: bool) {
-         self.park_tasks = enabled;
-     }
-
-     /*
-     pub fn flushed(&self) -> bool {
-         self.flushed
-     }
-     */
-
-     pub fn blocked(&self) -> bool {
-         self.blocked
-     }
-
-     pub fn num_writes(&self) -> usize {
-         self.num_writes
-     }
-
-     fn would_block(&mut self) -> io::Error {
-         self.blocked = true;
-         if self.park_tasks {
-             self.task = Some(task::current());
-         }
-         io::ErrorKind::WouldBlock.into()
-     }
-
- }
-
- impl AsyncIo<MockCursor> {
-     pub fn new_buf<T: Into<Vec<u8>>>(buf: T, bytes: usize) -> AsyncIo<MockCursor> {
-         AsyncIo::new(MockCursor::wrap(buf.into()), bytes)
-     }
-
-     /*
-     pub fn new_eof() -> AsyncIo<Buf> {
-         AsyncIo::new(Buf::wrap(Vec::new().into()), 1)
-     }
-     */
-
-     #[cfg(feature = "runtime")]
-     fn close(&mut self) {
-         self.block_in(1);
-         assert_eq!(
-             self.inner.vec.len(),
-             self.inner.pos,
-             "AsyncIo::close(), but cursor not consumed",
-         );
-         self.inner.vec.truncate(0);
-         self.inner.pos = 0;
-     }
- }
-
- impl<T: Read + Write> AsyncIo<T> {
-     fn write_no_vecs<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
-         if !buf.has_remaining() {
-             return Ok(Async::Ready(0));
-         }
-
-         let n = try_nb!(self.write(buf.bytes()));
-         buf.advance(n);
-         Ok(Async::Ready(n))
-     }
- }
-
- impl<S: AsRef<[u8]>, T: AsRef<[u8]>> PartialEq<S> for AsyncIo<T> {
-     fn eq(&self, other: &S) -> bool {
-         self.inner.as_ref() == other.as_ref()
-     }
- }
-
-
- impl<T: Read> Read for AsyncIo<T> {
-     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
-         assert!(!self.panic, "AsyncIo::read panic");
-         self.blocked = false;
-         if let Some(err) = self.error.take() {
-             Err(err)
-         } else if self.bytes_until_block == 0 {
-             Err(self.would_block())
-         } else {
-             let n = cmp::min(self.bytes_until_block, buf.len());
-             let n = self.inner.read(&mut buf[..n])?;
-             self.bytes_until_block -= n;
-             Ok(n)
-         }
-     }
- }
-
- impl<T: Write> Write for AsyncIo<T> {
-     fn write(&mut self, data: &[u8]) -> io::Result<usize> {
-         assert!(!self.panic, "AsyncIo::write panic");
-         self.num_writes += 1;
-         if let Some(err) = self.error.take() {
-             trace!("AsyncIo::write error");
-             Err(err)
-         } else if self.bytes_until_block == 0 {
-             trace!("AsyncIo::write would block");
-             Err(self.would_block())
-         } else {
-             trace!("AsyncIo::write; {} bytes", data.len());
-             self.flushed = false;
-             let n = cmp::min(self.bytes_until_block, data.len());
-             let n = self.inner.write(&data[..n])?;
-             self.bytes_until_block -= n;
-             Ok(n)
-         }
-     }
-
-     fn flush(&mut self) -> io::Result<()> {
-         self.flushed = true;
-         self.inner.flush()
-     }
- }
-
- impl<T: Read + Write> AsyncRead for AsyncIo<T> {
- }
-
- impl<T: Read + Write> AsyncWrite for AsyncIo<T> {
-     fn shutdown(&mut self) -> Poll<(), io::Error> {
-         Ok(().into())
-     }

-     fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
-         assert!(!self.panic, "AsyncIo::write_buf panic");
-         if self.max_read_vecs == 0 {
-             return self.write_no_vecs(buf);
-         }
-         let r = {
-             static DUMMY: &[u8] = &[0];
-             let mut bufs = [From::from(DUMMY); READ_VECS_CNT];
-             let i = Buf::bytes_vec(&buf, &mut bufs[..self.max_read_vecs]);
-             let mut n = 0;
-             let mut ret = Ok(0);
-             // each call to write() will increase our count, but we assume
-             // that if iovecs are used, its really only 1 write call.
-             let num_writes = self.num_writes;
-             for iovec in &bufs[..i] {
-                 match self.write(iovec) {
-                     Ok(num) => {
-                         n += num;
-                         ret = Ok(n);
-                     },
-                     Err(e) => {
-                         if e.kind() == io::ErrorKind::WouldBlock {
-                             if let Ok(0) = ret {
-                                 ret = Err(e);
-                             }
-                         } else {
-                             ret = Err(e);
-                         }
-                         break;
-                     }
-                 }
-             }
-             self.num_writes = num_writes + 1;
-             ret
-         };
-         match r {
-             Ok(n) => {
-                 Buf::advance(buf, n);
-                 Ok(Async::Ready(n))
-             }
-             Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
-                 Ok(Async::NotReady)
-             }
-             Err(e) => Err(e),
-         }
-     }
- }
-
- impl ::std::ops::Deref for AsyncIo<MockCursor> {
-     type Target = [u8];
-
-     fn deref(&self) -> &[u8] {
-         &self.inner
-     }
- }

#[cfg(feature = "runtime")]
pub struct Duplex {