
cloudflare/lol-html 698

Low output latency streaming HTML parser/rewriter with CSS selector-based API

Manishearth/rust-tenacious 36

Lint to disallow the moving of marked types in Rust

nox/arbalest 22

Like Arc<T> but where weak references don't forbid mutable access

nox/cargo-old-lock 4

Print a Cargo manifest, old style

nox/apple-libc 2

A mirror of Apple's libc

cloudflare/h2 1

HTTP 2.0 client & server implementation for Rust.

mbrubeck/servo 1

The Servo Browser Engine

cloudflare/hyper 0

An HTTP library for Rust

cloudflare/hyperx 0

Extraction and modernization of the hyper 0.11.x typed header module

Pull request review comment on hyperium/h2

Add test that would make wait_for_capacity hang if it doesn't loop

 async fn data_padding() {
     join(srv, h2).await;
 }
+
+#[tokio::test]
+async fn poll_capacity_after_send_data_and_reserve() {
+    h2_support::trace_init!();
+    let (io, mut srv) = mock::new();
+
+    let srv = async move {
+        let settings = srv.assert_client_handshake_with_settings(frames::settings().initial_window_size(5)).await;
+        assert_default_settings!(settings);
+        srv.recv_frame(
+            frames::headers(1)
+                .request("POST", "https://www.example.com/")
+        )
+        .await;
+        srv.send_frame(frames::headers(1).response(200)).await;
+        srv.recv_frame(frames::data(1, &b"abcde"[..])).await;
+        srv.recv_frame(frames::data(1, &b""[..]).eos()).await;
+    };
+
+    let h2 = async move {
+        let (mut client, mut h2) = client::handshake(io).await.unwrap();
+        let request = Request::builder()
+            .method(Method::POST)
+            .uri("https://www.example.com/")
+            .body(())
+            .unwrap();
+
+        let (response, mut stream) = client.send_request(request, false).unwrap();
+
+        let response = h2.drive(response).await.unwrap();
+        assert_eq!(response.status(), StatusCode::OK);
+
+        stream.send_data("abcde".into(), false).unwrap();
+
+        stream.reserve_capacity(5);
+
+        // Initial window size was 5 so current capacity is 0 even if we just reserved.
+        assert_eq!(stream.capacity(), 0);
+
+        // FIXME(nox): This currently hangs because the capacity was reduced to 0 by the `send_data`
+        // call, but the stream's `send_capacity_inc` flag was never cleared and is still true,
+        // so `poll_for_capacity` immediately returns `Ready(Some(0))`.
+        let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await;

I changed wait_for_capacity in that commit then.

nox

comment created 3 hours ago
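
For reference, a minimal sketch of what the looping helper can look like, written against h2's public SendStream API. The real helper lives in tests/h2-support/src/util.rs; the CapacityFuture name and its fields here are illustrative assumptions, not the committed code.

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::Bytes;
use h2::SendStream;

// Illustrative stand-in for h2-support's util::wait_for_capacity.
pub fn wait_for_capacity(stream: SendStream<Bytes>, target: usize) -> CapacityFuture {
    CapacityFuture { stream: Some(stream), target }
}

pub struct CapacityFuture {
    stream: Option<SendStream<Bytes>>,
    target: usize,
}

impl Future for CapacityFuture {
    type Output = SendStream<Bytes>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = &mut *self;
        loop {
            // Check the actual capacity first: `poll_capacity` alone is not
            // enough, because it may yield a zero-sized grant.
            if me.stream.as_ref().unwrap().capacity() >= me.target {
                return Poll::Ready(me.stream.take().unwrap());
            }
            match me.stream.as_mut().unwrap().poll_capacity(cx) {
                // A grant arrived, possibly `Some(Ok(0))`: loop and re-check
                // instead of surfacing it to the caller.
                Poll::Ready(Some(Ok(_))) => continue,
                Poll::Ready(Some(Err(e))) => panic!("stream error: {}", e),
                Poll::Ready(None) => panic!("stream closed while waiting for capacity"),
                // `poll_capacity` has registered the waker, so parking is safe.
                Poll::Pending => return Poll::Pending,
            }
        }
    }
}

The important property is that a Ready(Some(Ok(0))) grant loops back to re-check capacity() rather than being treated as a final answer, and Pending is only returned after poll_capacity has registered the waker.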


push event on hyperium/h2

Anthony Ramine

commit sha 9acbfe2f1b0c89097bb6587019b7c6781840dc1b

Add test that would make wait_for_capacity hang if it doesn't loop


pushed 3 hours ago

Pull request review comment on hyperium/h2

Add test that currently makes wait_for_capacity hang


https://github.com/hyperium/hyper/blob/6169db250c932dd012d391389826cd34833077b4/src/proto/h2/mod.rs#L129-L145

Hyper itself loops on Ready(Some(0)), so I guess wait_for_capacity is broken?

nox

comment created 9 hours ago


push event on hyperium/h2

Anthony Ramine

commit sha 840807ecea91b285bb41eb16275ac7aa504bdd39

Add test that currently makes wait_for_capacity hang


pushed 9 hours ago

Pull request review comment on hyperium/h2

Add test that currently makes wait_for_capacity hang


@seanmonstar I managed to write a test that hangs on master, for the same reason your test hangs in my max send buffer size branch. Do you think h2's innards are wrong, or is it wait_for_capacity that is wrong?

nox

comment created 9 hours ago


PR opened hyperium/h2

Add test that currently makes wait_for_capacity hang
+52 -0

0 comments

1 changed file

PR created 9 hours ago

create branch on hyperium/h2

branch: hanging-wait-for-capacity

branch created 9 hours ago

Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size

 async fn data_padding() {
     join(srv, h2).await;
 }
+
+
+#[tokio::test]
+async fn notify_on_send_buffer_available() {
+    // This test ensures that the stream gets notified when there is additional
+    // send buffer space.
+    h2_support::trace_init!();
+
+    let (io, mut client) = mock::new();
+
+
+    let client = async move {
+        let settings = client.assert_server_handshake().await;
+        assert_default_settings!(settings);
+        client.send_frame(
+            frames::headers(1)
+                .request("GET", "https://www.example.com/")
+                .eos()
+        )
+        .await;
+        client.send_frame(
+            frames::headers(3)
+                .request("GET", "https://www.example.com/")
+                .eos()
+        )
+        .await;
+        client.recv_frame(frames::headers(1).response(200)).await;
+        client.recv_frame(frames::headers(3).response(200)).await;
+        dbg!(11);
+        client.recv_frame(frames::data(1, &b"abcde"[..]).eos()).await;
+        dbg!(31);
+        client.recv_frame(frames::data(3, &b"abcde"[..])).await;
+        dbg!(32);
+        client.recv_frame(frames::data(3, &b"abcde"[..])).await;
+        dbg!(33);
+        client.recv_frame(frames::data(3, &b"abcde"[..])).await;
+        dbg!(34);
+        client.recv_frame(frames::data(3, &b""[..]).eos()).await;
+    };
+
+    let srv = async move {
+        let mut srv = server::Builder::new()
+            .max_send_buffer_size(5)
+            .handshake::<_, Bytes>(io)
+            .await
+            .expect("handshake");
+
+        let (_req, mut reply1) = srv.next().await.unwrap().unwrap();
+        let (_req, mut reply2) = srv.next().await.unwrap().unwrap();
+
+        let mut stream1 = reply1.send_response(http::Response::new(()), false).unwrap();
+        let mut stream2 = reply2.send_response(http::Response::new(()), false).unwrap();
+        drop((reply1, reply2));
+
+        let t0 = tokio::spawn(async move {
+            assert!(srv.next().await.is_none(), "unexpected request");
+        });
+        let t1 = tokio::spawn(async move {
+            eprintln!("[t1] RESERVE 1 cap");
+            stream1.reserve_capacity(1);
+            stream1 = util::wait_for_capacity(stream1, 1).await;
+            eprintln!("[t1] got 1 cap");
+            stream1.send_data("abcde".into(), true).unwrap();
+        });
+        let t2 = tokio::spawn(async move {
+            for n in 0..3 {
+                eprintln!("[t2] RESERVE 1 cap, loop {}", n);
+                stream2.reserve_capacity(1);
+                stream2 = util::wait_for_capacity(stream2, 1).await;
+                eprintln!("[t2] got 1 cap, loop {}", n);
+                stream2.send_data("abcde".into(), false).unwrap();
+            }
+
+            stream2.send_data("".into(), true).unwrap();
+        });
+
+        t2.await.expect("srv body spawn");
+        t1.await.expect("srv body spawn");
+        t0.await.expect("srv end");

https://github.com/hyperium/h2/blob/2c53d600989209e263c8157df6e6e2280a9a3355/tests/h2-support/src/util.rs#L56-L66

There, poll_capacity returns Ready(Some(0)), and capacity returns 0, so act >= self.target is false, and Poll::Pending gets returned even though nothing has been set up to wake up the task.

What do you think is wrong here? That poll_capacity returns Ready(Some(0)), or that wait_for_capacity is badly conceived, or both?

nox

comment created a day ago
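
The failure mode described in this comment reduces to a poll that consumes one capacity grant and then parks unconditionally. Below is a hedged reconstruction of that shape, reusing the illustrative CapacityFuture type from the sketch further up; again an assumption about the helper's structure, not the verbatim code.

use std::future::Future;
use std::pin::Pin;
use std::task::{ready, Context, Poll};

use bytes::Bytes;
use h2::SendStream;

// The non-looping shape that hangs; reuses the illustrative
// `CapacityFuture` type from the earlier sketch.
impl Future for CapacityFuture {
    type Output = SendStream<Bytes>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let me = &mut *self;
        // `poll_capacity` yields `Ready(Some(Ok(0)))`: the zero-sized grant is
        // consumed, and with it the stream's pending wakeup.
        let _ = ready!(me.stream.as_mut().unwrap().poll_capacity(cx));
        // `capacity()` still returns 0, so `act >= target` is false...
        let act = me.stream.as_ref().unwrap().capacity();
        if act >= me.target {
            return Poll::Ready(me.stream.take().unwrap());
        }
        // ...and `Pending` is returned even though nothing has been set up to
        // wake the task again: the future hangs.
        Poll::Pending
    }
}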


Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size


Never mind, I misread. That said, the test really doesn't help much in finding where the issue with my patch is :/

nox

comment created a day ago


Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size


AFAIK the test is still wrong. We await t2 before we await t0, so t0 is pending and nothing will wake it up when t2 notifies the connection task that it has frames to send; the send buffer size thus never decreases and everything gets stuck.

nox

comment created a day ago


Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size

 async fn data_padding() {
     join(srv, h2).await;
 }
+
+
+#[tokio::test]
+async fn notify_on_send_buffer_available() {
+    // This test ensures that the stream gets notified when there is additional
+    // send buffer space.
+    h2_support::trace_init!();
+
+    let (io, mut client) = mock::new();
+
+
+    let client = async move {
+        let settings = client.assert_server_handshake().await;
+        assert_default_settings!(settings);
+        client.send_frame(
+            frames::headers(1)
+                .request("GET", "https://www.example.com/")
+                .eos()
+        )
+        .await;
+        client.recv_frame(frames::headers(1).response(200)).await;
+        client.recv_frame(frames::data(1, &b"abcde"[..])).await;
+        client.recv_frame(frames::data(1, &b"abcde"[..])).await;
+        client.recv_frame(frames::data(1, &b"abcde"[..])).await;
+        client.recv_frame(frames::data(1, &b""[..]).eos()).await;
+    };
+
+    let srv = async move {
+        let mut srv = server::Builder::new()
+            .max_send_buffer_size(5)
+            .handshake::<_, Bytes>(io)
+            .await
+            .expect("handshake");
+
+        let (_req, mut reply) = srv.next().await.unwrap().unwrap();
+        tokio::spawn(async move {
+            let rsp = http::Response::new(());
+            let mut stream = reply.send_response(rsp, false).unwrap();
+
+            for _ in 0..3 {
+                stream.reserve_capacity(5);
+                stream = util::wait_for_capacity(stream, 5).await;

The test hangs there, and by the time h2 returns Poll::Pending, Actions::task is None, so there is no way to wake the connection task to call poll_complete from there.

nox

comment created 2 days ago

created tag on cloudflare/h2

tag: cf-6

HTTP 2.0 client & server implementation for Rust.

created 3 days ago

Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size

 impl Prioritize {
                             tracing::trace_span!("updating stream flow").in_scope(|| {
                                 stream.send_flow.send_data(len);
+                                self.current_send_buffer_size -= len as usize;

I called assign_connection_capacity from poll_complete after the reclaim_frame call.

nox

comment created 3 days ago


push event on hyperium/h2

Anthony Ramine

commit sha acb0041514d67ea0680882ee6c8505d6119de4b8

Implement server::Builder::max_send_buffer_size


pushed 3 days ago

push event on hyperium/h2

Anthony Ramine

commit sha d837e1fd96441b2402ef53546e9900d18df6f23d

Implement server::Builder::max_send_buffer_size


pushed 3 days ago


Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size


Doesn't that mean that self.flow.available never changes from Prioritize::pop_frame? I don't understand the logic there.

nox

comment created 7 days ago

Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size


I am even more confused that we call self.flow.send_data(len); immediately after calling self.flow.assign_capacity(len);.

nox

comment created 7 days ago


Pull request review comment on hyperium/h2

Implement server::Builder::max_send_buffer_size


I'm not sure I understand. Why do we even call self.flow.assign_capacity(len); below that line, if we are actually supposed to wait for the peer to send another WINDOW_UPDATE frame?

nox

comment created 7 days ago
