Skip to content

Tests for TDB eviction and stale response processing #592

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 20 commits into from
Nov 15, 2024
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
170 changes: 154 additions & 16 deletions cache/test_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from framework.curl_client import CurlResponse
from framework.deproxy_client import DeproxyClientH2
from framework.deproxy_server import StaticDeproxyServer
from helpers import deproxy
from helpers import deproxy, error
from helpers.control import Tempesta
from helpers.deproxy import HttpMessage
from test_suite import checks_for_tests as checks
Expand Down Expand Up @@ -731,7 +731,7 @@ class TestCacheMultipleMethods(tester.TempestaTest):
"HEAD_after_HEAD", first_method="HEAD", second_method="HEAD", should_be_cached=True
),
marks.Param(
"GET_after_HEAD", first_method="HEAD", second_method="GET", should_be_cached=False
"GET_after_HEAD", first_method="HEAD", second_method="GET", should_be_cached=True
),
marks.Param(
"POST_after_HEAD", first_method="HEAD", second_method="POST", should_be_cached=False
Expand All @@ -744,10 +744,12 @@ def test(self, name, first_method, second_method, should_be_cached):
satisfy subsequent GET and HEAD requests.
RFC 9110 9.3.1

The response to a HEAD request is cacheable; a cache MAY use it to
satisfy subsequent HEAD requests
RFC 9110 9.3.2
For the HEAD method Tempesta overrides HEAD with GET and sends the request
to upstream to cache the full response, but returns to the client a response
with only headers, as expected for a HEAD request.
"""
if first_method == "HEAD":
self.disable_deproxy_auto_parser()
client = self.get_client("deproxy")
server = self.get_server("deproxy")

Expand Down Expand Up @@ -961,8 +963,8 @@ def test_several_entries(
self, name, first_method, second_method, second_headers, sleep_interval
):
"""
Tempesta can save several entries in cache and use more appropriate
entry to satisfy the next request.
Tempesta can't save several entries in cache. A fresh record with the same key overrides
the old one; each HEAD request is transformed to GET during forwarding to upstream.
"""
server = self.get_server("deproxy")
client = self.get_client("deproxy")
Expand Down Expand Up @@ -1009,10 +1011,9 @@ def test_several_entries(

server.set_response(second_response)

# Send second request. Tempesta FW forward request to backend server and
# save response in cache (for some requests we use cache-control directive
# to be shure that request will be forwarded to backend server). There
# are two cache enries now in Tempesta FW cache.
# Send second request. Tempesta FW forwards request to backend server and saves response in
# cache, overriding the already stored record with the new fresh record. (For some requests we
# use a cache-control directive to be sure that the request will be forwarded to the backend.)
client.send_request(
client.create_request(method=second_method, uri="/index.html", headers=second_headers),
expected_status_code="200",
Expand Down Expand Up @@ -1040,13 +1041,15 @@ def test_several_entries(
)
self.assertIn("age", client.last_response.headers.keys())
self.assertEqual(len(server.requests), 2)
# Tempesta FW satisfy request from cache, using the most appropriate
# entry. When second request has GET or POST method, Tempesta FW uses
# Tempesta FW satisfies the request from cache, using a single stored entry.
# When the second request has GET or POST method, Tempesta FW uses
# it to satisfy third request, because it is the freshest cache entry.
# If second request has HEAD method, we can't use it response to satisfy
# GET request, so use first entry.
# Even if the second request has the HEAD method, we will use this response,
# because we have only one record per key and we always do a GET request
# to upstream regardless of the request method.
#
self.assertEqual(
"Second body." if second_method != "HEAD" else "First body.", client.last_response.body
"Second body." if second_method != "HEAD" else "", client.last_response.body
)


Expand Down Expand Up @@ -1927,4 +1930,139 @@ def test(self):
self.assertIn("age", response.headers)


class TestCacheClean(tester.TempestaTest):
    """
    Verify cache eviction on refresh: when a cached response becomes stale,
    a new response for the same key replaces it (no duplicate entries),
    while records stored under other keys are left untouched.
    """

    clients = [
        {
            "id": "deproxy",
            "type": "deproxy_h2",
            "addr": "${tempesta_ip}",
            "port": "443",
            "ssl": True,
            "ssl_hostname": "tempesta-tech.com",
        }
    ]

    backends = [
        {"id": "deproxy", "type": "deproxy", "port": "8080", "response": "static"},
    ]

    tempesta = {
        "config": """
            listen 443 proto=h2;

            srv_group main {
                server ${server_ip}:8080;
            }

            vhost tempesta-tech.com {
                tls_certificate ${tempesta_workdir}/tempesta.crt;
                tls_certificate_key ${tempesta_workdir}/tempesta.key;
                proxy_pass main;
            }

            http_chain {
                -> tempesta-tech.com;
            }

            cache_fulfill * *;
            cache 2;
        """
    }

    def test(self):
        """
        Send a request so Tempesta caches the response. After max-age
        expires, send the same request again: the stale entry must be
        evicted and replaced, leaving exactly one cache object. Finally,
        request a different URI to verify that eviction does not remove
        records stored under other keys.
        """
        self.start_all_services()
        server = self.get_server("deproxy")
        client = self.get_client("deproxy")
        tempesta = self.get_tempesta()

        # Plain implicit string concatenation: no placeholders, so neither
        # an f-string nor runtime '+' joining is needed.
        server.set_response(
            "HTTP/1.1 200 OK\r\n"
            "Connection: keep-alive\r\n"
            "Content-Length: 0\r\n"
            "Cache-control: max-age=1\r\n"
            "\r\n"
        )

        request = client.create_request(
            uri="/", authority="tempesta-tech.com", method="GET", headers=[]
        )

        # Two identical requests separated by more than max-age (1s), so the
        # first cached record is already stale when the second arrives.
        client.send_request(request, expected_status_code="200")
        time.sleep(3)
        client.send_request(request, expected_status_code="200")

        # The stale record must be replaced, not kept alongside the new one.
        tempesta.get_stats()
        self.assertEqual(tempesta.stats.cache_objects, 1)

        request = client.create_request(
            uri="/2", authority="tempesta-tech.com", method="GET", headers=[]
        )
        client.send_request(request, expected_status_code="200")

        # A different URI is a different cache key: a second object appears,
        # proving eviction did not clean unrelated records.
        tempesta.get_stats()
        self.assertEqual(tempesta.stats.cache_objects, 2)


class TestCacheUseStaleCfg(tester.TempestaTest):
    """
    Negative tests for the "cache_use_stale" configuration directive:
    each misconfiguration must make Tempesta fail to start and log the
    expected error message.
    """

    tempesta = {
        "config": """
            listen 443 proto=h2;

            srv_group default {
                server ${server_ip}:8000;
            }
            tls_match_any_server_name;
            vhost default {
                proxy_pass default;
                tls_certificate ${tempesta_workdir}/tempesta.crt;
                tls_certificate_key ${tempesta_workdir}/tempesta.key;
            }
            cache 2;
            cache_fulfill * *;
        """
    }

    @marks.Parameterize.expand(
        [
            marks.Param(
                name="dupl",
                cfg="cache_use_stale 5*;cache_use_stale 4*;\n",
                expected_msg="duplicate entry: 'cache_use_stale'",
            ),
            marks.Param(
                name="wrong_mask",
                cfg="cache_use_stale 3*;\n",
                # Raw string: '\*' is an invalid escape sequence in a normal
                # string literal (SyntaxWarning since Python 3.6); the bytes
                # of the pattern are unchanged.
                expected_msg=r'cache_use_stale Unsupported argument "3\*"',
            ),
            marks.Param(
                name="wrong_code",
                cfg="cache_use_stale 200;\n",
                expected_msg="Please specify status code above than 399",
            ),
        ]
    )
    def test_cache_use_stale_config(self, name, cfg, expected_msg):
        """
        Test misconfiguration of the `cache_use_stale` directive: append a
        broken directive to the base config, expect startup to fail with a
        bad exit status, and check the error message in the oops log.
        """
        tempesta = self.get_tempesta()
        tempesta.config.set_defconfig(tempesta.config.defconfig + cfg)
        # The startup error is expected; don't fail the test on it.
        self.oops_ignore.append("ERROR")

        with self.assertRaises(error.ProcessBadExitStatusException):
            self.start_tempesta()

        # NOTE(review): assumes self.oops.find() returns a truthy match on
        # success (not a str.find-style -1 on miss) — confirm in framework.
        self.assertTrue(self.oops.find(expected_msg))


# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
Loading