Skip to content

Commit

Permalink
refactor: adjust cache for http proxy
Browse files Browse the repository at this point in the history
  • Loading branch information
vicanso committed Apr 21, 2024
1 parent d2c07b3 commit 080e2fb
Show file tree
Hide file tree
Showing 3 changed files with 43 additions and 12 deletions.
36 changes: 33 additions & 3 deletions src/plugin/cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,9 @@ use url::Url;

static MEM_BACKEND: Lazy<MemCache> = Lazy::new(MemCache::new);
static PREDICTOR: Lazy<Predictor<32>> = Lazy::new(|| Predictor::new(5, None));
static EVICTION_MANAGER: Lazy<Manager> = Lazy::new(|| Manager::new(8192));
// memory limit size
const MAX_MEMORY_SIZE: usize = 100 * 1024 * 1024;
static EVICTION_MANAGER: Lazy<Manager> = Lazy::new(|| Manager::new(MAX_MEMORY_SIZE));
static CACHE_LOCK_ONE_SECOND: Lazy<CacheLock> =
Lazy::new(|| CacheLock::new(std::time::Duration::from_secs(1)));
static CACHE_LOCK_TWO_SECONDS: Lazy<CacheLock> =
Expand All @@ -48,6 +50,7 @@ pub struct Cache {
storage: &'static (dyn Storage + Sync),
max_file_size: usize,
namespace: Option<String>,
headers: Option<Vec<String>>,
}

impl Cache {
Expand All @@ -58,8 +61,9 @@ impl Cache {
})?;
let mut lock = 0;
let mut eviction = false;
let mut max_file_size = 30 * 1024;
let mut max_file_size = 100 * 1024;
let mut namespace = None;
let mut headers = None;
for (key, value) in url_info.query_pairs().into_iter() {
match key.as_ref() {
"lock" => {
Expand All @@ -74,6 +78,15 @@ impl Cache {
}
"eviction" => eviction = true,
"namespace" => namespace = Some(value.to_string()),
"headers" => {
headers = Some(
value
.trim()
.split(',')
.map(|item| item.to_string())
.collect(),
)
}
_ => {}
}
}
Expand All @@ -85,6 +98,7 @@ impl Cache {
lock,
max_file_size,
namespace,
headers,
})
}
}
Expand Down Expand Up @@ -123,8 +137,24 @@ impl ProxyPlugin for Cache {
if self.max_file_size > 0 {
session.cache.set_max_file_size_bytes(self.max_file_size);
}
let mut keys = vec![];
if let Some(namespace) = &self.namespace {
ctx.cache_namespace = Some(namespace.clone());
keys.push(namespace.as_bytes());
}
if let Some(headers) = &self.headers {
for key in headers.iter() {
let buf = session.get_header_bytes(key);
if !buf.is_empty() {
keys.push(buf);
}
}
}
if !keys.is_empty() {
let arr: Vec<_> = keys
.iter()
.map(|item| std::string::String::from_utf8_lossy(item.to_owned()).to_string())
.collect();
ctx.cache_prefix = Some(arr.join(":"));
}

Ok(false)
Expand Down
15 changes: 8 additions & 7 deletions src/proxy/server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -344,25 +344,26 @@ impl ProxyHttp for Server {
session: &Session,
ctx: &mut Self::CTX,
) -> pingora::Result<CacheKey> {
let namespace = ctx.cache_namespace.clone().unwrap_or_default();

Ok(CacheKey::new(
namespace,
ctx.cache_prefix.clone().unwrap_or_default(),
format!("{}", session.req_header().uri),
"".to_string(),
))
}

fn response_cache_filter(
&self,
session: &Session,
_session: &Session,
resp: &ResponseHeader,
_ctx: &mut Self::CTX,
) -> pingora::Result<RespCacheable> {
if !session.cache.enabled() {
return Ok(RespCacheable::Uncacheable(NoCacheReason::Custom("default")));
}
let cc = CacheControl::from_resp_headers(resp);
if let Some(c) = &cc {
if c.no_cache() || c.no_store() || c.private() {
return Ok(RespCacheable::Uncacheable(NoCacheReason::OriginNotCache));
}
}

Ok(resp_cacheable(cc.as_ref(), resp, false, &META_DEFAULTS))
}

Expand Down
4 changes: 2 additions & 2 deletions src/state/ctx.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ pub struct State {
pub client_ip: Option<String>,
pub guard: Option<Guard>,
pub request_id: Option<String>,
pub cache_namespace: Option<String>,
pub cache_prefix: Option<String>,
pub cache_lock_duration: Option<Duration>,
}

Expand All @@ -46,7 +46,7 @@ impl Default for State {
client_ip: None,
guard: None,
request_id: None,
cache_namespace: None,
cache_prefix: None,
cache_lock_duration: None,
}
}
Expand Down

0 comments on commit 080e2fb

Please sign in to comment.