diff --git a/.idea/codeStyles/Project.xml b/.idea/codeStyles/Project.xml
index aaa48772..d69494f0 100644
--- a/.idea/codeStyles/Project.xml
+++ b/.idea/codeStyles/Project.xml
@@ -7,7 +7,74 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/normalized-cache-incubating/api/normalized-cache-incubating.api b/normalized-cache-incubating/api/normalized-cache-incubating.api
index f35bccf1..53a1f569 100644
--- a/normalized-cache-incubating/api/normalized-cache-incubating.api
+++ b/normalized-cache-incubating/api/normalized-cache-incubating.api
@@ -135,12 +135,13 @@ public final class com/apollographql/cache/normalized/VersionKt {
}
public final class com/apollographql/cache/normalized/api/ApolloCacheHeaders {
- public static final field DATE Ljava/lang/String;
public static final field DO_NOT_STORE Ljava/lang/String;
public static final field EVICT_AFTER_READ Ljava/lang/String;
+ public static final field EXPIRATION_DATE Ljava/lang/String;
public static final field INSTANCE Lcom/apollographql/cache/normalized/api/ApolloCacheHeaders;
public static final field MAX_STALE Ljava/lang/String;
public static final field MEMORY_CACHE_ONLY Ljava/lang/String;
+ public static final field RECEIVED_DATE Ljava/lang/String;
}
public abstract interface class com/apollographql/cache/normalized/api/CacheData {
@@ -252,7 +253,7 @@ public final class com/apollographql/cache/normalized/api/DefaultFieldKeyGenerat
public final class com/apollographql/cache/normalized/api/DefaultRecordMerger : com/apollographql/cache/normalized/api/RecordMerger {
public static final field INSTANCE Lcom/apollographql/cache/normalized/api/DefaultRecordMerger;
- public fun merge (Lcom/apollographql/cache/normalized/api/Record;Lcom/apollographql/cache/normalized/api/Record;Ljava/lang/Long;)Lkotlin/Pair;
+ public fun merge (Lcom/apollographql/cache/normalized/api/Record;Lcom/apollographql/cache/normalized/api/Record;)Lkotlin/Pair;
}
public final class com/apollographql/cache/normalized/api/EmbeddedFieldsContext {
@@ -292,7 +293,7 @@ public final class com/apollographql/cache/normalized/api/FieldPolicyCacheResolv
public final class com/apollographql/cache/normalized/api/FieldRecordMerger : com/apollographql/cache/normalized/api/RecordMerger {
public fun (Lcom/apollographql/cache/normalized/api/FieldRecordMerger$FieldMerger;)V
- public fun merge (Lcom/apollographql/cache/normalized/api/Record;Lcom/apollographql/cache/normalized/api/Record;Ljava/lang/Long;)Lkotlin/Pair;
+ public fun merge (Lcom/apollographql/cache/normalized/api/Record;Lcom/apollographql/cache/normalized/api/Record;)Lkotlin/Pair;
}
public final class com/apollographql/cache/normalized/api/FieldRecordMerger$FieldInfo {
@@ -399,7 +400,7 @@ public final class com/apollographql/cache/normalized/api/Record : java/util/Map
public static final field Companion Lcom/apollographql/cache/normalized/api/Record$Companion;
public fun (Ljava/lang/String;Ljava/util/Map;Ljava/util/UUID;)V
public synthetic fun (Ljava/lang/String;Ljava/util/Map;Ljava/util/UUID;ILkotlin/jvm/internal/DefaultConstructorMarker;)V
- public fun (Ljava/lang/String;Ljava/util/Map;Ljava/util/UUID;Ljava/util/Map;Ljava/util/Map;)V
+ public fun (Ljava/lang/String;Ljava/util/Map;Ljava/util/UUID;Ljava/util/Map;)V
public fun clear ()V
public synthetic fun compute (Ljava/lang/Object;Ljava/util/function/BiFunction;)Ljava/lang/Object;
public fun compute (Ljava/lang/String;Ljava/util/function/BiFunction;)Ljava/lang/Object;
@@ -414,7 +415,6 @@ public final class com/apollographql/cache/normalized/api/Record : java/util/Map
public final fun fieldKeys ()Ljava/util/Set;
public final fun get (Ljava/lang/Object;)Ljava/lang/Object;
public fun get (Ljava/lang/String;)Ljava/lang/Object;
- public final fun getDates ()Ljava/util/Map;
public fun getEntries ()Ljava/util/Set;
public final fun getFields ()Ljava/util/Map;
public final fun getKey ()Ljava/lang/String;
@@ -449,8 +449,14 @@ public final class com/apollographql/cache/normalized/api/Record : java/util/Map
public final class com/apollographql/cache/normalized/api/Record$Companion {
}
+public final class com/apollographql/cache/normalized/api/RecordKt {
+ public static final fun expirationDate (Lcom/apollographql/cache/normalized/api/Record;Ljava/lang/String;)Ljava/lang/Long;
+ public static final fun receivedDate (Lcom/apollographql/cache/normalized/api/Record;Ljava/lang/String;)Ljava/lang/Long;
+ public static final fun withDates (Lcom/apollographql/cache/normalized/api/Record;Ljava/lang/String;Ljava/lang/String;)Lcom/apollographql/cache/normalized/api/Record;
+}
+
public abstract interface class com/apollographql/cache/normalized/api/RecordMerger {
- public abstract fun merge (Lcom/apollographql/cache/normalized/api/Record;Lcom/apollographql/cache/normalized/api/Record;Ljava/lang/Long;)Lkotlin/Pair;
+ public abstract fun merge (Lcom/apollographql/cache/normalized/api/Record;Lcom/apollographql/cache/normalized/api/Record;)Lkotlin/Pair;
}
public final class com/apollographql/cache/normalized/api/RecordMergerKt {
diff --git a/normalized-cache-incubating/api/normalized-cache-incubating.klib.api b/normalized-cache-incubating/api/normalized-cache-incubating.klib.api
index aec381a0..606343db 100644
--- a/normalized-cache-incubating/api/normalized-cache-incubating.klib.api
+++ b/normalized-cache-incubating/api/normalized-cache-incubating.klib.api
@@ -62,7 +62,7 @@ abstract interface com.apollographql.cache.normalized.api/ReadOnlyNormalizedCach
abstract fun loadRecords(kotlin.collections/Collection, com.apollographql.cache.normalized.api/CacheHeaders): kotlin.collections/Collection // com.apollographql.cache.normalized.api/ReadOnlyNormalizedCache.loadRecords|loadRecords(kotlin.collections.Collection;com.apollographql.cache.normalized.api.CacheHeaders){}[0]
}
abstract interface com.apollographql.cache.normalized.api/RecordMerger { // com.apollographql.cache.normalized.api/RecordMerger|null[0]
- abstract fun merge(com.apollographql.cache.normalized.api/Record, com.apollographql.cache.normalized.api/Record, kotlin/Long?): kotlin/Pair> // com.apollographql.cache.normalized.api/RecordMerger.merge|merge(com.apollographql.cache.normalized.api.Record;com.apollographql.cache.normalized.api.Record;kotlin.Long?){}[0]
+ abstract fun merge(com.apollographql.cache.normalized.api/Record, com.apollographql.cache.normalized.api/Record): kotlin/Pair> // com.apollographql.cache.normalized.api/RecordMerger.merge|merge(com.apollographql.cache.normalized.api.Record;com.apollographql.cache.normalized.api.Record){}[0]
}
abstract interface com.apollographql.cache.normalized/ApolloStore { // com.apollographql.cache.normalized/ApolloStore|null[0]
abstract fun <#A1: com.apollographql.apollo.api/Fragment.Data> readFragment(com.apollographql.apollo.api/Fragment<#A1>, com.apollographql.cache.normalized.api/CacheKey, com.apollographql.apollo.api/CustomScalarAdapters = ..., com.apollographql.cache.normalized.api/CacheHeaders = ...): #A1 // com.apollographql.cache.normalized/ApolloStore.readFragment|readFragment(com.apollographql.apollo.api.Fragment<0:0>;com.apollographql.cache.normalized.api.CacheKey;com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.CacheHeaders){0§}[0]
@@ -204,7 +204,7 @@ final class com.apollographql.cache.normalized.api/FieldRecordMerger : com.apoll
final val value // com.apollographql.cache.normalized.api/FieldRecordMerger.FieldInfo.value|{}value[0]
final fun (): kotlin/Any? // com.apollographql.cache.normalized.api/FieldRecordMerger.FieldInfo.value.|(){}[0]
}
- final fun merge(com.apollographql.cache.normalized.api/Record, com.apollographql.cache.normalized.api/Record, kotlin/Long?): kotlin/Pair> // com.apollographql.cache.normalized.api/FieldRecordMerger.merge|merge(com.apollographql.cache.normalized.api.Record;com.apollographql.cache.normalized.api.Record;kotlin.Long?){}[0]
+ final fun merge(com.apollographql.cache.normalized.api/Record, com.apollographql.cache.normalized.api/Record): kotlin/Pair> // com.apollographql.cache.normalized.api/FieldRecordMerger.merge|merge(com.apollographql.cache.normalized.api.Record;com.apollographql.cache.normalized.api.Record){}[0]
}
final class com.apollographql.cache.normalized.api/MemoryCache : com.apollographql.cache.normalized.api/NormalizedCache { // com.apollographql.cache.normalized.api/MemoryCache|null[0]
constructor (com.apollographql.cache.normalized.api/NormalizedCache? = ..., kotlin/Int = ..., kotlin/Long = ...) // com.apollographql.cache.normalized.api/MemoryCache.|(com.apollographql.cache.normalized.api.NormalizedCache?;kotlin.Int;kotlin.Long){}[0]
@@ -239,7 +239,7 @@ final class com.apollographql.cache.normalized.api/ReceiveDateCacheResolver : co
}
final class com.apollographql.cache.normalized.api/Record : kotlin.collections/Map { // com.apollographql.cache.normalized.api/Record|null[0]
constructor (kotlin/String, kotlin.collections/Map, com.benasher44.uuid/Uuid? = ...) // com.apollographql.cache.normalized.api/Record.|(kotlin.String;kotlin.collections.Map;com.benasher44.uuid.Uuid?){}[0]
- constructor (kotlin/String, kotlin.collections/Map, com.benasher44.uuid/Uuid?, kotlin.collections/Map, kotlin.collections/Map>) // com.apollographql.cache.normalized.api/Record.|(kotlin.String;kotlin.collections.Map;com.benasher44.uuid.Uuid?;kotlin.collections.Map;kotlin.collections.Map>){}[0]
+ constructor (kotlin/String, kotlin.collections/Map, com.benasher44.uuid/Uuid?, kotlin.collections/Map>) // com.apollographql.cache.normalized.api/Record.|(kotlin.String;kotlin.collections.Map;com.benasher44.uuid.Uuid?;kotlin.collections.Map>){}[0]
final fun containsKey(kotlin/String): kotlin/Boolean // com.apollographql.cache.normalized.api/Record.containsKey|containsKey(kotlin.String){}[0]
final fun containsValue(kotlin/Any?): kotlin/Boolean // com.apollographql.cache.normalized.api/Record.containsValue|containsValue(kotlin.Any?){}[0]
final fun fieldKeys(): kotlin.collections/Set // com.apollographql.cache.normalized.api/Record.fieldKeys|fieldKeys(){}[0]
@@ -264,8 +264,6 @@ final class com.apollographql.cache.normalized.api/Record : kotlin.collections/M
final fun (): kotlin/Int // com.apollographql.cache.normalized.api/Record.sizeInBytes.|(){}[0]
final val values // com.apollographql.cache.normalized.api/Record.values|{}values[0]
final fun (): kotlin.collections/Collection // com.apollographql.cache.normalized.api/Record.values.|(){}[0]
- final var dates // com.apollographql.cache.normalized.api/Record.dates|{}dates[0]
- final fun (): kotlin.collections/Map // com.apollographql.cache.normalized.api/Record.dates.|(){}[0]
final var metadata // com.apollographql.cache.normalized.api/Record.metadata|{}metadata[0]
final fun (): kotlin.collections/Map> // com.apollographql.cache.normalized.api/Record.metadata.|(){}[0]
// Targets: [js]
@@ -350,6 +348,9 @@ final enum class com.apollographql.cache.normalized/FetchPolicy : kotlin/Enum = ...): com.apollographql.apollo/ApolloClient.Builder // com.apollographql.cache.normalized/logCacheMisses|logCacheMisses@com.apollographql.apollo.ApolloClient.Builder(kotlin.Function1){}[0]
final fun (com.apollographql.apollo/ApolloClient.Builder).com.apollographql.cache.normalized/normalizedCache(com.apollographql.cache.normalized.api/NormalizedCacheFactory, com.apollographql.cache.normalized.api/CacheKeyGenerator = ..., com.apollographql.cache.normalized.api/MetadataGenerator = ..., com.apollographql.cache.normalized.api/CacheResolver = ..., com.apollographql.cache.normalized.api/RecordMerger = ..., com.apollographql.cache.normalized.api/FieldKeyGenerator = ..., com.apollographql.cache.normalized.api/EmbeddedFieldsProvider = ..., kotlin/Boolean = ...): com.apollographql.apollo/ApolloClient.Builder // com.apollographql.cache.normalized/normalizedCache|normalizedCache@com.apollographql.apollo.ApolloClient.Builder(com.apollographql.cache.normalized.api.NormalizedCacheFactory;com.apollographql.cache.normalized.api.CacheKeyGenerator;com.apollographql.cache.normalized.api.MetadataGenerator;com.apollographql.cache.normalized.api.CacheResolver;com.apollographql.cache.normalized.api.RecordMerger;com.apollographql.cache.normalized.api.FieldKeyGenerator;com.apollographql.cache.normalized.api.EmbeddedFieldsProvider;kotlin.Boolean){}[0]
final fun (com.apollographql.apollo/ApolloClient.Builder).com.apollographql.cache.normalized/store(com.apollographql.cache.normalized/ApolloStore, kotlin/Boolean = ...): com.apollographql.apollo/ApolloClient.Builder // com.apollographql.cache.normalized/store|store@com.apollographql.apollo.ApolloClient.Builder(com.apollographql.cache.normalized.ApolloStore;kotlin.Boolean){}[0]
+final fun (com.apollographql.cache.normalized.api/Record).com.apollographql.cache.normalized.api/expirationDate(kotlin/String): kotlin/Long? // com.apollographql.cache.normalized.api/expirationDate|expirationDate@com.apollographql.cache.normalized.api.Record(kotlin.String){}[0]
+final fun (com.apollographql.cache.normalized.api/Record).com.apollographql.cache.normalized.api/receivedDate(kotlin/String): kotlin/Long? // com.apollographql.cache.normalized.api/receivedDate|receivedDate@com.apollographql.cache.normalized.api.Record(kotlin.String){}[0]
+final fun (com.apollographql.cache.normalized.api/Record).com.apollographql.cache.normalized.api/withDates(kotlin/String?, kotlin/String?): com.apollographql.cache.normalized.api/Record // com.apollographql.cache.normalized.api/withDates|withDates@com.apollographql.cache.normalized.api.Record(kotlin.String?;kotlin.String?){}[0]
final fun (kotlin.collections/Collection?).com.apollographql.cache.normalized.api/dependentKeys(): kotlin.collections/Set // com.apollographql.cache.normalized.api/dependentKeys|dependentKeys@kotlin.collections.Collection?(){}[0]
final fun <#A: com.apollographql.apollo.api/Executable.Data> (com.apollographql.apollo.api/Executable<#A>).com.apollographql.cache.normalized.api/normalize(#A, com.apollographql.apollo.api/CustomScalarAdapters, com.apollographql.cache.normalized.api/CacheKeyGenerator, com.apollographql.cache.normalized.api/MetadataGenerator = ..., com.apollographql.cache.normalized.api/FieldKeyGenerator = ..., com.apollographql.cache.normalized.api/EmbeddedFieldsProvider = ..., kotlin/String): kotlin.collections/Map // com.apollographql.cache.normalized.api/normalize|normalize@com.apollographql.apollo.api.Executable<0:0>(0:0;com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.CacheKeyGenerator;com.apollographql.cache.normalized.api.MetadataGenerator;com.apollographql.cache.normalized.api.FieldKeyGenerator;com.apollographql.cache.normalized.api.EmbeddedFieldsProvider;kotlin.String){0§}[0]
final fun <#A: com.apollographql.apollo.api/Executable.Data> (com.apollographql.apollo.api/Executable<#A>).com.apollographql.cache.normalized.api/readDataFromCache(com.apollographql.apollo.api/CustomScalarAdapters, com.apollographql.cache.normalized.api/ReadOnlyNormalizedCache, com.apollographql.cache.normalized.api/CacheResolver, com.apollographql.cache.normalized.api/CacheHeaders, com.apollographql.cache.normalized.api/FieldKeyGenerator = ...): #A // com.apollographql.cache.normalized.api/readDataFromCache|readDataFromCache@com.apollographql.apollo.api.Executable<0:0>(com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.ReadOnlyNormalizedCache;com.apollographql.cache.normalized.api.CacheResolver;com.apollographql.cache.normalized.api.CacheHeaders;com.apollographql.cache.normalized.api.FieldKeyGenerator){0§}[0]
@@ -383,16 +384,18 @@ final object com.apollographql.cache.normalized.api.internal/BlobRecordSerialize
final fun serialize(com.apollographql.cache.normalized.api/Record): kotlin/ByteArray // com.apollographql.cache.normalized.api.internal/BlobRecordSerializer.serialize|serialize(com.apollographql.cache.normalized.api.Record){}[0]
}
final object com.apollographql.cache.normalized.api/ApolloCacheHeaders { // com.apollographql.cache.normalized.api/ApolloCacheHeaders|null[0]
- final const val DATE // com.apollographql.cache.normalized.api/ApolloCacheHeaders.DATE|{}DATE[0]
- final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.DATE.|(){}[0]
final const val DO_NOT_STORE // com.apollographql.cache.normalized.api/ApolloCacheHeaders.DO_NOT_STORE|{}DO_NOT_STORE[0]
final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.DO_NOT_STORE.|(){}[0]
final const val EVICT_AFTER_READ // com.apollographql.cache.normalized.api/ApolloCacheHeaders.EVICT_AFTER_READ|{}EVICT_AFTER_READ[0]
final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.EVICT_AFTER_READ.|(){}[0]
+ final const val EXPIRATION_DATE // com.apollographql.cache.normalized.api/ApolloCacheHeaders.EXPIRATION_DATE|{}EXPIRATION_DATE[0]
+ final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.EXPIRATION_DATE.|(){}[0]
final const val MAX_STALE // com.apollographql.cache.normalized.api/ApolloCacheHeaders.MAX_STALE|{}MAX_STALE[0]
final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.MAX_STALE.|(){}[0]
final const val MEMORY_CACHE_ONLY // com.apollographql.cache.normalized.api/ApolloCacheHeaders.MEMORY_CACHE_ONLY|{}MEMORY_CACHE_ONLY[0]
final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.MEMORY_CACHE_ONLY.|(){}[0]
+ final const val RECEIVED_DATE // com.apollographql.cache.normalized.api/ApolloCacheHeaders.RECEIVED_DATE|{}RECEIVED_DATE[0]
+ final fun (): kotlin/String // com.apollographql.cache.normalized.api/ApolloCacheHeaders.RECEIVED_DATE.|(){}[0]
}
final object com.apollographql.cache.normalized.api/DefaultCacheResolver : com.apollographql.cache.normalized.api/CacheResolver { // com.apollographql.cache.normalized.api/DefaultCacheResolver|null[0]
final fun resolveField(com.apollographql.cache.normalized.api/ResolverContext): kotlin/Any? // com.apollographql.cache.normalized.api/DefaultCacheResolver.resolveField|resolveField(com.apollographql.cache.normalized.api.ResolverContext){}[0]
@@ -404,7 +407,7 @@ final object com.apollographql.cache.normalized.api/DefaultFieldKeyGenerator : c
final fun getFieldKey(com.apollographql.cache.normalized.api/FieldKeyContext): kotlin/String // com.apollographql.cache.normalized.api/DefaultFieldKeyGenerator.getFieldKey|getFieldKey(com.apollographql.cache.normalized.api.FieldKeyContext){}[0]
}
final object com.apollographql.cache.normalized.api/DefaultRecordMerger : com.apollographql.cache.normalized.api/RecordMerger { // com.apollographql.cache.normalized.api/DefaultRecordMerger|null[0]
- final fun merge(com.apollographql.cache.normalized.api/Record, com.apollographql.cache.normalized.api/Record, kotlin/Long?): kotlin/Pair> // com.apollographql.cache.normalized.api/DefaultRecordMerger.merge|merge(com.apollographql.cache.normalized.api.Record;com.apollographql.cache.normalized.api.Record;kotlin.Long?){}[0]
+ final fun merge(com.apollographql.cache.normalized.api/Record, com.apollographql.cache.normalized.api/Record): kotlin/Pair> // com.apollographql.cache.normalized.api/DefaultRecordMerger.merge|merge(com.apollographql.cache.normalized.api.Record;com.apollographql.cache.normalized.api.Record){}[0]
}
final object com.apollographql.cache.normalized.api/EmptyMetadataGenerator : com.apollographql.cache.normalized.api/MetadataGenerator { // com.apollographql.cache.normalized.api/EmptyMetadataGenerator|null[0]
final fun metadataForObject(kotlin/Any?, com.apollographql.cache.normalized.api/MetadataGeneratorContext): kotlin.collections/Map // com.apollographql.cache.normalized.api/EmptyMetadataGenerator.metadataForObject|metadataForObject(kotlin.Any?;com.apollographql.cache.normalized.api.MetadataGeneratorContext){}[0]
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ClientCacheExtensions.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ClientCacheExtensions.kt
index b9396798..bc28ba7f 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ClientCacheExtensions.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ClientCacheExtensions.kt
@@ -386,7 +386,7 @@ private class StoreExpirationInterceptor : ApolloInterceptor {
return@map it.newBuilder()
.cacheHeaders(
it.cacheHeaders.newBuilder()
- .addHeader(ApolloCacheHeaders.DATE, expires.toString())
+ .addHeader(ApolloCacheHeaders.EXPIRATION_DATE, expires.toString())
.build()
)
.build()
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/ApolloCacheHeaders.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/ApolloCacheHeaders.kt
index c487d47b..c7cc29ce 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/ApolloCacheHeaders.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/ApolloCacheHeaders.kt
@@ -20,9 +20,14 @@ object ApolloCacheHeaders {
const val EVICT_AFTER_READ = "evict-after-read"
/**
- * The value of this header will be stored in the [Record] fields date
+ * The value of this header will be stored in the [Record]'s received date.
*/
- const val DATE = "apollo-date"
+ const val RECEIVED_DATE = "apollo-received-date"
+
+ /**
+ * The value of this header will be stored in the [Record]'s expiration date.
+ */
+ const val EXPIRATION_DATE = "apollo-expiration-date"
/**
* How long to accept stale fields
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/CacheResolver.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/CacheResolver.kt
index 22401bfb..ffbfc8c6 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/CacheResolver.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/CacheResolver.kt
@@ -134,11 +134,11 @@ class ReceiveDateCacheResolver(private val maxAge: Int) : CacheResolver {
}
if (parent is Record) {
- val lastUpdated = parent.dates?.get(fieldKey)
- if (lastUpdated != null) {
+ val receivedDate = parent.receivedDate(fieldKey)
+ if (receivedDate != null) {
val maxStale = context.cacheHeaders.headerValue(ApolloCacheHeaders.MAX_STALE)?.toLongOrNull() ?: 0L
if (maxStale < Long.MAX_VALUE) {
- val age = currentTimeMillis() / 1000 - lastUpdated
+ val age = currentTimeMillis() / 1000 - receivedDate
if (maxAge + maxStale - age < 0) {
throw CacheMissException(parentKey, fieldKey, true)
}
@@ -164,9 +164,9 @@ class ExpireDateCacheResolver : CacheResolver {
}
if (parent is Record) {
- val expires = parent.dates?.get(fieldKey)
- if (expires != null) {
- if (currentTimeMillis() / 1000 - expires >= 0) {
+ val expirationDate = parent.expirationDate(fieldKey)
+ if (expirationDate != null) {
+ if (currentTimeMillis() / 1000 - expirationDate >= 0) {
throw CacheMissException(parentKey, fieldKey, true)
}
}
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/MemoryCache.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/MemoryCache.kt
index 993b53c0..1aab98f1 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/MemoryCache.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/MemoryCache.kt
@@ -123,14 +123,15 @@ class MemoryCache(
}
private fun internalMerge(record: Record, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set {
+ val receivedDate = cacheHeaders.headerValue(ApolloCacheHeaders.RECEIVED_DATE)
+ val expirationDate = cacheHeaders.headerValue(ApolloCacheHeaders.EXPIRATION_DATE)
val oldRecord = loadRecord(record.key, cacheHeaders)
- val date = cacheHeaders.date()
val changedKeys = if (oldRecord == null) {
- lruCache[record.key] = record.withDate(date)
+ lruCache[record.key] = record.withDates(receivedDate = receivedDate, expirationDate = expirationDate)
record.fieldKeys()
} else {
- val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record, newDate = date)
- lruCache[record.key] = mergedRecord
+ val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record)
+ lruCache[record.key] = mergedRecord.withDates(receivedDate = receivedDate, expirationDate = expirationDate)
changedKeys
}
return changedKeys
@@ -168,20 +169,3 @@ class MemoryCacheFactory @JvmOverloads constructor(
)
}
}
-
-private fun CacheHeaders.date(): Long? {
- return headerValue(ApolloCacheHeaders.DATE)?.toLong()
-}
-
-private fun Record.withDate(date: Long?): Record {
- if (date == null) {
- return this
- }
- return Record(
- key = key,
- fields = fields,
- mutationId = mutationId,
- dates = fields.mapValues { date },
- metadata = metadata
- )
-}
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/NormalizedCache.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/NormalizedCache.kt
index 1b062461..931f6524 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/NormalizedCache.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/NormalizedCache.kt
@@ -92,10 +92,6 @@ interface NormalizedCache : ReadOnlyNormalizedCache {
append("metadata: ")
append(metadata.prettifyDump(level + 1))
append("\n")
- indent(level + 1)
- append("dates: ")
- append(dates.prettifyDump(level + 1))
- append("\n")
indent(level)
append("}")
}
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/Record.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/Record.kt
index bf7b8e80..838d0708 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/Record.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/Record.kt
@@ -1,6 +1,7 @@
package com.apollographql.cache.normalized.api
import com.apollographql.apollo.annotations.ApolloInternal
+import com.apollographql.apollo.api.json.ApolloJsonElement
import com.apollographql.cache.normalized.api.internal.RecordWeigher.calculateBytes
import com.benasher44.uuid.Uuid
@@ -26,13 +27,10 @@ class Record(
val mutationId: Uuid? = null,
) : Map by fields {
- var dates: Map = emptyMap()
- private set
-
/**
* Arbitrary metadata that can be attached to each field.
*/
- var metadata: Map> = emptyMap()
+ var metadata: Map> = emptyMap()
private set
@ApolloInternal
@@ -40,10 +38,8 @@ class Record(
key: String,
fields: Map,
mutationId: Uuid?,
- dates: Map,
- metadata: Map>,
+ metadata: Map>,
) : this(key, fields, mutationId) {
- this.dates = dates
this.metadata = metadata
}
@@ -57,7 +53,7 @@ class Record(
* A field key incorporates any GraphQL arguments in addition to the field name.
*/
fun mergeWith(newRecord: Record): Pair> {
- return DefaultRecordMerger.merge(existing = this, incoming = newRecord, newDate = null)
+ return DefaultRecordMerger.merge(existing = this, incoming = newRecord)
}
@@ -102,3 +98,31 @@ class Record(
}
}
}
+
+@ApolloInternal
+fun Record.withDates(receivedDate: String?, expirationDate: String?): Record {
+ if (receivedDate == null && expirationDate == null) {
+ return this
+ }
+ return Record(
+ key = key,
+ fields = fields,
+ mutationId = mutationId,
+ metadata = metadata + fields.mapValues { (key, _) ->
+ metadata[key].orEmpty() + buildMap {
+ receivedDate?.let {
+ put(ApolloCacheHeaders.RECEIVED_DATE, it.toLong())
+ }
+ expirationDate?.let {
+ put(ApolloCacheHeaders.EXPIRATION_DATE, it.toLong())
+ }
+ }
+ }
+ )
+}
+
+@ApolloInternal
+fun Record.receivedDate(field: String) = metadata[field]?.get(ApolloCacheHeaders.RECEIVED_DATE) as? Long
+
+@ApolloInternal
+fun Record.expirationDate(field: String) = metadata[field]?.get(ApolloCacheHeaders.EXPIRATION_DATE) as? Long
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/RecordMerger.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/RecordMerger.kt
index b1258055..021dcbdb 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/RecordMerger.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/RecordMerger.kt
@@ -10,20 +10,18 @@ interface RecordMerger {
/**
* Merges the incoming Record with the existing Record.
*
- * @param newDate optional date to associate with the fields of the resulting merged Record. If null, a date will not be set.
* @return a pair of the resulting merged Record and a set of field keys which have changed or were added.
*/
- fun merge(existing: Record, incoming: Record, newDate: Long?): Pair>
+ fun merge(existing: Record, incoming: Record): Pair>
}
/**
* A [RecordMerger] that merges fields by replacing them with the incoming fields.
*/
object DefaultRecordMerger : RecordMerger {
- override fun merge(existing: Record, incoming: Record, newDate: Long?): Pair> {
+ override fun merge(existing: Record, incoming: Record): Pair> {
val changedKeys = mutableSetOf()
val mergedFields = existing.fields.toMutableMap()
- val date = existing.dates?.toMutableMap() ?: mutableMapOf()
for ((fieldKey, incomingFieldValue) in incoming.fields) {
val hasExistingFieldValue = existing.fields.containsKey(fieldKey)
@@ -32,22 +30,25 @@ object DefaultRecordMerger : RecordMerger {
mergedFields[fieldKey] = incomingFieldValue
changedKeys.add("${existing.key}.$fieldKey")
}
- // Update the date even if the value did not change
- if (newDate != null) {
- date[fieldKey] = newDate
- }
}
return Record(
key = existing.key,
fields = mergedFields,
mutationId = incoming.mutationId,
- dates = date,
- metadata = existing.metadata + incoming.metadata,
+ metadata = existing.metadata.mergedWith(incoming.metadata),
) to changedKeys
}
}
+private fun Map>.mergedWith(incoming: Map>): Map> {
+ return toMutableMap().also { existing ->
+ for ((incomingField, incomingMetadataForField) in incoming) {
+ existing[incomingField] = existing[incomingField].orEmpty() + incomingMetadataForField
+ }
+ }
+}
+
/**
* A convenience implementation of [RecordMerger] that simplifies the merging of [Record]s by delegating to a [FieldMerger].
*/
@@ -68,7 +69,7 @@ class FieldRecordMerger(private val fieldMerger: FieldMerger) : RecordMerger {
/**
* Value of the field being merged.
*/
- val value: ApolloJsonElement,
+ val value: Any?,
/**
* Metadata attached to the field being merged. See also [Record.metadata] and [MetadataGenerator].
@@ -76,11 +77,10 @@ class FieldRecordMerger(private val fieldMerger: FieldMerger) : RecordMerger {
val metadata: Map,
)
- override fun merge(existing: Record, incoming: Record, newDate: Long?): Pair> {
+ override fun merge(existing: Record, incoming: Record): Pair> {
val changedKeys = mutableSetOf()
val mergedFields = existing.fields.toMutableMap()
val mergedMetadata = existing.metadata.toMutableMap()
- val date = existing.dates?.toMutableMap() ?: mutableMapOf()
for ((fieldKey, incomingFieldValue) in incoming.fields) {
val hasExistingFieldValue = existing.fields.containsKey(fieldKey)
@@ -105,17 +105,12 @@ class FieldRecordMerger(private val fieldMerger: FieldMerger) : RecordMerger {
changedKeys.add("${existing.key}.$fieldKey")
}
- // Update the date even if the value did not change
- if (newDate != null) {
- date[fieldKey] = newDate
- }
}
return Record(
key = existing.key,
fields = mergedFields,
mutationId = incoming.mutationId,
- dates = date,
metadata = mergedMetadata,
) to changedKeys
}
@@ -153,17 +148,17 @@ private object ConnectionFieldMerger : FieldMerger {
// Incoming is empty
existing
} else {
- val existingValue = existing.value as Map<String, ApolloJsonElement>
+ val existingValue = existing.value as Map<String, Any?>
val existingEdges = existingValue["edges"] as? List<*>
val existingNodes = existingValue["nodes"] as? List<*>
- val existingPageInfo = existingValue["pageInfo"] as? Map<String, ApolloJsonElement>
+ val existingPageInfo = existingValue["pageInfo"] as? Map<String, Any?>
val existingHasPreviousPage = existingPageInfo?.get("hasPreviousPage") as? Boolean
val existingHasNextPage = existingPageInfo?.get("hasNextPage") as? Boolean
- val incomingValue = incoming.value as Map<String, ApolloJsonElement>
+ val incomingValue = incoming.value as Map<String, Any?>
val incomingEdges = incomingValue["edges"] as? List<*>
val incomingNodes = incomingValue["nodes"] as? List<*>
- val incomingPageInfo = incomingValue["pageInfo"] as? Map<String, ApolloJsonElement>
+ val incomingPageInfo = incomingValue["pageInfo"] as? Map<String, Any?>
val incomingHasPreviousPage = incomingPageInfo?.get("hasPreviousPage") as? Boolean
val incomingHasNextPage = incomingPageInfo?.get("hasNextPage") as? Boolean
@@ -234,7 +229,7 @@ private object ConnectionFieldMerger : FieldMerger {
FieldRecordMerger.FieldInfo(
value = mergedValue,
- metadata = mapOf("startCursor" to mergedStartCursor, "endCursor" to mergedEndCursor)
+ metadata = existing.metadata + incoming.metadata + mapOf("startCursor" to mergedStartCursor, "endCursor" to mergedEndCursor)
)
}
}
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/BlobRecordSerializer.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/BlobRecordSerializer.kt
index 717efa5b..af7ed2e7 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/BlobRecordSerializer.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/BlobRecordSerializer.kt
@@ -38,7 +38,6 @@ object BlobRecordSerializer {
buffer.writeInt(keys.size)
for (key in keys) {
buffer.writeString(key)
- buffer.writeAny(record.dates?.get(key))
buffer.writeAny(record.fields[key])
}
@@ -57,16 +56,14 @@ object BlobRecordSerializer {
val metadata = buffer.readAny() as Map<String, Map<String, ApolloJsonElement>>
val fields = mutableMapOf<String, Any?>()
- val dates = mutableMapOf<String, Long?>()
val size = buffer.readInt()
for (i in 0.until(size)) {
val name = buffer.readString()
- dates[name] = buffer.readAny() as Long?
fields[name] = buffer.readAny()
}
- return Record(key, fields, null, dates, metadata)
+ return Record(key, fields, null, metadata)
}
private fun Buffer.writeString(value: String) {
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/Normalizer.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/Normalizer.kt
index c1206fc1..cda97dfb 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/Normalizer.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/Normalizer.kt
@@ -126,7 +126,6 @@ internal class Normalizer(
key = key,
fields = fieldValues,
mutationId = null,
- dates = emptyMap(),
metadata = metadata,
)
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/RecordWeigher.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/RecordWeigher.kt
index 1d4bbe8f..0677da7e 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/RecordWeigher.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/api/internal/RecordWeigher.kt
@@ -27,7 +27,6 @@ internal object RecordWeigher {
size += key.length + weighField(value)
}
size += weighField(record.metadata)
- size += weighField(record.dates)
return size
}
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/ApolloCacheInterceptor.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/ApolloCacheInterceptor.kt
index a6716b92..10690708 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/ApolloCacheInterceptor.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/ApolloCacheInterceptor.kt
@@ -77,7 +77,7 @@ internal class ApolloCacheInterceptor(
val cacheKeys = if (response.data != null) {
var cacheHeaders = request.cacheHeaders + response.cacheHeaders
if (request.storeReceiveDate) {
- cacheHeaders += nowDateCacheHeaders()
+ cacheHeaders += nowReceiveDateCacheHeaders()
}
if (request.memoryCacheOnly) {
cacheHeaders += CacheHeaders.Builder().addHeader(ApolloCacheHeaders.MEMORY_CACHE_ONLY, "true").build()
@@ -276,8 +276,8 @@ internal class ApolloCacheInterceptor(
}
companion object {
- private fun nowDateCacheHeaders(): CacheHeaders {
- return CacheHeaders.Builder().addHeader(ApolloCacheHeaders.DATE, (currentTimeMillis() / 1000).toString()).build()
+ private fun nowReceiveDateCacheHeaders(): CacheHeaders {
+ return CacheHeaders.Builder().addHeader(ApolloCacheHeaders.RECEIVED_DATE, (currentTimeMillis() / 1000).toString()).build()
}
}
}
diff --git a/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/DefaultRecordMergerTest.kt b/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/DefaultRecordMergerTest.kt
new file mode 100644
index 00000000..929d945e
--- /dev/null
+++ b/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/DefaultRecordMergerTest.kt
@@ -0,0 +1,79 @@
+package com.apollographql.cache.normalized
+
+import com.apollographql.cache.normalized.api.DefaultRecordMerger
+import com.apollographql.cache.normalized.api.Record
+import kotlin.test.Test
+import kotlin.test.assertEquals
+
+class DefaultRecordMergerTest {
+ @Test
+ fun mergeMetaData() {
+ val existing = Record(
+ key = "key",
+ fields = mapOf(
+ "field1" to "value1",
+ "field2" to "value2"
+ ),
+ mutationId = null,
+ metadata = mapOf(
+ "field1" to mapOf(
+ "field1meta1" to "field1metaValue1",
+ "field1meta2" to "field1metaValue2",
+ ),
+ "field2" to mapOf(
+ "field2meta1" to "field2metaValue1",
+ "field2meta2" to "field2metaValue2",
+ ),
+ ),
+ )
+
+ val incoming = Record(
+ key = "key",
+ fields = mapOf(
+ "field1" to "value1.incoming",
+ "field3" to "value3",
+ ),
+ mutationId = null,
+ metadata = mapOf(
+ "field1" to mapOf(
+ "field1meta1" to "field1metaValue1.incoming",
+ "field1meta3" to "field1metaValue3",
+ ),
+ "field3" to mapOf(
+ "field3meta1" to "field3metaValue1",
+ "field3meta2" to "field3metaValue2",
+ ),
+ ),
+ )
+
+ val mergedRecord = DefaultRecordMerger.merge(existing, incoming).first
+
+ val expected = Record(
+ key = "key",
+ fields = mapOf(
+ "field1" to "value1.incoming",
+ "field2" to "value2",
+ "field3" to "value3",
+ ),
+ mutationId = null,
+ metadata = mapOf(
+ "field1" to mapOf(
+ "field1meta1" to "field1metaValue1.incoming",
+ "field1meta2" to "field1metaValue2",
+ "field1meta3" to "field1metaValue3",
+ ),
+ "field2" to mapOf(
+ "field2meta1" to "field2metaValue1",
+ "field2meta2" to "field2metaValue2",
+ ),
+ "field3" to mapOf(
+ "field3meta1" to "field3metaValue1",
+ "field3meta2" to "field3metaValue2",
+ ),
+ ),
+ )
+
+ assertEquals(expected.fields, mergedRecord.fields)
+ assertEquals(expected.metadata, mergedRecord.metadata)
+ }
+}
diff --git a/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/RecordWeigherTest.kt b/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/RecordWeigherTest.kt
index 3e93ea05..34633d70 100644
--- a/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/RecordWeigherTest.kt
+++ b/normalized-cache-incubating/src/commonTest/kotlin/com/apollographql/cache/normalized/RecordWeigherTest.kt
@@ -29,7 +29,7 @@ class RecordWeigherTest {
)
)
- assertTrue(record.sizeInBytes <= 264)
- assertTrue(record.sizeInBytes >= 258) // JS takes less space, maybe for strings?
+ assertTrue(record.sizeInBytes <= 248)
+ assertTrue(record.sizeInBytes >= 242) // JS takes less space, maybe for strings?
}
}
diff --git a/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/SqlNormalizedCache.kt b/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/SqlNormalizedCache.kt
index 5aa47ba0..fcf476b9 100644
--- a/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/SqlNormalizedCache.kt
+++ b/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/SqlNormalizedCache.kt
@@ -8,6 +8,7 @@ import com.apollographql.cache.normalized.api.CacheKey
import com.apollographql.cache.normalized.api.NormalizedCache
import com.apollographql.cache.normalized.api.Record
import com.apollographql.cache.normalized.api.RecordMerger
+import com.apollographql.cache.normalized.api.withDates
import com.apollographql.cache.normalized.sql.internal.RecordDatabase
import kotlin.reflect.KClass
@@ -87,16 +88,12 @@ class SqlNormalizedCache internal constructor(
}
}
- private fun CacheHeaders.date(): Long? {
- return headerValue(ApolloCacheHeaders.DATE)?.toLong()
- }
-
override fun merge(record: Record, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE) || cacheHeaders.hasHeader(ApolloCacheHeaders.MEMORY_CACHE_ONLY)) {
return emptySet()
}
return try {
- internalUpdateRecord(record = record, recordMerger = recordMerger, date = cacheHeaders.date())
+ internalUpdateRecord(record = record, cacheHeaders = cacheHeaders, recordMerger = recordMerger)
} catch (e: Exception) {
// Unable to merge the record in the database, it is possibly corrupted - treat this as a cache miss
apolloExceptionHandler(Exception("Unable to merge a record from the database", e))
@@ -109,7 +106,7 @@ class SqlNormalizedCache internal constructor(
return emptySet()
}
return try {
- internalUpdateRecords(records = records, recordMerger = recordMerger, date = cacheHeaders.date())
+ internalUpdateRecords(records = records, cacheHeaders = cacheHeaders, recordMerger = recordMerger)
} catch (e: Exception) {
// Unable to merge the records in the database, it is possibly corrupted - treat this as a cache miss
apolloExceptionHandler(Exception("Unable to merge records from the database", e))
@@ -144,8 +141,10 @@ class SqlNormalizedCache internal constructor(
*
* This is an optimization over [internalUpdateRecord]
*/
- private fun internalUpdateRecords(records: Collection<Record>, recordMerger: RecordMerger, date: Long?): Set<String> {
+ private fun internalUpdateRecords(records: Collection<Record>, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
var updatedRecordKeys: Set<String> = emptySet()
+ val receivedDate = cacheHeaders.headerValue(ApolloCacheHeaders.RECEIVED_DATE)
+ val expirationDate = cacheHeaders.headerValue(ApolloCacheHeaders.EXPIRATION_DATE)
recordDatabase.transaction {
val oldRecords = internalGetRecords(
keys = records.map { it.key },
@@ -154,12 +153,12 @@ class SqlNormalizedCache internal constructor(
updatedRecordKeys = records.flatMap { record ->
val oldRecord = oldRecords[record.key]
if (oldRecord == null) {
- recordDatabase.insert(record.withDate(date))
+ recordDatabase.insert(record.withDates(receivedDate = receivedDate, expirationDate = expirationDate))
record.fieldKeys()
} else {
- val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record, newDate = date)
+ val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record)
if (mergedRecord.isNotEmpty()) {
- recordDatabase.update(mergedRecord)
+ recordDatabase.update(mergedRecord.withDates(receivedDate = receivedDate, expirationDate = expirationDate))
}
changedKeys
}
@@ -168,33 +167,21 @@ class SqlNormalizedCache internal constructor(
return updatedRecordKeys
}
- private fun Record.withDate(date: Long?): Record {
- if (date == null) {
- return this
- }
- return Record(
- key = key,
- fields = fields,
- mutationId = mutationId,
- dates = fields.mapValues { date },
- metadata = metadata
- )
- }
-
/**
* Update a single [Record], loading the previous one
*/
- private fun internalUpdateRecord(record: Record, recordMerger: RecordMerger, date: Long?): Set<String> {
+ private fun internalUpdateRecord(record: Record, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
+ val receivedDate = cacheHeaders.headerValue(ApolloCacheHeaders.RECEIVED_DATE)
+ val expirationDate = cacheHeaders.headerValue(ApolloCacheHeaders.EXPIRATION_DATE)
return recordDatabase.transaction {
val oldRecord = recordDatabase.select(record.key)
-
if (oldRecord == null) {
- recordDatabase.insert(record.withDate(date))
+ recordDatabase.insert(record.withDates(receivedDate = receivedDate, expirationDate = expirationDate))
record.fieldKeys()
} else {
- val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record, newDate = date)
+ val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record)
if (mergedRecord.isNotEmpty()) {
- recordDatabase.update(mergedRecord)
+ recordDatabase.update(mergedRecord.withDates(receivedDate = receivedDate, expirationDate = expirationDate))
}
changedKeys
}
diff --git a/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/internal/Blob2RecordDatabase.kt b/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/internal/Blob2RecordDatabase.kt
index 89896944..e5e6b145 100644
--- a/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/internal/Blob2RecordDatabase.kt
+++ b/normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/sql/internal/Blob2RecordDatabase.kt
@@ -1,5 +1,6 @@
package com.apollographql.cache.normalized.sql.internal
+import com.apollographql.cache.normalized.api.ApolloCacheHeaders
import com.apollographql.cache.normalized.api.Record
import com.apollographql.cache.normalized.api.internal.BlobRecordSerializer
import com.apollographql.cache.normalized.sql.internal.blob2.Blob2Queries
@@ -43,21 +44,21 @@ internal class Blob2RecordDatabase(private val blobQueries: Blob2Queries) : Reco
}
override fun insert(record: Record) {
- blobQueries.insert(record.key, BlobRecordSerializer.serialize(record), record.date())
+ blobQueries.insert(record.key, BlobRecordSerializer.serialize(record), record.receivedDate())
}
override fun update(record: Record) {
- blobQueries.update(BlobRecordSerializer.serialize(record), record.date(), record.key)
+ blobQueries.update(BlobRecordSerializer.serialize(record), record.receivedDate(), record.key)
}
override fun selectAll(): List<Record> {
TODO("Not yet implemented")
}
- private fun Record.date(): Long? {
- /**
- * The
- */
- return dates?.values?.filterNotNull()?.maxOrNull()
+ /**
+ * The most recent of the fields' received dates.
+ */
+ private fun Record.receivedDate(): Long? {
+ return metadata.values.mapNotNull { it[ApolloCacheHeaders.RECEIVED_DATE] as? Long }.maxOrNull()
}
}
diff --git a/normalized-cache-sqlite-incubating/src/jvmTest/kotlin/com/apollographql/cache/normalized/sql/TrimTest.kt b/normalized-cache-sqlite-incubating/src/jvmTest/kotlin/com/apollographql/cache/normalized/sql/TrimTest.kt
index 86079157..970f7e54 100644
--- a/normalized-cache-sqlite-incubating/src/jvmTest/kotlin/com/apollographql/cache/normalized/sql/TrimTest.kt
+++ b/normalized-cache-sqlite-incubating/src/jvmTest/kotlin/com/apollographql/cache/normalized/sql/TrimTest.kt
@@ -3,6 +3,7 @@ package com.apollographql.cache.normalized.sql
import com.apollographql.cache.normalized.api.CacheHeaders
import com.apollographql.cache.normalized.api.DefaultRecordMerger
import com.apollographql.cache.normalized.api.Record
+import com.apollographql.cache.normalized.api.withDates
import org.junit.Test
import java.io.File
import kotlin.test.assertEquals
@@ -25,9 +26,8 @@ class TrimTest {
key = "old",
fields = mapOf("key" to "value"),
mutationId = null,
- dates = mapOf("key" to 0L),
metadata = emptyMap()
- )
+ ).withDates(receivedDate = "0", expirationDate = null)
cache.merge(oldRecord, CacheHeaders.NONE, recordMerger = DefaultRecordMerger)
val newRecords = 0.until(2 * 1024).map {
@@ -35,9 +35,8 @@ class TrimTest {
key = "new$it",
fields = mapOf("key" to largeString),
mutationId = null,
- dates = mapOf("key" to 1 + it.toLong()),
metadata = emptyMap()
- )
+ ).withDates(receivedDate = it.toString(), expirationDate = null)
}
cache.merge(newRecords, CacheHeaders.NONE, recordMerger = DefaultRecordMerger)
diff --git a/tests/expiration/src/commonTest/kotlin/ClientSideExpirationTest.kt b/tests/expiration/src/commonTest/kotlin/ClientSideExpirationTest.kt
index 8ad44eb4..d20bc710 100644
--- a/tests/expiration/src/commonTest/kotlin/ClientSideExpirationTest.kt
+++ b/tests/expiration/src/commonTest/kotlin/ClientSideExpirationTest.kt
@@ -82,7 +82,7 @@ class ClientSideExpirationTest {
assertTrue(response2.data?.user?.name == "John")
}
- private fun cacheHeaders(date: Long): CacheHeaders {
- return CacheHeaders.Builder().addHeader(ApolloCacheHeaders.DATE, date.toString()).build()
+ private fun cacheHeaders(receivedDate: Long): CacheHeaders {
+ return CacheHeaders.Builder().addHeader(ApolloCacheHeaders.RECEIVED_DATE, receivedDate.toString()).build()
}
}
diff --git a/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt b/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt
index 53e84634..aa21755f 100644
--- a/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt
+++ b/tests/pagination/src/commonTest/kotlin/OffsetBasedWithPagePaginationTest.kt
@@ -241,7 +241,6 @@ internal fun assertChainedCachesAreEqual(apolloStore: ApolloStore) {
val record2 = cache2[key]!!
assertEquals(record1.key, record2.key)
assertEquals(record1.fields, record2.fields)
- assertEquals(record1.dates.filterValues { it != null }, record2.dates.filterValues { it != null })
assertEquals(record1.metadata, record2.metadata)
}
}