From 77613bb895c43e8dce0268bedbee645d51e3b3cb Mon Sep 17 00:00:00 2001
From: BoD
Date: Tue, 5 Nov 2024 16:14:18 +0100
Subject: [PATCH 1/4] Add ApolloStore.writeOptimisticUpdates for fragments

---
 .../cache/normalized/ApolloStore.kt           |  21 ++
 .../normalized/internal/DefaultApolloStore.kt |  25 ++
 .../normalizer/HeroAndFriendsName.graphql     |   7 +
 .../commonTest/kotlin/OptimisticCacheTest.kt  | 248 +++++++++++-------
 4 files changed, 203 insertions(+), 98 deletions(-)

diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt
index 4c5e1bba..579db09a 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt
@@ -130,6 +130,27 @@ interface ApolloStore {
       customScalarAdapters: CustomScalarAdapters = CustomScalarAdapters.Empty,
   ): Set<String>
 
+  /**
+   * Writes a fragment to the optimistic store.
+   *
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
+   *
+   * @param fragment the fragment to write
+   * @param cacheKey the root where to write the fragment data to
+   * @param fragmentData the fragment data to write
+   * @param mutationId a unique identifier for this optimistic update
+   * @return the changed keys
+   *
+   * @see publish
+   */
+  fun <D : Fragment.Data> writeOptimisticUpdates(
+      fragment: Fragment<D>,
+      cacheKey: CacheKey,
+      fragmentData: D,
+      mutationId: Uuid,
+      customScalarAdapters: CustomScalarAdapters = CustomScalarAdapters.Empty,
+  ): Set<String>
+
   /**
    * Rollback operation data optimistic updates.
    * This is a synchronous operation that might block if the underlying cache is doing IO.
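A minimal usage sketch of the new fragment-based writeOptimisticUpdates follows. It is illustrative only and not part of the patch: it reuses the generated HeroAndFriendsNamesFragment / HeroAndFriendsNamesFragmentImpl classes and the hero({"episode":"JEDI"}) cache key from the test added later in this same patch, and the wrapping function name is a hypothetical placeholder.

    import com.apollographql.cache.normalized.ApolloStore
    import com.apollographql.cache.normalized.api.CacheKey
    import com.benasher44.uuid.uuid4
    import normalizer.fragment.HeroAndFriendsNamesFragment
    import normalizer.fragment.HeroAndFriendsNamesFragmentImpl

    // Hypothetical helper showing the intended call sequence: write optimistic
    // fragment data, publish the changed keys so watchers refresh, then roll the
    // update back once the real response is known.
    suspend fun applyOptimisticNameUpdate(store: ApolloStore) {
      val mutationId = uuid4()
      val data = HeroAndFriendsNamesFragment(
          "R222-D222",
          listOf(
              HeroAndFriendsNamesFragment.Friend("SuperMan"),
              HeroAndFriendsNamesFragment.Friend("Batman"),
          )
      )
      store.writeOptimisticUpdates(
          fragment = HeroAndFriendsNamesFragmentImpl(),
          cacheKey = CacheKey("""hero({"episode":"JEDI"})"""),
          fragmentData = data,
          mutationId = mutationId,
      ).also { changedKeys -> store.publish(changedKeys) }

      // Later, e.g. when the mutation fails, undo the optimistic write.
      store.rollbackOptimisticUpdates(mutationId).also { store.publish(it) }
    }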
diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/DefaultApolloStore.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/DefaultApolloStore.kt index 9312d9bc..af22a7f8 100644 --- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/DefaultApolloStore.kt +++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/internal/DefaultApolloStore.kt @@ -222,6 +222,31 @@ internal class DefaultApolloStore( return cache.addOptimisticUpdates(records) } + override fun writeOptimisticUpdates( + fragment: Fragment, + cacheKey: CacheKey, + fragmentData: D, + mutationId: Uuid, + customScalarAdapters: CustomScalarAdapters, + ): Set { + val records = fragment.normalize( + data = fragmentData, + customScalarAdapters = customScalarAdapters, + cacheKeyGenerator = cacheKeyGenerator, + metadataGenerator = metadataGenerator, + fieldKeyGenerator = fieldKeyGenerator, + embeddedFieldsProvider = embeddedFieldsProvider, + rootKey = cacheKey.key + ).values.map { record -> + Record( + key = record.key, + fields = record.fields, + mutationId = mutationId + ) + } + return cache.addOptimisticUpdates(records) + } + override fun rollbackOptimisticUpdates( mutationId: Uuid, ): Set { diff --git a/tests/normalized-cache/src/commonMain/graphql/normalizer/HeroAndFriendsName.graphql b/tests/normalized-cache/src/commonMain/graphql/normalizer/HeroAndFriendsName.graphql index 04cef245..296d0d0b 100644 --- a/tests/normalized-cache/src/commonMain/graphql/normalizer/HeroAndFriendsName.graphql +++ b/tests/normalized-cache/src/commonMain/graphql/normalizer/HeroAndFriendsName.graphql @@ -6,3 +6,10 @@ query HeroAndFriendsNames($episode: Episode) { } } } + +fragment HeroAndFriendsNamesFragment on Character { + name + friends { + name + } +} diff --git a/tests/normalized-cache/src/commonTest/kotlin/OptimisticCacheTest.kt b/tests/normalized-cache/src/commonTest/kotlin/OptimisticCacheTest.kt index 923dd836..51374210 100644 --- a/tests/normalized-cache/src/commonTest/kotlin/OptimisticCacheTest.kt +++ b/tests/normalized-cache/src/commonTest/kotlin/OptimisticCacheTest.kt @@ -6,6 +6,7 @@ import com.apollographql.apollo.testing.awaitElement import com.apollographql.apollo.testing.internal.runTest import com.apollographql.cache.normalized.ApolloStore import com.apollographql.cache.normalized.FetchPolicy +import com.apollographql.cache.normalized.api.CacheKey import com.apollographql.cache.normalized.api.IdCacheKeyGenerator import com.apollographql.cache.normalized.fetchPolicy import com.apollographql.cache.normalized.memory.MemoryCacheFactory @@ -23,6 +24,8 @@ import normalizer.HeroAndFriendsNamesWithIDsQuery import normalizer.HeroNameWithIdQuery import normalizer.ReviewsByEpisodeQuery import normalizer.UpdateReviewMutation +import normalizer.fragment.HeroAndFriendsNamesFragment +import normalizer.fragment.HeroAndFriendsNamesFragmentImpl import normalizer.type.ColorInput import normalizer.type.Episode import normalizer.type.ReviewInput @@ -49,7 +52,7 @@ class OptimisticCacheTest { * roll them back, make sure we're back to the initial state */ @Test - fun programmaticOptimiticUpdates() = runTest(before = { setUp() }, after = { tearDown() }) { + fun programmaticOptimisticUpdates() = runTest(before = { setUp() }, after = { tearDown() }) { val query = HeroAndFriendsNamesQuery(Episode.JEDI) mockServer.enqueueString(testFixtureToUtf8("HeroAndFriendsNameResponse.json")) @@ -78,19 
+81,68 @@ class OptimisticCacheTest { var response = apolloClient.query(query).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response.data?.hero?.name, "R222-D222") - assertEquals(response.data?.hero?.friends?.size, 2) - assertEquals(response.data?.hero?.friends?.get(0)?.name, "SuperMan") - assertEquals(response.data?.hero?.friends?.get(1)?.name, "Batman") + assertEquals("R222-D222", response.data?.hero?.name) + assertEquals(2, response.data?.hero?.friends?.size) + assertEquals("SuperMan", response.data?.hero?.friends?.get(0)?.name) + assertEquals("Batman", response.data?.hero?.friends?.get(1)?.name) store.rollbackOptimisticUpdates(mutationId) response = apolloClient.query(query).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response.data?.hero?.name, "R2-D2") - assertEquals(response.data?.hero?.friends?.size, 3) - assertEquals(response.data?.hero?.friends?.get(0)?.name, "Luke Skywalker") - assertEquals(response.data?.hero?.friends?.get(1)?.name, "Han Solo") - assertEquals(response.data?.hero?.friends?.get(2)?.name, "Leia Organa") + assertEquals("R2-D2", response.data?.hero?.name) + assertEquals(3, response.data?.hero?.friends?.size) + assertEquals("Luke Skywalker", response.data?.hero?.friends?.get(0)?.name) + assertEquals("Han Solo", response.data?.hero?.friends?.get(1)?.name) + assertEquals("Leia Organa", response.data?.hero?.friends?.get(2)?.name) + } + + /** + * Write the updates programmatically, make sure they are seen, + * roll them back, make sure we're back to the initial state + */ + @Test + fun programmaticOptimisticFragmentUpdates() = runTest(before = { setUp() }, after = { tearDown() }) { + val query = HeroAndFriendsNamesQuery(Episode.JEDI) + + mockServer.enqueueString(testFixtureToUtf8("HeroAndFriendsNameResponse.json")) + apolloClient.query(query).fetchPolicy(FetchPolicy.NetworkOnly).execute() + + val mutationId = uuid4() + val data = HeroAndFriendsNamesFragment( + "R222-D222", + listOf( + HeroAndFriendsNamesFragment.Friend( + "SuperMan" + ), + HeroAndFriendsNamesFragment.Friend( + "Batman" + ) + ) + ) + store.writeOptimisticUpdates( + HeroAndFriendsNamesFragmentImpl(), + mutationId = mutationId, + cacheKey = CacheKey("""hero({"episode":"JEDI"})"""), + fragmentData = data, + ).also { + store.publish(it) + } + + var response = apolloClient.query(query).fetchPolicy(FetchPolicy.CacheOnly).execute() + + assertEquals("R222-D222", response.data?.hero?.name) + assertEquals(2, response.data?.hero?.friends?.size) + assertEquals("SuperMan", response.data?.hero?.friends?.get(0)?.name) + assertEquals("Batman", response.data?.hero?.friends?.get(1)?.name) + + store.rollbackOptimisticUpdates(mutationId) + response = apolloClient.query(query).fetchPolicy(FetchPolicy.CacheOnly).execute() + + assertEquals("R2-D2", response.data?.hero?.name) + assertEquals(3, response.data?.hero?.friends?.size) + assertEquals("Luke Skywalker", response.data?.hero?.friends?.get(0)?.name) + assertEquals("Han Solo", response.data?.hero?.friends?.get(1)?.name) + assertEquals("Leia Organa", response.data?.hero?.friends?.get(2)?.name) } /** @@ -132,13 +184,13 @@ class OptimisticCacheTest { // check if query1 see optimistic updates var response1 = apolloClient.query(query1).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response1.data?.hero?.id, "2001") - assertEquals(response1.data?.hero?.name, "R222-D222") - assertEquals(response1.data?.hero?.friends?.size, 2) - assertEquals(response1.data?.hero?.friends?.get(0)?.id, "1000") - 
assertEquals(response1.data?.hero?.friends?.get(0)?.name, "SuperMan") - assertEquals(response1.data?.hero?.friends?.get(1)?.id, "1003") - assertEquals(response1.data?.hero?.friends?.get(1)?.name, "Batman") + assertEquals("2001", response1.data?.hero?.id) + assertEquals("R222-D222", response1.data?.hero?.name) + assertEquals(2, response1.data?.hero?.friends?.size) + assertEquals("1000", response1.data?.hero?.friends?.get(0)?.id) + assertEquals("SuperMan", response1.data?.hero?.friends?.get(0)?.name) + assertEquals("1003", response1.data?.hero?.friends?.get(1)?.id) + assertEquals("Batman", response1.data?.hero?.friends?.get(1)?.name) // execute query2 val query2 = HeroNameWithIdQuery() @@ -163,46 +215,46 @@ class OptimisticCacheTest { // check if query1 sees data2 response1 = apolloClient.query(query1).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response1.data?.hero?.id, "2001") - assertEquals(response1.data?.hero?.name, "R222-D222") - assertEquals(response1.data?.hero?.friends?.size, 2) - assertEquals(response1.data?.hero?.friends?.get(0)?.id, "1000") - assertEquals(response1.data?.hero?.friends?.get(0)?.name, "Beast") - assertEquals(response1.data?.hero?.friends?.get(1)?.id, "1003") - assertEquals(response1.data?.hero?.friends?.get(1)?.name, "Batman") + assertEquals("2001", response1.data?.hero?.id) + assertEquals("R222-D222", response1.data?.hero?.name) + assertEquals(2, response1.data?.hero?.friends?.size) + assertEquals("1000", response1.data?.hero?.friends?.get(0)?.id) + assertEquals("Beast", response1.data?.hero?.friends?.get(0)?.name) + assertEquals("1003", response1.data?.hero?.friends?.get(1)?.id) + assertEquals("Batman", response1.data?.hero?.friends?.get(1)?.name) // check if query2 sees data2 var response2 = apolloClient.query(query2).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response2.data?.hero?.id, "1000") - assertEquals(response2.data?.hero?.name, "Beast") + assertEquals("1000", response2.data?.hero?.id) + assertEquals("Beast", response2.data?.hero?.name) // rollback data1 store.rollbackOptimisticUpdates(mutationId1) // check if query2 sees the rollback response1 = apolloClient.query(query1).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response1.data?.hero?.id, "2001") - assertEquals(response1.data?.hero?.name, "R2-D2") - assertEquals(response1.data?.hero?.friends?.size, 3) - assertEquals(response1.data?.hero?.friends?.get(0)?.id, "1000") - assertEquals(response1.data?.hero?.friends?.get(0)?.name, "Beast") - assertEquals(response1.data?.hero?.friends?.get(1)?.id, "1002") - assertEquals(response1.data?.hero?.friends?.get(1)?.name, "Han Solo") - assertEquals(response1.data?.hero?.friends?.get(2)?.id, "1003") - assertEquals(response1.data?.hero?.friends?.get(2)?.name, "Leia Organa") + assertEquals("2001", response1.data?.hero?.id) + assertEquals("R2-D2", response1.data?.hero?.name) + assertEquals(3, response1.data?.hero?.friends?.size) + assertEquals("1000", response1.data?.hero?.friends?.get(0)?.id) + assertEquals("Beast", response1.data?.hero?.friends?.get(0)?.name) + assertEquals("1002", response1.data?.hero?.friends?.get(1)?.id) + assertEquals("Han Solo", response1.data?.hero?.friends?.get(1)?.name) + assertEquals("1003", response1.data?.hero?.friends?.get(2)?.id) + assertEquals("Leia Organa", response1.data?.hero?.friends?.get(2)?.name) // check if query2 see the latest optimistic updates response2 = apolloClient.query(query2).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response2.data?.hero?.id, "1000") - 
assertEquals(response2.data?.hero?.name, "Beast") + assertEquals("1000", response2.data?.hero?.id) + assertEquals("Beast", response2.data?.hero?.name) // rollback query2 optimistic updates store.rollbackOptimisticUpdates(mutationId2) // check if query2 see the latest optimistic updates response2 = apolloClient.query(query2).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response2.data?.hero?.id, "1000") - assertEquals(response2.data?.hero?.name, "SuperMan") + assertEquals("1000", response2.data?.hero?.id) + assertEquals("SuperMan", response2.data?.hero?.name) } @Test @@ -222,16 +274,16 @@ class OptimisticCacheTest { var watcherData = channel.receive() // before mutation and optimistic updates - assertEquals(watcherData?.reviews?.size, 3) - assertEquals(watcherData?.reviews?.get(0)?.id, "empireReview1") - assertEquals(watcherData?.reviews?.get(0)?.stars, 1) - assertEquals(watcherData?.reviews?.get(0)?.commentary, "Boring") - assertEquals(watcherData?.reviews?.get(1)?.id, "empireReview2") - assertEquals(watcherData?.reviews?.get(1)?.stars, 2) - assertEquals(watcherData?.reviews?.get(1)?.commentary, "So-so") - assertEquals(watcherData?.reviews?.get(2)?.id, "empireReview3") - assertEquals(watcherData?.reviews?.get(2)?.stars, 5) - assertEquals(watcherData?.reviews?.get(2)?.commentary, "Amazing") + assertEquals(3, watcherData?.reviews?.size) + assertEquals("empireReview1", watcherData?.reviews?.get(0)?.id) + assertEquals(1, watcherData?.reviews?.get(0)?.stars) + assertEquals("Boring", watcherData?.reviews?.get(0)?.commentary) + assertEquals("empireReview2", watcherData?.reviews?.get(1)?.id) + assertEquals(2, watcherData?.reviews?.get(1)?.stars) + assertEquals("So-so", watcherData?.reviews?.get(1)?.commentary) + assertEquals("empireReview3", watcherData?.reviews?.get(2)?.id) + assertEquals(5, watcherData?.reviews?.get(2)?.stars) + assertEquals("Amazing", watcherData?.reviews?.get(2)?.commentary) /** * There is a small potential for a race condition here. 
The changedKeys event from the optimistic updates might @@ -266,30 +318,30 @@ class OptimisticCacheTest { * optimistic updates */ watcherData = channel.receive() - assertEquals(watcherData?.reviews?.size, 3) - assertEquals(watcherData?.reviews?.get(0)?.id, "empireReview1") - assertEquals(watcherData?.reviews?.get(0)?.stars, 1) - assertEquals(watcherData?.reviews?.get(0)?.commentary, "Boring") - assertEquals(watcherData?.reviews?.get(1)?.id, "empireReview2") - assertEquals(watcherData?.reviews?.get(1)?.stars, 5) - assertEquals(watcherData?.reviews?.get(1)?.commentary, "Great") - assertEquals(watcherData?.reviews?.get(2)?.id, "empireReview3") - assertEquals(watcherData?.reviews?.get(2)?.stars, 5) - assertEquals(watcherData?.reviews?.get(2)?.commentary, "Amazing") + assertEquals(3, watcherData?.reviews?.size) + assertEquals("empireReview1", watcherData?.reviews?.get(0)?.id) + assertEquals(1, watcherData?.reviews?.get(0)?.stars) + assertEquals("Boring", watcherData?.reviews?.get(0)?.commentary) + assertEquals("empireReview2", watcherData?.reviews?.get(1)?.id) + assertEquals(5, watcherData?.reviews?.get(1)?.stars) + assertEquals("Great", watcherData?.reviews?.get(1)?.commentary) + assertEquals("empireReview3", watcherData?.reviews?.get(2)?.id) + assertEquals(5, watcherData?.reviews?.get(2)?.stars) + assertEquals("Amazing", watcherData?.reviews?.get(2)?.commentary) // after mutation with rolled back optimistic updates @Suppress("DEPRECATION") watcherData = channel.awaitElement() - assertEquals(watcherData?.reviews?.size, 3) - assertEquals(watcherData?.reviews?.get(0)?.id, "empireReview1") - assertEquals(watcherData?.reviews?.get(0)?.stars, 1) - assertEquals(watcherData?.reviews?.get(0)?.commentary, "Boring") - assertEquals(watcherData?.reviews?.get(1)?.id, "empireReview2") - assertEquals(watcherData?.reviews?.get(1)?.stars, 4) - assertEquals(watcherData?.reviews?.get(1)?.commentary, "Not Bad") - assertEquals(watcherData?.reviews?.get(2)?.id, "empireReview3") - assertEquals(watcherData?.reviews?.get(2)?.stars, 5) - assertEquals(watcherData?.reviews?.get(2)?.commentary, "Amazing") + assertEquals(3, watcherData?.reviews?.size) + assertEquals("empireReview1", watcherData?.reviews?.get(0)?.id) + assertEquals(1, watcherData?.reviews?.get(0)?.stars) + assertEquals("Boring", watcherData?.reviews?.get(0)?.commentary) + assertEquals("empireReview2", watcherData?.reviews?.get(1)?.id) + assertEquals(4, watcherData?.reviews?.get(1)?.stars) + assertEquals("Not Bad", watcherData?.reviews?.get(1)?.commentary) + assertEquals("empireReview3", watcherData?.reviews?.get(2)?.id) + assertEquals(5, watcherData?.reviews?.get(2)?.stars) + assertEquals("Amazing", watcherData?.reviews?.get(2)?.commentary) job.cancel() } @@ -346,58 +398,58 @@ class OptimisticCacheTest { // check if query1 see optimistic updates var response1 = apolloClient.query(query1).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response1.data?.hero?.id, "2001") - assertEquals(response1.data?.hero?.name, "R222-D222") - assertEquals(response1.data?.hero?.friends?.size, 2) - assertEquals(response1.data?.hero?.friends?.get(0)?.id, "1000") - assertEquals(response1.data?.hero?.friends?.get(0)?.name, "Spiderman") - assertEquals(response1.data?.hero?.friends?.get(1)?.id, "1003") - assertEquals(response1.data?.hero?.friends?.get(1)?.name, "Batman") + assertEquals("2001", response1.data?.hero?.id) + assertEquals("R222-D222", response1.data?.hero?.name) + assertEquals(2, response1.data?.hero?.friends?.size) + assertEquals("1000", 
response1.data?.hero?.friends?.get(0)?.id) + assertEquals("Spiderman", response1.data?.hero?.friends?.get(0)?.name) + assertEquals("1003", response1.data?.hero?.friends?.get(1)?.id) + assertEquals("Batman", response1.data?.hero?.friends?.get(1)?.name) // check if query2 see the latest optimistic updates var response2 = apolloClient.query(query2).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response2.data?.hero?.id, "1000") - assertEquals(response2.data?.hero?.name, "Spiderman") + assertEquals("1000", response2.data?.hero?.id) + assertEquals("Spiderman", response2.data?.hero?.name) // rollback query2 optimistic updates store.rollbackOptimisticUpdates(mutationId2) // check if query1 see the latest optimistic updates response1 = apolloClient.query(query1).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response1.data?.hero?.id, "2001") - assertEquals(response1.data?.hero?.name, "R222-D222") - assertEquals(response1.data?.hero?.friends?.size, 2) - assertEquals(response1.data?.hero?.friends?.get(0)?.id, "1000") - assertEquals(response1.data?.hero?.friends?.get(0)?.name, "Robocop") - assertEquals(response1.data?.hero?.friends?.get(1)?.id, "1003") - assertEquals(response1.data?.hero?.friends?.get(1)?.name, "Batman") + assertEquals("2001", response1.data?.hero?.id) + assertEquals("R222-D222", response1.data?.hero?.name) + assertEquals(2, response1.data?.hero?.friends?.size) + assertEquals("1000", response1.data?.hero?.friends?.get(0)?.id) + assertEquals("Robocop", response1.data?.hero?.friends?.get(0)?.name) + assertEquals("1003", response1.data?.hero?.friends?.get(1)?.id) + assertEquals("Batman", response1.data?.hero?.friends?.get(1)?.name) // check if query2 see the latest optimistic updates response2 = apolloClient.query(query2).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response2.data?.hero?.id, "1000") - assertEquals(response2.data?.hero?.name, "Robocop") + assertEquals("1000", response2.data?.hero?.id) + assertEquals("Robocop", response2.data?.hero?.name) // rollback query1 optimistic updates store.rollbackOptimisticUpdates(mutationId1) // check if query1 see the latest non-optimistic updates response1 = apolloClient.query(query1).fetchPolicy(FetchPolicy.CacheOnly).execute() - assertEquals(response1.data?.hero?.id, "2001") - assertEquals(response1.data?.hero?.name, "R2-D2") - assertEquals(response1.data?.hero?.friends?.size, 3) - assertEquals(response1.data?.hero?.friends?.get(0)?.id, "1000") - assertEquals(response1.data?.hero?.friends?.get(0)?.name, "SuperMan") - assertEquals(response1.data?.hero?.friends?.get(1)?.id, "1002") - assertEquals(response1.data?.hero?.friends?.get(1)?.name, "Han Solo") - assertEquals(response1.data?.hero?.friends?.get(2)?.id, "1003") - assertEquals(response1.data?.hero?.friends?.get(2)?.name, "Leia Organa") + assertEquals("2001", response1.data?.hero?.id) + assertEquals("R2-D2", response1.data?.hero?.name) + assertEquals(3, response1.data?.hero?.friends?.size) + assertEquals("1000", response1.data?.hero?.friends?.get(0)?.id) + assertEquals("SuperMan", response1.data?.hero?.friends?.get(0)?.name) + assertEquals("1002", response1.data?.hero?.friends?.get(1)?.id) + assertEquals("Han Solo", response1.data?.hero?.friends?.get(1)?.name) + assertEquals("1003", response1.data?.hero?.friends?.get(2)?.id) + assertEquals("Leia Organa", response1.data?.hero?.friends?.get(2)?.name) // check if query2 see the latest non-optimistic updates response2 = apolloClient.query(query2).fetchPolicy(FetchPolicy.CacheOnly).execute() - 
assertEquals(response2.data?.hero?.id, "1000")
-    assertEquals(response2.data?.hero?.name, "SuperMan")
+    assertEquals("1000", response2.data?.hero?.id)
+    assertEquals("SuperMan", response2.data?.hero?.name)
   }
 }

From 6a2ee75aced5a894ceb414de3421dda85fefdd86 Mon Sep 17 00:00:00 2001
From: BoD
Date: Tue, 5 Nov 2024 16:14:40 +0100
Subject: [PATCH 2/4] Tweak ApolloStore KDoc

---
 .../cache/normalized/ApolloStore.kt | 88 +++++++++++--------
 1 file changed, 53 insertions(+), 35 deletions(-)

diff --git a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt
index 579db09a..5a8df39a 100644
--- a/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt
+++ b/normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/cache/normalized/ApolloStore.kt
@@ -41,10 +41,11 @@ interface ApolloStore {
   val changedKeys: SharedFlow<Set<String>>
 
   /**
-   * Read GraphQL operation from store.
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Reads an operation from the store.
    *
-   * @param operation to be read
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
+   *
+   * @param operation the operation to read
    *
    * @throws [com.apollographql.apollo.exception.CacheMissException] on cache miss
    * @throws [com.apollographql.apollo.exception.ApolloException] on other cache read errors
    *
@@ -58,11 +59,12 @@ interface ApolloStore {
   ): ReadResult<D>
 
   /**
-   * Read a GraphQL fragment from the store.
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Reads a fragment from the store.
+   *
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
    *
-   * @param fragment to be read
-   * @param cacheKey [CacheKey] to be used to find cache record for the fragment
+   * @param fragment the fragment to read
+   * @param cacheKey the root where to read the fragment data from
    *
    * @throws [com.apollographql.apollo.exception.CacheMissException] on cache miss
    * @throws [com.apollographql.apollo.exception.ApolloException] on other cache read errors
    *
@@ -77,11 +79,12 @@ interface ApolloStore {
   ): ReadResult<D>
 
   /**
-   * Write an operation data to the store.
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Writes an operation to the store.
    *
-   * @param operation [Operation] response data of which should be written to the store
-   * @param operationData [Operation.Data] operation response data to be written to the store
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
+   *
+   * @param operation the operation to write
+   * @param operationData the operation data to write
+   * @return the changed keys
    *
    * @see publish
    */
@@ -94,12 +97,13 @@ interface ApolloStore {
   ): Set<String>
 
   /**
-   * Write a fragment data to the store.
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Writes a fragment to the store.
    *
-   * @param fragment data to be written to the store
-   * @param cacheKey [CacheKey] to be used as root record key
-   * @param fragmentData [Fragment.Data] to be written to the store
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
+   *
+   * @param fragment the fragment to write
+   * @param cacheKey the root where to write the fragment data to
+   * @param fragmentData the fragment data to write
    * @return the changed keys
    *
    * @see publish
@@ -113,12 +117,13 @@ interface ApolloStore {
   ): Set<String>
 
   /**
-   * Write operation data to the optimistic store.
+   * Writes an operation to the optimistic store.
+   *
    * This is a synchronous operation that might block if the underlying cache is doing IO.
    *
-   * @param operation [Operation] response data of which should be written to the store
-   * @param operationData [Operation.Data] operation response data to be written to the store
-   * @param mutationId mutation unique identifier
+   * @param operation the operation to write
+   * @param operationData the operation data to write
+   * @param mutationId a unique identifier for this optimistic update
    * @return the changed keys
    *
    * @see publish
@@ -152,46 +157,53 @@ interface ApolloStore {
   ): Set<String>
 
   /**
-   * Rollback operation data optimistic updates.
+   * Rolls back optimistic updates.
+   *
    * This is a synchronous operation that might block if the underlying cache is doing IO.
    *
-   * @param mutationId mutation unique identifier
+   * @param mutationId the unique identifier of the optimistic update to roll back
    * @return the changed keys
+   *
+   * @see publish
    */
   fun rollbackOptimisticUpdates(
       mutationId: Uuid,
   ): Set<String>
 
   /**
-   * Clear all records from this [ApolloStore].
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Clears all records.
+   *
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
    *
    * @return `true` if all records were successfully removed, `false` otherwise
    */
   fun clearAll(): Boolean
 
   /**
-   * Remove cache record by the key
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Removes a record by its key.
+   *
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
    *
-   * @param cacheKey of record to be removed
-   * @param cascade defines if remove operation is propagated to the referenced entities
+   * @param cacheKey the key of the record to remove
+   * @param cascade whether referenced records should also be removed
    * @return `true` if the record was successfully removed, `false` otherwise
    */
   fun remove(cacheKey: CacheKey, cascade: Boolean = true): Boolean
 
   /**
-   * Remove a list of cache records
-   * This is an optimized version of [remove] for caches that can batch operations
-   * This is a synchronous operation that might block if the underlying cache is doing IO
+   * Removes a list of records by their keys.
+   * This is an optimized version of [remove] for caches that can batch operations.
    *
-   * @param cacheKeys keys of records to be removed
+   * This is a synchronous operation that might block if the underlying cache is doing IO.
+   *
+   * @param cacheKeys the keys of the records to remove
+   * @param cascade whether referenced records should also be removed
    * @return the number of records that have been removed
    */
   fun remove(cacheKeys: List<CacheKey>, cascade: Boolean = true): Int
 
   /**
-   * Normalize [data] to a map of [Record] keyed by [Record.key].
+   * Normalizes operation data to a map of [Record] keyed by [Record.key].
   */
   fun <D : Operation.Data> normalize(
       operation: Operation<D>,
@@ -200,12 +212,17 @@ interface ApolloStore {
   ): Map<String, Record>
 
   /**
+   * Publishes a set of keys that have changed. This will notify subscribers of [changedKeys].
+   *
+   * @see changedKeys
+   *
    * @param keys A set of keys of [Record] which have changed.
*/ suspend fun publish(keys: Set) /** * Direct access to the cache. + * * This is a synchronous operation that might block if the underlying cache is doing IO. * * @param block a function that can access the cache. @@ -213,13 +230,14 @@ interface ApolloStore { fun accessCache(block: (NormalizedCache) -> R): R /** - * Dump the content of the store for debugging purposes. + * Dumps the content of the store for debugging purposes. + * * This is a synchronous operation that might block if the underlying cache is doing IO. */ fun dump(): Map, Map> /** - * Release resources associated with this store. + * Releases resources associated with this store. */ fun dispose() From b56693b9e5ae46bb7065e26232eb03976e84de7d Mon Sep 17 00:00:00 2001 From: BoD Date: Tue, 5 Nov 2024 16:19:44 +0100 Subject: [PATCH 3/4] Update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 389ff768..3954c7ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ - For consistency, `MemoryCacheFactory` and `MemoryCache` are now in the `com.apollographql.cache.normalized.memory` package - Remove deprecated symbols - Add `IdCacheKeyGenerator` and `IdCacheKeyResolver` (#41) +- Add `ApolloStore.writeOptimisticUpdates` API for fragments (#55) # Version 0.0.3 _2024-09-20_ From 829a08a87c22b37849f39b4843875f624d635a24 Mon Sep 17 00:00:00 2001 From: BoD Date: Wed, 6 Nov 2024 11:58:33 +0100 Subject: [PATCH 4/4] Update API Dump --- normalized-cache-incubating/api/normalized-cache-incubating.api | 2 ++ .../api/normalized-cache-incubating.klib.api | 1 + 2 files changed, 3 insertions(+) diff --git a/normalized-cache-incubating/api/normalized-cache-incubating.api b/normalized-cache-incubating/api/normalized-cache-incubating.api index b82bd953..2bac00be 100644 --- a/normalized-cache-incubating/api/normalized-cache-incubating.api +++ b/normalized-cache-incubating/api/normalized-cache-incubating.api @@ -13,6 +13,7 @@ public abstract interface class com/apollographql/cache/normalized/ApolloStore { public abstract fun rollbackOptimisticUpdates (Ljava/util/UUID;)Ljava/util/Set; public abstract fun writeFragment (Lcom/apollographql/apollo/api/Fragment;Lcom/apollographql/cache/normalized/api/CacheKey;Lcom/apollographql/apollo/api/Fragment$Data;Lcom/apollographql/apollo/api/CustomScalarAdapters;Lcom/apollographql/cache/normalized/api/CacheHeaders;)Ljava/util/Set; public abstract fun writeOperation (Lcom/apollographql/apollo/api/Operation;Lcom/apollographql/apollo/api/Operation$Data;Lcom/apollographql/apollo/api/CustomScalarAdapters;Lcom/apollographql/cache/normalized/api/CacheHeaders;)Ljava/util/Set; + public abstract fun writeOptimisticUpdates (Lcom/apollographql/apollo/api/Fragment;Lcom/apollographql/cache/normalized/api/CacheKey;Lcom/apollographql/apollo/api/Fragment$Data;Ljava/util/UUID;Lcom/apollographql/apollo/api/CustomScalarAdapters;)Ljava/util/Set; public abstract fun writeOptimisticUpdates (Lcom/apollographql/apollo/api/Operation;Lcom/apollographql/apollo/api/Operation$Data;Ljava/util/UUID;Lcom/apollographql/apollo/api/CustomScalarAdapters;)Ljava/util/Set; } @@ -23,6 +24,7 @@ public final class com/apollographql/cache/normalized/ApolloStore$DefaultImpls { public static synthetic fun remove$default (Lcom/apollographql/cache/normalized/ApolloStore;Ljava/util/List;ZILjava/lang/Object;)I public static synthetic fun writeFragment$default 
(Lcom/apollographql/cache/normalized/ApolloStore;Lcom/apollographql/apollo/api/Fragment;Lcom/apollographql/cache/normalized/api/CacheKey;Lcom/apollographql/apollo/api/Fragment$Data;Lcom/apollographql/apollo/api/CustomScalarAdapters;Lcom/apollographql/cache/normalized/api/CacheHeaders;ILjava/lang/Object;)Ljava/util/Set; public static synthetic fun writeOperation$default (Lcom/apollographql/cache/normalized/ApolloStore;Lcom/apollographql/apollo/api/Operation;Lcom/apollographql/apollo/api/Operation$Data;Lcom/apollographql/apollo/api/CustomScalarAdapters;Lcom/apollographql/cache/normalized/api/CacheHeaders;ILjava/lang/Object;)Ljava/util/Set; + public static synthetic fun writeOptimisticUpdates$default (Lcom/apollographql/cache/normalized/ApolloStore;Lcom/apollographql/apollo/api/Fragment;Lcom/apollographql/cache/normalized/api/CacheKey;Lcom/apollographql/apollo/api/Fragment$Data;Ljava/util/UUID;Lcom/apollographql/apollo/api/CustomScalarAdapters;ILjava/lang/Object;)Ljava/util/Set; public static synthetic fun writeOptimisticUpdates$default (Lcom/apollographql/cache/normalized/ApolloStore;Lcom/apollographql/apollo/api/Operation;Lcom/apollographql/apollo/api/Operation$Data;Ljava/util/UUID;Lcom/apollographql/apollo/api/CustomScalarAdapters;ILjava/lang/Object;)Ljava/util/Set; } diff --git a/normalized-cache-incubating/api/normalized-cache-incubating.klib.api b/normalized-cache-incubating/api/normalized-cache-incubating.klib.api index 747f46b3..cc545542 100644 --- a/normalized-cache-incubating/api/normalized-cache-incubating.klib.api +++ b/normalized-cache-incubating/api/normalized-cache-incubating.klib.api @@ -62,6 +62,7 @@ abstract interface com.apollographql.cache.normalized.api/RecordMerger { // com. abstract interface com.apollographql.cache.normalized/ApolloStore { // com.apollographql.cache.normalized/ApolloStore|null[0] abstract fun <#A1: com.apollographql.apollo.api/Fragment.Data> readFragment(com.apollographql.apollo.api/Fragment<#A1>, com.apollographql.cache.normalized.api/CacheKey, com.apollographql.apollo.api/CustomScalarAdapters = ..., com.apollographql.cache.normalized.api/CacheHeaders = ...): com.apollographql.cache.normalized/ApolloStore.ReadResult<#A1> // com.apollographql.cache.normalized/ApolloStore.readFragment|readFragment(com.apollographql.apollo.api.Fragment<0:0>;com.apollographql.cache.normalized.api.CacheKey;com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.CacheHeaders){0§}[0] abstract fun <#A1: com.apollographql.apollo.api/Fragment.Data> writeFragment(com.apollographql.apollo.api/Fragment<#A1>, com.apollographql.cache.normalized.api/CacheKey, #A1, com.apollographql.apollo.api/CustomScalarAdapters = ..., com.apollographql.cache.normalized.api/CacheHeaders = ...): kotlin.collections/Set // com.apollographql.cache.normalized/ApolloStore.writeFragment|writeFragment(com.apollographql.apollo.api.Fragment<0:0>;com.apollographql.cache.normalized.api.CacheKey;0:0;com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.CacheHeaders){0§}[0] + abstract fun <#A1: com.apollographql.apollo.api/Fragment.Data> writeOptimisticUpdates(com.apollographql.apollo.api/Fragment<#A1>, com.apollographql.cache.normalized.api/CacheKey, #A1, com.benasher44.uuid/Uuid, com.apollographql.apollo.api/CustomScalarAdapters = ...): kotlin.collections/Set // 
com.apollographql.cache.normalized/ApolloStore.writeOptimisticUpdates|writeOptimisticUpdates(com.apollographql.apollo.api.Fragment<0:0>;com.apollographql.cache.normalized.api.CacheKey;0:0;com.benasher44.uuid.Uuid;com.apollographql.apollo.api.CustomScalarAdapters){0§}[0] abstract fun <#A1: com.apollographql.apollo.api/Operation.Data> normalize(com.apollographql.apollo.api/Operation<#A1>, #A1, com.apollographql.apollo.api/CustomScalarAdapters): kotlin.collections/Map // com.apollographql.cache.normalized/ApolloStore.normalize|normalize(com.apollographql.apollo.api.Operation<0:0>;0:0;com.apollographql.apollo.api.CustomScalarAdapters){0§}[0] abstract fun <#A1: com.apollographql.apollo.api/Operation.Data> readOperation(com.apollographql.apollo.api/Operation<#A1>, com.apollographql.apollo.api/CustomScalarAdapters = ..., com.apollographql.cache.normalized.api/CacheHeaders = ...): com.apollographql.cache.normalized/ApolloStore.ReadResult<#A1> // com.apollographql.cache.normalized/ApolloStore.readOperation|readOperation(com.apollographql.apollo.api.Operation<0:0>;com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.CacheHeaders){0§}[0] abstract fun <#A1: com.apollographql.apollo.api/Operation.Data> writeOperation(com.apollographql.apollo.api/Operation<#A1>, #A1, com.apollographql.apollo.api/CustomScalarAdapters = ..., com.apollographql.cache.normalized.api/CacheHeaders = ...): kotlin.collections/Set // com.apollographql.cache.normalized/ApolloStore.writeOperation|writeOperation(com.apollographql.apollo.api.Operation<0:0>;0:0;com.apollographql.apollo.api.CustomScalarAdapters;com.apollographql.cache.normalized.api.CacheHeaders){0§}[0]
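The publish/changedKeys contract documented in PATCH 2/4 can be summarized with a small sketch. It is illustrative only (the helper name and coroutine scope are assumptions, not part of these patches): a collector of ApolloStore.changedKeys receives every set of record keys passed to publish(), including the keys returned by the new fragment-based writeOptimisticUpdates and by rollbackOptimisticUpdates.

    import com.apollographql.cache.normalized.ApolloStore
    import kotlinx.coroutines.CoroutineScope
    import kotlinx.coroutines.Job
    import kotlinx.coroutines.launch

    // Hypothetical observer: changedKeys is a SharedFlow<Set<String>>, so collecting
    // it reports every set of record keys published after a write or rollback.
    fun observeChangedKeys(scope: CoroutineScope, store: ApolloStore): Job =
        scope.launch {
          store.changedKeys.collect { keys ->
            println("Records changed: $keys")
          }
        }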