diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index c7cdaf0..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-# sudo: required
-# services:
-#   - docker
-# before_install:
-#   - docker build -t open-source-labs/obsidian .
-# script:
-#   - docker run open-source-labs/obsidian test --allow-net --allow-read --allow-env --unstable deno.test.ts
-# env:
-#   global:
-#     secure: sfaHXoKGXnAkwS/QK2pdTPC1NVqd9+pVWImEcz8W9IXFRsOcHpt9lVsmB0dFvDpVm+9KFpcBwnpfOtiyoj6Q9NGIY71jG58kYHdbcWBlR3onS7/JBvgEu94DC7HZR+rQ4/GW+ROh4avBt6RjDSuLk4qQ73Yc3+SDKAl+M0PTADlVZpkicCID59qcdynbAjXu5W8lW2Hp0hqO72Prx/8hgmchI0I7zSYcPBFSy3WaEPJa52yKesVwsHcFtzOBMrDAdE+R028AzdBAXUoiqh6cTVeLSTL1jnIWbCBtfAROlTR82cZyo4c7PJxYyqT3mhRSZvBN/3hdW7+xMOzq6gmpmcl1UO2Q5i4xXEGnatfuzMVa/8SqJZoG2IFIWZ4mvelwufHVuLgF+6JvK2BKSpjFfSUGo0p9G0bMg+GHwRipTPIq1If3ELkflAM6QJwL7TritwtWzWXfAfoZ3KALdPTiFzJAKyQfFvSwWbfXqAgqZIbLjlzSgOJ4QKWD6CBksU7b4Oky6hr/+R+ZihzQLtWKkk/8cklEG/NJlknS2vPRG8xRRF7/C+vSFPrCkmsakPc8c1iGfai8J3Vc09Pg0UeShJDWkSQ6QP165ub6LEL5nz0Qzp0CD1sSQu5re5/M5ef9V69L2pdYhEj0RaZ241DF5efzYAgLI8SvMr5TcTr06+8=
diff --git a/ObsidianWrapper/ObsidianWrapper.jsx b/ObsidianWrapper/ObsidianWrapper.jsx
index a5fff78..ced3c9a 100644
--- a/ObsidianWrapper/ObsidianWrapper.jsx
+++ b/ObsidianWrapper/ObsidianWrapper.jsx
@@ -1,35 +1,61 @@
 import * as React from "https://esm.sh/react@18";
 import LFUCache from '../src/Browser/lfuBrowserCache.js';
 import LRUCache from '../src/Browser/lruBrowserCache.js';
+import WTinyLFUCache from "../src/Browser/wTinyLFUBrowserCache.js";
 import { insertTypenames } from '../src/Browser/insertTypenames.js';
+import { sha256 } from 'https://denopkg.com/chiefbiiko/sha256@v1.0.0/mod.ts';

 const cacheContext = React.createContext();

 function ObsidianWrapper(props) {
-  const { algo, capacity } = props
-  const [cache, setCache] = React.useState(new LFUCache(Number(capacity || 2000)));
-  if(algo === 'LRU') setCache(new LRUCache(Number(capacity || 2000)));
-  // You have to put your Google Chrome Obsidian developer tool extension id to connect Obsidian Wrapper with dev tool
-  const chromeExtensionId = 'apcpdmmbhhephobnmnllbklplpaoiemo';
-  // initialize cache in local storage
-  //window.localStorage.setItem('cache', JSON.stringify(cache));
+  // props to be inputted by user when using the Obsidian Wrapper
+  const { algo, capacity, searchTerms, useCache, persistQueries } = props;
+  // if useCache hasn't been set, default caching to true
+  let caching = true;
+  // if it has been set to false, turn client-side caching off
+  if (useCache === false) caching = false;
+
+  // algo defaults to LFU, capacity defaults to 2000
+  const setAlgoCap = (algo, capacity) => {
+    let cache;
+    if(caching && algo === 'LRU'){
+      cache = new LRUCache(Number(capacity || 2000))
+    } else if (caching && algo === 'W-TinyLFU'){
+      cache = new WTinyLFUCache(Number(capacity || 2000))
+    } else if (caching) {
+      cache = new LFUCache(Number(capacity || 2000))
+    }
+    return cache;
+  }
+
+  // once cache is initialized, cannot setCache
+  // state for cache is initialized based on developer settings in wrapper
+  // to successfully change between algo types for testing, kill the server, change the algo type in wrapper, then restart server
+  const [cache, setCache] = React.useState(setAlgoCap(algo, capacity));
+
+  // FOR DEVTOOL - listening for message from content.js to be able to send algo type and capacity to devtool
+  window.addEventListener('message', msg => {
+    if(msg.data.type === 'algocap'){
+      window.postMessage({
+        algo: algo ? algo : 'LFU',
+        capacity: capacity ? capacity : 2000
+      })
+    }
+  });
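+
+  // NOTE: query() below consults the client-side cache first (when caching and
+  // cacheRead are enabled) and falls back to a network request via its nested
+  // hunt() helper; with persistQueries enabled, hunt() initially sends only a
+  // sha256 hash of the query string rather than the full query.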
   async function query(query, options = {}) {
-    // dev tool messages
+    // FOR DEVTOOL - startTime is used to calculate the performance of the cache
+    // startDate is to find out when query was made, this data is passed to devtools
     const startTime = Date.now();
-    /*
-    chrome.runtime.sendMessage(chromeExtensionId, { query: query });
-    chrome.runtime.sendMessage(chromeExtensionId, {
-      cache: window.localStorage.getItem('cache'),
-    });
-    */
+    const startDate = new Date(Date.now());

     // set the options object default properties if not provided
     const {
       endpoint = '/graphql',
-      cacheRead = true,
-      cacheWrite = true,
+      cacheRead = !caching ? false : true,
+      cacheWrite = !caching ? false : true,
       pollInterval = null,
-      wholeQuery = true,
+      wholeQuery = false, //Note: logic for true is currently nonfunctional
     } = options;

     // when pollInterval is not null the query will be sent to the server every inputted number of milliseconds
@@ -45,70 +71,101 @@ function ObsidianWrapper(props) {
       return interval;
     }

-    // when cacheRead set to true
-    if (cacheRead) {
+    // when cacheRead set to true & we are utilizing client side caching
+    if (cacheRead && caching) {
       let resObj;
       // when the developer decides to only utilize whole query for cache
-      if (!wholeQuery) resObj = await cache.readWholeQuery(query);
+      if (wholeQuery) resObj = await cache.readWholeQuery(query);
+      // attempt to read from the cache
       else resObj = await cache.read(query);
       // check if query is stored in cache
       if (resObj) {
         // returning cached response as a promise
         const cacheHitResponseTime = Date.now() - startTime;
-        // Allow for access of the response time
-        // const cacheCopy = {...cache};
-        // cacheCopy.callTime = cacheHitResponseTime;
-        // setCache(cacheCopy);
-        resObj['time'] = cacheHitResponseTime
+        // FOR DEVTOOL - sends message to content.js with query metrics when query is a hit
+        window.postMessage({
+          type: 'query',
+          time: cacheHitResponseTime,
+          date: startDate.toDateString().slice(0, 24),
+          query: query,
+          hit: true
+        });

-        console.log(
-          "From cacheRead: Here's the response time on the front end: ",
-          cacheHitResponseTime
-        );
-        /*chrome.runtime.sendMessage(chromeExtensionId, {
-          cacheHitResponseTime: cacheHitResponseTime,
-        });*/
         return new Promise((resolve, reject) => resolve(resObj));
       }
       // execute graphql fetch request if cache miss
       return new Promise((resolve, reject) => resolve(hunt(query)));
-      // when cacheRead set to false
     }
-    if (!cacheRead) {
+    // when cacheRead set to false & not using client-side cache
+    if (!cacheRead || !caching) {
       return new Promise((resolve, reject) => resolve(hunt(query)));
     }

-    // when cache miss or on intervals
+    // function to be called on cache miss or on intervals or not looking in the cache
    async function hunt(query) {
-      if (wholeQuery) query = insertTypenames(query);
+      if (!wholeQuery) query = insertTypenames(query);
       try {
-        // send fetch request with query
-        const resJSON = await fetch(endpoint, {
-          method: 'POST',
-          headers: {
-            'Content-Type': 'application/json',
-            Accept: 'application/json',
-          },
-          body: JSON.stringify({ query }),
-        });
+        let resJSON;
+        // IF WE ARE USING PERSIST QUERIES
+        if (persistQueries) {
+          // SEND THE HASH
+          const hash = sha256(query, 'utf8', 'hex');
+          resJSON = await fetch(endpoint, {
+            method: 'POST',
+            headers: {
+              'Content-Type': 'application/json',
+              Accept: 'application/json',
+            },
+            body: JSON.stringify({ hash }),
+          });
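+
+          // NOTE: a 204 status below is the server signaling that this hash is
+          // not yet in its hash table; the client re-sends the request with both
+          // the hash and the full query so the server can register the pair.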
+          // IF HASH WAS NOT FOUND IN HASH TABLE
+          if (resJSON.status === 204) {
+            // SEND NEW REQUEST WITH HASH AND QUERY
+            resJSON = await fetch(endpoint, {
+              method: 'POST',
+              headers: {
+                'Content-Type': 'application/json',
+                Accept: 'application/json',
+              },
+              body: JSON.stringify({ hash, query }),
+            });
+          }
+
+        // IF WE ARE NOT USING PERSIST QUERIES
+        } else {
+          // JUST SEND THE QUERY ONLY
+          resJSON = await fetch(endpoint, {
+            method: 'POST',
+            headers: {
+              'Content-Type': 'application/json',
+              Accept: 'application/json',
+            },
+            body: JSON.stringify({ query }),
+          });
+        }
+
         const resObj = await resJSON.json();
         const deepResObj = { ...resObj };
         // update result in cache if cacheWrite is set to true
-        if (cacheWrite && resObj.data[Object.keys(resObj.data)[0]] !== null) {
-          if (!wholeQuery) cache.writeWholeQuery(query, deepResObj);
+        if (cacheWrite && caching && resObj.data[Object.keys(resObj.data)[0]] !== null) {
+          if (wholeQuery) cache.writeWholeQuery(query, deepResObj);
           else if(resObj.data[Object.keys(resObj.data)[0]].length > cache.capacity) console.log('Please increase cache capacity');
-          else cache.write(query, deepResObj);
+          else cache.write(query, deepResObj, searchTerms);
         }
         const cacheMissResponseTime = Date.now() - startTime;
-        /*chrome.runtime.sendMessage(chromeExtensionId, {
-          cacheMissResponseTime: cacheMissResponseTime,
-        });*/
-        resObj['time'] = cacheMissResponseTime
-        console.log(
-          "After the hunt: Here's the response time on the front end: ",
-          cacheMissResponseTime
-        );
+
+        // FOR DEVTOOL - sends message to content.js when query is a miss
+        window.postMessage({
+          type: 'query',
+          time: cacheMissResponseTime,
+          date: startDate.toDateString().slice(0, 24),
+          query: query,
+          hit: false
+        });
+
         return resObj;
       } catch (e) {
         console.log(e);
@@ -121,20 +178,19 @@ function ObsidianWrapper(props) {
     cache.cacheClear();
   }

+  // NOTE - FOR DEVTOOL - no messages are currently being passed for mutations,
+  // so some logic in content.js and background.js may be missing to handle mutations
+  // breaking out writethrough logic vs. non-writethrough logic
   async function mutate(mutation, options = {}) {
-    // dev tool messages
-    // chrome.runtime.sendMessage(chromeExtensionId, {
-    //   mutation: mutation,
-    // });
     const startTime = Date.now();
     mutation = insertTypenames(mutation);
     const {
       endpoint = '/graphql',
-      cacheWrite = true,
+      cacheWrite = !caching ? false : true,
       toDelete = false,
       update = null,
-      writeThrough = true, // not true
+      writeThrough = true, // unsure if this boolean is semantically backwards or not
     } = options;
     try {
       if (!writeThrough) {
@@ -147,9 +203,6 @@ function ObsidianWrapper(props) {
           endpoint
         );
         const deleteMutationResponseTime = Date.now() - startTime;
-        chrome.runtime.sendMessage(chromeExtensionId, {
-          deleteMutationResponseTime: deleteMutationResponseTime,
-        });
         return responseObj;
       } else {
         // for add mutation
@@ -168,15 +221,9 @@ function ObsidianWrapper(props) {
         // GQL call to make changes and synchronize database
         console.log('WriteThrough - false ', responseObj);
         const addOrUpdateMutationResponseTime = Date.now() - startTime;
-        chrome.runtime.sendMessage(chromeExtensionId, {
-          addOrUpdateMutationResponseTime: addOrUpdateMutationResponseTime,
-        });
         return responseObj;
       }
     } else {
-      // copy-paste mutate logic from 4.
-
-      // use cache.write instead of cache.writeThrough
       const responseObj = await fetch(endpoint, {
         method: 'POST',
         headers: {
           'Content-Type': 'application/json',
           Accept: 'application/json',
         },
         body: JSON.stringify({ query: mutation }),
       }).then((resp) => resp.json());
-      if (!cacheWrite) return responseObj;
+      if (!cacheWrite || !caching) return responseObj;
       // first behaviour when delete cache is set to true
       if (toDelete) {
-        cache.write(mutation, responseObj, true);
+        cache.write(mutation, responseObj, searchTerms, true);
         return responseObj;
       }
       // second behaviour if update function provided
       if (update) {
         update(cache, responseObj);
       }

-      if(!responseObj.errors) cache.write(mutation, responseObj);
+      if(!responseObj.errors) cache.write(mutation, responseObj, searchTerms);
       // third behaviour just for normal update (no-delete, no update function)
       console.log('WriteThrough - true ', responseObj);
       return responseObj;
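For orientation, here is a hypothetical sketch of the server-side half of the persisted-query handshake the wrapper performs above. This is illustrative only, not Obsidian's actual implementation; `runGraphQL` and the in-memory `Map` are assumptions standing in for the router's real hash table and execution path.

```javascript
// Illustrative only: one way a server could back the wrapper's 204 handshake.
const queryHashTable = new Map(); // hash -> full query string

async function handlePersistedQuery(reqBody) {
  const { hash, query } = JSON.parse(reqBody);
  if (query) {
    // Client re-sent hash + query after a 204: register the pair, then execute.
    queryHashTable.set(hash, query);
    return { status: 200, body: await runGraphQL(query) }; // runGraphQL assumed
  }
  const stored = queryHashTable.get(hash);
  // Unknown hash: 204 tells the client to retry with the full query attached.
  if (!stored) return { status: 204 };
  return { status: 200, body: await runGraphQL(stored) };
}
```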
diff --git a/README.md b/README.md
index 22ea69e..224f019 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![Obsidian](./assets/logoSilver.jpg)
+![Obsidian](./assets/bannerfull_gradient.png)
GraphQL, built for Deno.
@@ -9,8 +9,6 @@
 Tweet
 from Lascaux
@@ -22,9 +20,12 @@
 ## Features

-- (New!) Server-side cache invalidation only on affected entries
-- (New!) Flexible cache responds with only data requested from selected fields
-- (New!) Developer tool for Obsidian is now updated to Manifest version 3 and invalid Bootstrap module imports were also fixed along with CodeMirror dependencies
+- (New!) Support for W-TinyLFU client-side cache that brings great hit-ratio performance with minimal memory overhead
+- (New!) Option to provide Obsidian with the search types your application uses, allowing data cached from complete dataset pulls to be accessible later on in searches for individual items
+- (New!) Refactored server-side caching with Redis
+- (New!) Rebuilt developer tool for Obsidian 8.0 for testing and analytics related to the new client caching options
+- (New!) Option for persistent queries, allowing only a smaller hash to be sent to the server on client-side cache misses, minimizing the cost of queries. Note that this mainly benefits frequent, repeated queries, since a query the server has not seen before costs one extra round trip to register its hash.
+- Flexible cache responds only with data requested from selected fields
- GraphQL query abstraction and caching improving the performance of your app
- SSR React wrapper, allowing you to cache in browser
- Configurable caching options, giving you complete control over your cache

@@ -60,21 +61,37 @@
const GraphQLRouter =
  (await ObsidianRouter) <
  ObsRouter >
  {
-    Router,
-    typeDefs: types,
-    resolvers: resolvers,
-    redisPort: 6379, //Desired redis port
-    useCache: true, //Boolean to toggle all cache functionality
-    usePlayground: true, //Boolean to allow for graphQL playground
-    useQueryCache: true, //Boolean to toggle full query cache
-    useRebuildCache: true, //Boolean to toggle rebuilding from normalized data
-    customIdentifier: ['id', '__typename'],
-    mutationTableMap = {}, //Object where keys are add mutation types and value is an array of affected tables (e.g. {addPlants: ['plants'], addMovie: ['movies']})
+    Router, // your router in deno
+    typeDefs: types, // graphQL typeDefs
+    resolvers: resolvers, // graphQL resolvers
  };

-// attach the graphql routers routes to our app
+// attach the graphql router's routes to your deno app
app.use(GraphQLRouter.routes(), GraphQLRouter.allowedMethods());
```

+## Selecting options for the Router
+
+```javascript
+const GraphQLRouter =
+  (await ObsidianRouter) <
+  ObsRouter >
+  {
+    Router, // Router that is initialized by the server
+    path: '/graphql', // endpoint for graphQL queries, default to '/graphql'
+    typeDefs: types, // graphQL typeDefs
+    resolvers: resolvers, // graphQL resolvers
+    usePlayground: true, // Boolean to allow for graphQL playground, default to false
+    useCache: true, // Boolean to toggle all cache functionality, default to true
+    redisPort: 6379, // Desired redis port, default to 6379
+    policy: 'allkeys-lru', // Option to select your Redis policy, default to allkeys-lru
+    maxmemory: '2000mb', // Option to select Redis capacity, default to 2000mb
+    searchTerms: [], // Optional array to allow broad queries to store according to search fields so individual searches are found in cache
+    persistQueries: true, // Boolean to toggle the use of persistent queries, default to false - NOTE: if using, must also be enabled in client wrapper
+    hashTableSize: 16, // Size of hash table for persistent queries, default to 16
+    maxQueryDepth: 0, // Maximum depth of query, default to 0
+    customIdentifier: ['__typename', '_id'], // keys to be used to identify and normalize object
+    mutationTableMap: {}, // Object where keys are add mutation types and value is an array of affected tables (e.g. {addPlants: ['plants'], addMovie: ['movies']})
+  };
+```
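The `searchTerms` option deserves a quick illustration. Below is a hedged sketch of the intended workflow (the movie schema, the field names, the import path, and the `useObsidian` hook are assumptions for illustration, not verbatim library API): after one broad query caches the complete dataset, a later search for an individual item by a configured search field can be answered from the cache.

```javascript
import { useObsidian } from 'https://deno.land/x/obsidian/clientMod.ts'; // import path assumed

// Assumes the router/wrapper were configured with searchTerms: ['title'].
const MovieSearch = () => {
  const { query } = useObsidian(); // hook name assumed

  // Broad pull: goes to the network once and normalizes every movie into the
  // cache, also indexing each entry by its 'title' search field.
  const loadAll = () => query(`{ movies { id __typename title } }`);

  // Individual search: with the index above, this can be served from the
  // client-side cache instead of triggering another network request.
  const findOne = (title) =>
    query(`{ movies(title: "${title}") { id __typename title } }`);
};
```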
 ## Creating the Wrapper

@@ -90,6 +107,20 @@ const App = () => {
 };
 ```
+## Selecting options for the Wrapper
+
+```javascript
+<ObsidianWrapper
+  algo='W-TinyLFU' // accepts 'LFU' (default), 'LRU', or 'W-TinyLFU'
+  capacity='5000' // cache capacity, default to 2000
+  searchTerms={['title']} // search fields your application queries by
+  useCache={true} // Boolean to toggle client-side caching, default to true
+  persistQueries={true} // Boolean to toggle persistent queries, default to false
+>
+  <MovieApp />
+</ObsidianWrapper>
+```
+
 ## Making a Query

 ```javascript
@@ -151,46 +182,52 @@ const MovieApp = () => {
 );
 }
 ```
+## Setting up Redis

-## Selecting LFU/LRU and capacity; default (if not provided) LFU, 2000
+In order to utilize server-side caching, a Redis instance must be available and running. Redis installation and quick-start documentation can be found [here](https://redis.io/docs/getting-started/). Make sure to keep a Redis instance running whenever the application is utilizing server-side caching, to avoid running into issues.
+
+To connect Obsidian to Redis, create a .env file in the root directory of the application with the following information:

 ```javascript
-
-
-
+REDIS_HOST= //string of redis host name, typically defaulted to '127.0.0.1' by Redis
 ```

+Be sure to also specify the Redis TCP port by passing in the port number as an argument into Obsidian Router (see Selecting options for the Router above).
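If you want to verify that the Redis instance is actually reachable before wiring it into Obsidian Router, you can ping it directly from Deno. A minimal sketch using the community deno-redis module (module URL and connection options assumed; match the hostname and port to your .env and router settings):

```javascript
import { connect } from 'https://deno.land/x/redis/mod.ts'; // deno-redis, version pinning recommended

// Mirror REDIS_HOST from your .env and the redisPort passed to ObsidianRouter.
const redis = await connect({ hostname: '127.0.0.1', port: 6379 });

console.log(await redis.ping()); // "PONG" means Obsidian's server-side cache can connect
redis.close();
```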
 ## Documentation

-[obsidian.land](http://obsidian.land)
+[getobsidian.io](http://getobsidian.io/)

 ## Developer Tool

 Information and instructions on how to use our developer tool can be found here<br>
-works with Obsidian 5.0<br>
-[oslabs-beta/obsidian-developer-tool](https://github.com/oslabs-beta/obsidian-developer-tool)
+works with Obsidian 8.0<br>
+[open-source-labs/obsidian-developer-tool](https://github.com/open-source-labs/obsidian-developer-tool)

-## Obsidian 5.0 Demo
+## Obsidian 8.0 Demo

 GitHub repo for a demo with some example code to play with:<br>
-[oslabs-beta/obsidian-demo-5.0](https://github.com/oslabs-beta/obsidian-demo-5.0)
-
-## Dockerized Demo
-
-Working demo to install locally in docker:
-[oslabs-beta/obsidian-demo-docker](https://github.com/oslabs-beta/obsidian-demo-docker)
+[oslabs-beta/obsidian-8.0-demo](https://github.com/oslabs-beta/obsidian-8.0-demo)

 ## Features In Progress

-- Ability to query the database for only those fields missing from the cache
-- Developer Tool Settings component, fully functioning Playground component
+- Server-side caching improvements
+- More comprehensive mutation support
+- searchTerms option optimization
+- Ability to store/read only the whole query
+- Hill Climber optimization for W-TinyLFU cache size allocation
+- Developer Tool server-side cache integration
+- Developer Tool View Cache component and Playground component

 ## Authors

-
-[Alex Lopez](https://github.com/AlexLopez7)
-[Kevin Huang](https://github.com/kevin-06-huang)
-[Matthew Weisker](https://github.com/mweisker)
-[Ryan Ranjbaran](https://github.com/ranjrover)
+[David Kim](https://github.com/davidtoyoukim)
+[David Norman](https://github.com/DavidMNorman)
+[Eileen Cho](https://github.com/exlxxn)
+[Joan Manto](https://github.com/JoanManto)
+[Alex Lopez](https://github.com/AlexLopez7)
+[Kevin Huang](https://github.com/kevin-06-huang)
+[Matthew Weisker](https://github.com/mweisker)
+[Ryan Ranjbaran](https://github.com/ranjrover)
 [Derek Okuno](https://github.com/okunod)
 [Liam Johnson](https://github.com/liamdimitri)
 [Josh Reed](https://github.com/joshreed104)
diff --git a/assets/Obsidian_New.png b/assets/Obsidian_New.png
new file mode 100644
index 0000000..d6ebfd1
Binary files /dev/null and b/assets/Obsidian_New.png differ
diff --git a/assets/Obsidian_New_nobg.png b/assets/Obsidian_New_nobg.png
new file mode 100644
index 0000000..dc5557a
Binary files /dev/null and b/assets/Obsidian_New_nobg.png differ
diff --git a/assets/banner_black.png b/assets/banner_black.png
new file mode 100644
index 0000000..6360a18
Binary files /dev/null and b/assets/banner_black.png differ
diff --git a/assets/banner_gradient.png b/assets/banner_gradient.png
new file mode 100644
index 0000000..67a75a4
Binary files /dev/null and b/assets/banner_gradient.png differ
diff --git a/assets/bannerfull_gradient.png b/assets/bannerfull_gradient.png
new file mode 100644
index 0000000..6472b07
Binary files /dev/null and b/assets/bannerfull_gradient.png differ
diff --git a/assets/bannerfull_mutegradient.png b/assets/bannerfull_mutegradient.png
new file mode 100644
index 0000000..7cccf81
Binary files /dev/null and b/assets/bannerfull_mutegradient.png differ
diff --git a/assets/full_black.png b/assets/full_black.png
new file mode 100644
index 0000000..cad9aa5
Binary files /dev/null and b/assets/full_black.png differ
diff --git a/assets/logoSilver.jpg b/assets/logoSilver.jpg
deleted file mode 100644
index 215115e..0000000
Binary files a/assets/logoSilver.jpg and /dev/null differ
diff --git a/assets/logo_whitebg.png b/assets/logo_whitebg.png
new file mode 100644
index 0000000..b0890b4
Binary files /dev/null and b/assets/logo_whitebg.png differ
diff --git a/documentation/browserCache/cache-documentation.js b/documentation/browserCache/cache-documentation.js
deleted file mode 100644
index 9fbca09..0000000
--- a/documentation/browserCache/cache-documentation.js
+++ /dev/null
@@ -1,689 +0,0 @@
-// SCHEMA EXAMPLE =====================================================
-// sample schema for examples
-
-const typeDefs = gql`
-  enum MovieGenre {
-    ACTION
-    SCIFI
-    DRAMA
-  }
-  enum
releaseYearOrder { - LATESTFIRST - EARLIESTFIRST - } - - enum alphabeticalOrder { - ASCENDING - DESCENDING - } - - type Movie { - id: ID! - title: String! - releaseYear: Int! - actors: [Actor] - genre: MovieGenre! - isFavorite: Boolean! - } - - type Actor { - id: ID! - firstName: String! - lastName: String! - age: Int! - films: [Movie]! - isFavorite: Boolean! - } - - input MovieInput { - genre: MovieGenre - order: releaseYearOrder - } - input ActorInput { - orderFirstName: alphabeticalOrder - orderLastName: alphabeticalOrder - } - input newMovieInput { - title: String! - releaseYear: Int! - genre: MovieGenre! - } - - type Query { - movie(id: ID!): Movie! - movies(input: MovieInput): [Movie]! - actor(id: ID!): Actor! - actors(input: ActorInput): [Actor]! - } - - type Mutation { - addMovie(input: NewMovieInput): Movie! - favoriteMovie(id: ID!): Movie! - favoriteActor(id: ID!): Actor! - } -`; - -// QUERY EXAMPLES ================================================================= - -// EXAMPLE 1 -// ================================================================================ -// sample query to show how the cachee stores basic queries - -const ALL_MOVIES = gql` - query AllMovies { - movies { - id - title - actors { - id - firstName - } - } - } -`; -const respAllMovies = { - data: { - movies: [ - { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '2', firstName: 'Sean' }, - ], - }, - { - id: '2', - title: 'Empire Strikes Back', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '3', firstName: 'Mark' }, - ], - }, - { - id: '3', - title: 'Witness', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '4', firstName: 'Patti' }, - ], - }, - { - id: '4', - title: 'Air Force One', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '5', firstName: 'Gary' }, - ], - }, - ], - }, -}; - -const cache = { - ROOT_QUERY: { - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4'], - }, - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - }, - 'Movie~3': { id: '3', title: 'Witness', actors: ['Actor~1', 'Actor~4'] }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - }, - 'Actor~1': { id: '1', firstName: 'Harrison' }, - 'Actor~2': { id: '2', firstName: 'Sean' }, - 'Actor~3': { id: '3', firstName: 'Mark' }, - 'Actor~4': { id: '4', firstName: 'Patti' }, - 'Actor~5': { id: '4', firstName: 'Gary' }, -}; - -// EXAMPLE 2 -// ================================================================================ -// sample query to show how the cache stores queries with arguments - -const ALL_ACTION_MOVIES = gql` - query AllActionMovies { - movies(input: { genre: ACTION }) { - id - title - genre - releaseYear - } - } -`; - -const respAllActionMovies = { - data: { - movies: [ - { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - }, - { - id: '4', - title: 'Air Force One', - genre: 'ACTION', - releaseYear: 1997, - }, - ], - }, -}; - -const cache2 = { - ROOT_QUERY: { - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4'], - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], // Added - }, - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', // Added - releaseYear: 1989, // Added - }, - 'Movie~2': { - id: '2', - title: 'Empire 
Strikes Back', - actors: ['Actor~1', 'Actor~3'], - }, - 'Movie~3': { id: '3', title: 'Witness', actors: ['Actor~1', 'Actor~4'] }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - genre: 'ACTION', // Added - releaseYear: 1997, // Added - }, - 'Actor~1': { id: '1', firstName: 'Harrison' }, - 'Actor~2': { id: '2', firstName: 'Sean' }, - 'Actor~3': { id: '3', firstName: 'Mark' }, - 'Actor~4': { id: '4', firstName: 'Patti' }, - 'Actor~5': { id: '5', firstName: 'Gary' }, -}; - -// EXAMPLE 3 -// ================================================================================ -// Another sample query to show how the cacbe stores queries with arguments and preserves order of response data - -const ALL_MOVIES_CHRONOLOGICAL = gql` - query AllMoviesChronological { - movies(input: { order: EARLIESTFIRST }) { - id - title - releaseYear - } - } -`; - -const respAllMoviesChronological = { - data: { - movies: [ - { - id: '2', - title: 'Empire Strikes Back', - releaseYear: 1980, - }, - { - id: '3', - title: 'Witness', - releaseYear: 1985, - }, - { - id: '1', - title: 'Indiana Jones and the Last Crusade', - releaseYear: 1989, - }, - { - id: '4', - title: 'Air Force One', - releaseYear: 1997, - }, - ], - }, -}; - -const cache3 = { - ROOT_QUERY: { - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4'], - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - // added - 'movies(input:{order:EARLIESTFIRST})': [ - 'Movie~2', - 'Movie~3', - 'Movie~1', - 'Movie~4', - ], - }, - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, // added - }, - 'Movie~3': { - id: '3', - title: 'Witness', - actors: ['Actor~1', 'Actor~4'], - releaseYear: 1985, // added - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - genre: 'ACTION', - releaseYear: 1997, - }, - 'Actor~1': { id: '1', firstName: 'Harrison' }, - 'Actor~2': { id: '2', firstName: 'Sean' }, - 'Actor~3': { id: '3', firstName: 'Mark' }, - 'Actor~4': { id: '4', firstName: 'Patti' }, - 'Actor~5': { id: '5', firstName: 'Gary' }, -}; - -// EXAMPLE 4 -// ================================================================================ -// Another sample query to show how the cacbe stores queries with arguments and preserves order of response data - -const ALL_ACTORS_ALPHABETICAL_LAST_NAME = gql` - query AllActorsAlphabeticalLastName { - actors(input: { orderLastName: DESCENDING }) { - id - firstName - LastName - } - } -`; - -const respAllActorsAlphabeticalLastName = { - data: { - actors: [ - { - id: '2', - firstName: 'Sean', - lastName: 'Connery', - }, - { - id: '1', - firstName: 'Harrion', - lastName: 'Ford', - }, - { - id: '3', - firstName: 'Mark', - lastName: 'Hamill', - }, - { - id: '4', - firstName: 'Patti', - lastName: 'LuPone', - }, - { - id: '5', - firstName: 'Gary', - lastName: 'Oldman', - }, - ], - }, -}; - -const cache4 = { - ROOT_QUERY: { - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4'], - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - 'movies(input:{order:EARLIESTFIRST})': [ - 'Movie~2', - 'Movie~3', - 'Movie~1', - 'Movie~4', - ], - // added - 'actors(input:{ orderLastName:DESCENDING})': [ - 'Actor~2', - 'Actor~1', - 'Actor~3', - 'Actor~4', - 'Actor~5', - ], - }, - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 
'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, - }, - 'Movie~3': { - id: '3', - title: 'Witness', - actors: ['Actor~1', 'Actor~4'], - releaseYear: 1985, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - genre: 'ACTION', - releaseYear: 1997, - }, - 'Actor~1': { id: '1', firstName: 'Harrison', lastName: 'Ford' }, //added lastName to actors - 'Actor~2': { id: '2', firstName: 'Sean', lastName: 'Connery' }, - 'Actor~3': { id: '3', firstName: 'Mark', lastName: 'Hamill' }, - 'Actor~4': { id: '4', firstName: 'Patti', lastName: 'LuPone' }, - 'Actor~5': { id: '5', firstName: 'Gary', lastName: 'Oldman' }, -}; - -// EXAMPLE 5 -// ================================================================================ -// A sample query by id that we might want to create soecial logic for to save network requests - -const GET_ACTOR_BY_ID = gql` - query getActorById { - actor(id: 1) { - id - firstName - LastName - } - } -`; - -const respGetActorById = { - data: { - actor: [ - { - id: '1', - firstName: 'Harrion', - lastName: 'Ford', - }, - ], - }, -}; - -// is there any way to stop this request from going to the server and just serve from the cache if we have all the information??? -// do we ant to hard code specialized check for arguments that are just a single id???? - -const cache5 = { - ROOT_QUERY: { - 'actor(id:1)': 'Actor~1', // Added CAN WE STOP IT? - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4'], - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - 'movies(input:{order:EARLIESTFIRST})': [ - 'Movie~2', - 'Movie~3', - 'Movie~1', - 'Movie~4', - ], - 'actors(input:{ orderLastName:DESCENDING})': [ - 'Actor~2', - 'Actor~1', - 'Actor~3', - 'Actor~4', - 'Actor~5', - ], - }, - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, - }, - 'Movie~3': { - id: '3', - title: 'Witness', - actors: ['Actor~1', 'Actor~4'], - releaseYear: 1985, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - genre: 'ACTION', - releaseYear: 1997, - }, - 'Actor~1': { id: '1', firstName: 'Harrison', lastName: 'Ford' }, - 'Actor~2': { id: '2', firstName: 'Sean', lastName: 'Connery' }, - 'Actor~3': { id: '3', firstName: 'Mark', lastName: 'Hamill' }, - 'Actor~4': { id: '4', firstName: 'Patti', lastName: 'LuPone' }, - 'Actor~5': { id: '5', firstName: 'Gary', lastName: 'Oldman' }, -}; - -// EXAMPLE 6 -// ================================================================================ -// The following queries should be able to be served from the cache without making a network request - -const ALL_MOVIES_WITH_RELEASE_YEAR = gql` - query AllMoviesWithReleaseYear { - movies { - id - title - releaseYear - } - } -`; - -const ALL_MOVIES_WITH_ACTOR_LAST_NAMES = gql` - query AllMoviesWithActorLastNames { - movies { - id - title - actors { - id - lastName - } - } - } -`; - -// MUTATIONS - -// EXAMPLE 7 -// ================================================================================ -// simple update example the cache would automatically update - -const ADD_FAVORITE_MOVIE = gql` - mutation AddFavoriteMovie { - favoriteMovie(id: 2) { - id - isFavorite - } - } -`; - -const respAddFavoriteMovie = { - data: { - favoriteMovie: [ - { 
- id: '2', - isFavorite: true, - }, - ], - }, -}; - -const cache6 = { - ROOT_QUERY: { - 'actor(id:1)': 'Actor~1', - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4'], - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - 'movies(input:{order:EARLIESTFIRST})': [ - 'Movie~2', - 'Movie~3', - 'Movie~1', - 'Movie~4', - ], - 'actors(input:{ orderLastName:DESCENDING})': [ - 'Actor~2', - 'Actor~1', - 'Actor~3', - 'Actor~4', - 'Actor~5', - ], - }, - ROOT_MUTATION: { - 'favoriteMovie(id:2)': 'Movie~2', // Added - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, - isFavorite: true, // Added - }, - 'Movie~3': { - id: '3', - title: 'Witness', - actors: ['Actor~1', 'Actor~4'], - releaseYear: 1985, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - genre: 'ACTION', - releaseYear: 1997, - }, - 'Actor~1': { id: '1', firstName: 'Harrison', lastName: 'Ford' }, - 'Actor~2': { id: '2', firstName: 'Sean', lastName: 'Connery' }, - 'Actor~3': { id: '3', firstName: 'Mark', lastName: 'Hamill' }, - 'Actor~4': { id: '4', firstName: 'Patti', lastName: 'LuPone' }, - 'Actor~5': { id: '5', firstName: 'Gary', lastName: 'Oldman' }, -}; - -// EXAMPLE 8 -// ================================================================================ -// add movie mutation example: the returned data would automically be cached. -// but the developer would have to assist in adding the movie to the appropriate spot in the root queries. - -const ADD_MOVIE = gql` - mutation AddMovie { - addMovie(input: {title: 'The Fugitive', releaseYear: 1993, genre: ACTION }) { - id - title - releaseYear - genre - isFavorite - } - } -`; - -const respAddMovie = { - data: { - addMovie: [ - { - id: '5', - title: 'The Fugitive', - releaseYear: 1993, - genre: 'ACTION', - isFavorite: false, - }, - ], - }, -}; - -const cache7 = { - ROOT_QUERY: { - 'actor(id:1)': 'Actor~1', - movies: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~4', 'Movie~5'], // Added added new movie with help from developer - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4', 'Movie~5'], // Added added new movie with help from developer - // Added new movie with help from developer - 'movies(input:{order:EARLIESTFIRST})': [ - 'Movie~2', - 'Movie~3', - 'Movie~1', - 'Movie~5', - 'Movie~4', - ], - 'actors(input:{ orderLastName:DESCENDING})': [ - 'Actor~2', - 'Actor~1', - 'Actor~3', - 'Actor~4', - 'Actor~5', - ], - }, - ROOT_MUTATION: { - 'favoriteMovie(id:2)': 'Movie~2', - "addMovie(input: {title: 'The Fugitive', releaseYear: 1993, genre: ACTION })": - 'Movie~5', - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, - isFavorite: true, - }, - 'Movie~3': { - id: '3', - title: 'Witness', - actors: ['Actor~1', 'Actor~4'], - releaseYear: 1985, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - actors: ['Actor~1', 'Actor~5'], - genre: 'ACTION', - releaseYear: 1997, - }, - // Added - 'Movie~5': { - id: '5', - title: 'The Fugitive', - genre: 'ACTION', - releaseYear: 1993, - isFavorite: false, - }, - 'Actor~1': { id: '1', firstName: 'Harrison', lastName: 'Ford' }, - 'Actor~2': { id: '2', firstName: 'Sean', lastName: 
'Connery' }, - 'Actor~3': { id: '3', firstName: 'Mark', lastName: 'Hamill' }, - 'Actor~4': { id: '4', firstName: 'Patti', lastName: 'LuPone' }, - 'Actor~5': { id: '5', firstName: 'Gary', lastName: 'Oldman' }, -}; diff --git a/documentation/browserCache/cache-refactor-doc.js b/documentation/browserCache/cache-refactor-doc.js deleted file mode 100644 index 19b975c..0000000 --- a/documentation/browserCache/cache-refactor-doc.js +++ /dev/null @@ -1,94 +0,0 @@ -/** - * GOALS - * 1. refactor all cache related functionality as methods on Cache object - * 2. Create a one source of truth for the naming of those different methods and defining their agruments, return, other functionality - * 3. Create a template for refactoring our cache interaction so that it will work for both redis cache and the object cache - * - */ - -// CLIENT-SIDE OBJECT CACHE VS SERVER-SIDE REDIS CACHE -/** - * NOTES: - * 1. Any direct reads/writes of the cache will be replaced with the cacheRead and cacheWrite methods. - * 2. These methods will be dual purpose functions that will read and write key value pairs from the client/side Cache - * 3. Both will first have a check to see if we are in the client side cache or server side cache - * 4. Client-Side: will read/write to cache.storage via normal object lookup/assignment - * 5. Server-Side: will read/write to redis a JSON.stringified version of the value using redis methods - * 6. See Obsidian 1.0's dbOps for reference/inspiration - * 7. Reading or writing to the Root_Query or Root_Mutation will now be a 2 part process via cacheRead, cacheWrite: - * first retrieving the entire ROOT_QUERY object from the cache and then reading values from that object. - * 8. We can no longer pass the cache in as an argument, clone the cache or update the cache with Object.assign - * - */ - -// Cache constructor - -class Cache { - constructor( - cache = { - ROOT_QUERY: {}, - ROOT_MUTATION: {}, - } - ) { - this.storage = cache; - this.context = window.deno ? 'server' : 'client'; - } - - // Main functionality methods - read(qryStr) { - // readCache; returns gql response object || undefined - } - write(qryStr, respObj) { - // writeCache; updates cache with all data from response object - } - delete(qryStr, respObj) { - // deleteCache; sets any top level hashed values of response object to 'DELETE' - } - gc() { - // garbageCollection; garbage collection: removes any inaccessible hashes from the cache - } - - // cache read/write helper methods - cacheRead(hash) { - // returns value from either object cache or redis cache || 'DELETED' || undefined - if (this.context === 'client') { - return this.storage[hash]; - } else { - throw Error('Redis functionality has not been implemented'); - } - } - cacheWrite(hash, value) { - // writes value to object cache or JSON.stringified value to redis cache - if (this.context === 'client') { - this.storage[hash] = value; - } else { - throw Error('Redis functionality has not been implemented'); - } - } - cacheDelete(hash) { - // deletes the hash/value pair on either object cache or redis cache - if (this.context === 'client') { - delete this.storage[hash]; - } else { - throw Error('Redis functionality has not been implemented'); - } - } - cacheClear(hash) { - // erases either object cache or redis cache - if (this.context === 'client') { - this.storage = {}; - } else { - throw Error('Redis functionality has not been implemented'); - } - } -} - -/** - * OPEN QUESTIONS - * (We think no) 1. Do we need any more than two functions for cache read/write. 
why does Obsidian 1.0 have 4??? - * (yes, maybe) 2. Can we utilize helper methods in our main methods (will we have to worry about binding context?) - * (could maybe explore 2020) 3. We will be exposing all these methods, when we probably only want to expose some to the devleloper. Is this an issue? - * 4. Will we run into any issues with live incremental updates of the cache object (always the same reference)? - * (how will React know the cache has been updated?) - * - */ diff --git a/documentation/browserCache/destructure-documentation.js b/documentation/browserCache/destructure-documentation.js deleted file mode 100644 index de8f99f..0000000 --- a/documentation/browserCache/destructure-documentation.js +++ /dev/null @@ -1,256 +0,0 @@ -// DESTRUCTURE - -destructureQueries(query, ObsidianSchema, cache); - -// INPUT: - -const query = gql` - { - Country(_id: "4425") { - _id - name - population - flag { - _id - - emoji - } - - borders { - _id - name - capital - } - } - } -`; - -const obsidianSchema = { - returnTypes: { - Country: { - kind: 'NamedType', - type: 'Country', - }, - }, - argTypes: { - Country: { _id: 'ID' }, - }, - obsidianTypeSchema: { - Country: { - borders: { type: 'Country', scalar: false }, - capital: { type: 'String', scalar: true }, - flag: { type: 'Flag', scalar: false }, - name: { type: 'String', scalar: true }, - population: { type: 'Int', scalar: true }, - _id: { type: 'ID', scalar: true }, - }, - Flag: { - emoji: { type: 'String', scalar: true }, - _id: { type: 'ID', scalar: true }, - }, - }, -}; - -const cache = { - 'Country(_id:"4425"){_idnamepopulationflag{_idemoji}borders{_idnamecapital}}': { - 'Country~4425~name': true, - 'Country~4425~population': true, - 'Country~4425~flag': true, - 'Country~4425~borders': true, - }, - 'Country~860~capital': 'Ottawa', - 'Country~860~name': 'Canada', - 'Country~2741~capital': 'Mexico City', - 'Country~2741~name': 'Mexico', - 'Country~4425~borders': { 'Country~2741': true, 'Country~860': true }, - 'Country~4425~flag': 'Flag~4440', - 'Country~4425~name': 'United States of America', - 'Country~4425~population': 323947000, - 'Flag~4440~emoji': '🇺🇸', -}; - -// PROCESS: -// finds the specific queries if there is more than one -findSpecificQueries; -// checks to see if the fields on each querie are currerently stored in the cache -createQueryObj; -// converts the query string into a query object for reference -// creates a hash array from the keys on queryHashes -buildResultsObject; -// attempts to build result object by comparing the cache, queryObj, and hashes - -// OUTPUT: - -// if any part of the query string is a mutation return 'mutation'??? 
- -// if everything is not found in cache return undefined; - -// if everything is found in cache -const obsidianReturn = { - data: { - Country: [ - { - borders: [ - { _id: '2741', name: 'Mexico', capital: 'Mexico City' }, - { _id: '860', name: 'Canada', capital: 'Ottawa' }, - ], - flag: { - emoji: '', - _id: '4440', - }, - _id: '4425', - name: 'United States of America', - population: 323947000, - }, - ], - }, -}; - -// ===================================================================== -queryHashes = findSpecificQueries(query, obsidianSchema, cache); -// INPUT: nothing new - -// PROCESS: -findqueryname; // finds the first query name -// iterate through the rest of the query to find all of the other querie names and hash them onto the queryHashes object -specificQueryParser; -// this is what query hashes looks like now -queryHashes = { - Country: - 'Country(_id:"4425"){_idnamepopulationflag{_idemoji}borders{_idnamecapital}}', -}; -checkAndRetrieveHash; - -// OUTPUT: -const queryHashes = { Country: undefined }; // if cache does not have everything -// if cache has everything -const queryHashes = { - Country: { - 'Country~4425~borders': true, - 'Country~4425~flag': true, - 'Country~4425~name': true, - 'Country~4425~population': true, - }, -}; - -// ===================================================================== -const nameOfQuery = findQueryName(query); - -// INPUT: nothing new - -// PROCESS: -// parsing function to find the name of the query - -// OUTPUT: -const nameOfQuery = 'Country'; -// will return 'mutation' if it's a mutation query; this will casue the destucture to break out - -// ========================================================== -const next = specificQueryParser(startIndexOfName, query); - -// INPUT: startIndexOfName is where we left off in the query parsing -const startIndexOfName = 9; -// PROCESS: -// parses individual queries into minified strings and finds the end index -// OUTPUT: -const next = { - output: - 'Country(_id:"4425"){_idnamepopulationflag{_idemoji}borders{_idnamecapital}}', - endIdx: 256, -}; - -// ======================================================================== - -redisResults[queryHash] = checkAndRetrieveQuery(queryHashes[queryHash], cache); - -// INPUT: individual query hash for one query and the cache - -// PROCESS: -// checks to see if the minified query string is stored in cache, if so return corresp[ponding object - -// OUTPUT: -// if cache doesn't have everything return undefined - -// if cache has everything -const redisResults = { - 'Country~4425~name': true, - 'Country~4425~population': true, - 'Country~4425~flag': true, - 'Country~4425~borders': true, -}; - -// ======================================================================= -const queryObj = createQueryObj(queryName, query, obsidianSchema); - -// INPUT: nothing special - -// PROCESS: -// parses query string and converts it to query object -// NOTES: -// this seems to be the first place they actually use the schema. do they need the schema? -// There is a note here about not supporting any paramters other than ID?? 
-// We should look at this closer later and see if there is abetter way; -// OUTPUT: - -const queryObj = { - queryName: 'Country', - paramaters: { _id: '"4425"' }, - properties: { - _id: true, - name: true, - population: true, - flag: { _id: true, emoji: true }, - borders: { _id: true, name: true, capital: true }, - }, -}; - -// ========================================================================== -result.data[queryName] = await buildResultsObject( - hashes, - obsidianSchema, - queryObj, - cache -); - -// INPUT: -const hashes = [ - 'Country~4425~borders', - 'Country~4425~flag', - 'Country~4425~name', - 'Country~4425~population', -]; - -// PROCESS: -// parsing to store the 3 parts of a hash as variables -retrieveScalar; -restrieveComplex; -batchHash; -nestedPropertyHashConstructor; -// finds the corresponding values in the cache and stores them as appropriate on the passed in results object -// Notes: -// this is also using the schema? is there a better way? -// this is complex parsing? should definitly take another look at some point -// OUTPUT: - -// if not able to find everything return undefined - -// if able to find everything in cache -const obsidianReturn = { - data: { - Country: [ - { - borders: [ - { _id: '2741', name: 'Mexico', capital: 'Mexico City' }, - { _id: '860', name: 'Canada', capital: 'Ottawa' }, - ], - flag: { - emoji: '', - _id: '4440', - }, - _id: '4425', - name: 'United States of America', - population: 323947000, - }, - ], - }, -}; diff --git a/documentation/browserCache/garbage-collection-doc.js b/documentation/browserCache/garbage-collection-doc.js deleted file mode 100644 index b3dad76..0000000 --- a/documentation/browserCache/garbage-collection-doc.js +++ /dev/null @@ -1,123 +0,0 @@ -/** - * NOTES: - * 1. is a method on the cache object that removes all references to DELETED and inaccessible hashes - * 2. a reference is considered inaccessible if there is no way to access it from any of the root queries - * 3. How/when should this be called? - * after every query/mutation? - * on some sort of time interval? - * only when the developer asks for it? - * when the cache reaches a certain size? - */ -class Cache { - gc() { - // where the magic happens - } -} - -/** - * Possible high-level approach - * 1. iterate through all the hashes and generate a Set of all the deleted hashes. - * 2. delete those hashes - * 3. iterate through all of the non-wholeQuery ROOT_QUERIES - * - remove any hash reference that is a member of the deleted hash Set - * - for any hash reference that has not been deleted - * - add that hash to a Set of accessible hashes - * - recursively trace that hash and continue removing any deleted hash references and updating the Set of accesible hashes - * 4. 
remove any hashes that are not a member of the accessible hash Set - */ - -// EXAMPLE =========================================================================================== -const cacheBeforeGC = { - ROOT_QUERY: { - 'actor(id:1)': 'Actor~1', - favoriteMovies: ['Movie~1', 'Movie~2', 'Movie~3'], // includes reference to deleted hash - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~3', 'Movie~5'], // includes reference to deleted hash - }, - ROOT_MUTATION: { - 'favoriteMovie(id:2)': 'Movie~2', - "addMovie(input: {title: 'The Fugitive', releaseYear: 1993, genre: ACTION })": - 'Movie~5', - 'deleteMovie(id:3)': 'Movie~3', - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, - isFavorite: true, - }, - // DELETED - 'Movie~3': 'DELETED', - 'Movie~5': { - id: '5', - title: 'The Fugitive', - genre: 'ACTION', - releaseYear: 1993, - isFavorite: false, - }, - 'Actor~1': { - id: '1', - firstName: 'Harrison', - lastName: 'Ford', - films: ['Movie~1', 'Movie~2', 'Movie~3', 'Movie~5'], // includes reference to deleted hash - }, - 'Actor~2': { id: '2', firstName: 'Sean', lastName: 'Connery' }, - 'Actor~3': { id: '3', firstName: 'Mark', lastName: 'Hamill' }, - 'Actor~4': { id: '4', firstName: 'Patti', lastName: 'LuPone' }, // INACCESSIBLE - 'Actor~5': { id: '5', firstName: 'Gary', lastName: 'Oldman' }, // INACCESSIBLE -}; - -const cacheAfterGC = { - ROOT_QUERY: { - 'actor(id:1)': 'Actor~1', - favoriteMovies: ['Movie~1', 'Movie~2'], // deleted reference removed - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~5'], // deleted reference removed - }, - ROOT_MUTATION: { - 'favoriteMovie(id:2)': 'Movie~2', - "addMovie(input: {title: 'The Fugitive', releaseYear: 1993, genre: ACTION })": - 'Movie~5', - // 'deleteMovie(id:4)': 'Movie~4', // mistake? - 'deleteMovie(id:3)': 'Movie~3', - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: ['Actor~1', 'Actor~2'], - genre: 'ACTION', - releaseYear: 1989, - }, - 'Movie~2': { - id: '2', - title: 'Empire Strikes Back', - actors: ['Actor~1', 'Actor~3'], - releaseYear: 1980, - isFavorite: true, - }, - // deleted hash removed - 'Movie~5': { - id: '5', - title: 'The Fugitive', - genre: 'ACTION', - releaseYear: 1993, - isFavorite: false, - }, - 'Actor~1': { - id: '1', - firstName: 'Harrison', - lastName: 'Ford', - films: ['Movie~1', 'Movie~2', 'Movie~5'], // deleted reference removed - }, - 'Actor~2': { id: '2', firstName: 'Sean', lastName: 'Connery' }, - 'Actor~3': { id: '3', firstName: 'Mark', lastName: 'Hamill' }, - // inaccessible hashes removed -}; diff --git a/documentation/browserCache/mutation-documentation.js b/documentation/browserCache/mutation-documentation.js deleted file mode 100644 index dbe2ab4..0000000 --- a/documentation/browserCache/mutation-documentation.js +++ /dev/null @@ -1,310 +0,0 @@ -/** - * NOTES: - * 1. This implementation does not handle variables currently - * 2. This function will always send the inputted mutation operation string to the inputted endpoint - * 3. Once receiving a response object it will have three different behaviors depending on what is passed - * into the options object: - * 1. Will update fields for any elements that we find if the hash is present and not set to 'DELETE'. - * - will not do anything with any fields associated with unknown hashes. - * 2. 
If the delete flag is set to true, the function will set the value of every top level hash that currently exists to 'DELETE' - * - cache.read() will need to be updated to ignore any hashes with the value 'DELETE' (not treat as cache miss) - * 3. If the update property is set to a function. That function will be executed causing a cache update as specified by the developer. - * - the cache object and respObj will automatically be passed into the update object as arguments - * 4. After implementing garbage collection: This function would invoke gc() every time a mutation is made except when an update function is provided by the developer. - * 5. This implementation would update the cache only if the flag cache is set to true. - * 6. This function takes in a mutation string and an optional options object and returns the response object from the request made. - */ - -function mutate(mutation, options) { - // where the magic happens -} - -// options object -const options = { - endpoint: '/graphql', // the endpoint where the post request with mutation string will be sent; DEFAULT: '/graphql' - cache: true, // flag to enable automatic cache updates; DEFAULT: 'true' - delete: false, // flag the developer can set to indicate delete mutation; DEFAULT: 'false' - update: updateFunc(cache, respObj), // optional update function to customize cache updating behavior; DEFAULT: null -}; - -// EXAMPLES - -// EXAMPLE 1: SIMPLE UPDATE =================================================================================================== - -const cachePreMut = { - ROOT_QUERY: { - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - }, - ROOT_MUTATION: {}, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - isFavorite: false, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - genre: 'ACTION', - releaseYear: 1997, - isFavorite: false, - }, -}; - -const ADD_FAVORITE_MOVIE = gql` - mutation AddFavoriteMovie { - favoriteMovie(id: 4) { - __typename - id - isFavorite - } - } -`; - -mutate(ADD_FAVORITE_MOVIE); // we don't need an options object since we are using /graphql endpoint - -const respObj = { - data: { - favoriteMovie: { - __typename: 'Movie', - id: '4', - isFavorite: true, - }, - }, -}; - -const cachePostMut = { - ROOT_QUERY: { - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - }, - ROOT_MUTATION: { - 'favoriteMovie(id: 4)': 'Movie~4', - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - isFavorite: false, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - genre: 'ACTION', - releaseYear: 1997, - isFavorite: true, // updated value - }, -}; - -// SPECIAL NOTE: this mutation string would result in no cache change because Movie~2 is not currently cached -const ADD_FAVORITE_MOVIE = gql` - mutation AddFavoriteMovie { - favoriteMovie(id: 2) { - id - isFavorite - } - } -`; - -// EXAMPLE 2: SIMPLE DELETE =================================================================================================== - -const cachePreMut = { - ROOT_QUERY: { - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - }, - ROOT_MUTATION: {}, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - isFavorite: false, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - genre: 'ACTION', - releaseYear: 1997, - isFavorite: false, - }, -}; - -const DELETE_MOVIE = gql` - mutation DeleteMovie { - deleteMovie(id: 4) { - 
__typename - id - } - } -`; - -mutate(DELETE_MOVIE, { delete: true }); - -const respObj = { - data: { - deleteMovie: { - __typename: 'Movie', - id: '4', - }, - }, -}; - -const cachePostMut = { - ROOT_QUERY: { - 'movies(input:{genre:ACTION})': ['Movie~1', 'Movie~4'], - }, - ROOT_MUTATION: { - 'deleteMovie(id:4)': 'Movie~4', - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - isFavorite: false, - }, - 'Movie~4': 'DELETED', // Movie~4 set to DELETED -}; - -// SPECIAL NOTE: DELETED hashes will be ignored in future queries and not throw a cache miss - -const ALL_MOVIES = gql` - query movies(input: {genre: ACTION}) { - movies { - __typename - id - title - } - } -`; - -gather(ALL_MOVIES); - -// this will be the response object served for the above query from the cache -const respObj = { - data: { - movies: [ - { - __typename, - id: '1', - title: 'Indiana Jones and the Last Crusade', - }, - ], - }, -}; - -// EXAMPLE 3: SIMPLE CREATE =================================================================================================== - -const ALL_MOVIES_BY_RELEASE_DATE = gql` - query AllMoviesByDate { - movies(sort: { release: ASC }) { - __typename - id - title - releaseYear - genre - isFavorite - } - } -`; - -// cache after the above query -const cachePreMut = { - ROOT_QUERY: { - 'movies(sort:{release:ASC})': ['Movie~1', 'Movie~4'], - }, - ROOT_MUTATION: {}, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - isFavorite: false, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - genre: 'ACTION', - releaseYear: 1997, - isFavorite: false, - }, -}; - -const ADD_MOVIE = gql` - mutation AddMovie { - addMovie(input: {title: 'The Fugitive', releaseYear: 1993, genre: ACTION }) { - __typename - id - title - releaseYear - genre - isFavorite - } - } -`; - -// developer defined update function to correctly add movies into ALL_MOVIES_BY_RELEASE_DATE query -function movieUpdate(cache, respObj) { - const result = cache.read(ALL_MOVIES_BY_RELEASE_DATE); - const { movies } = result.data; - const newMovie = respObj.data.addMovie; - const updatedMovieArr = movies.push(newMovie).sort((movie1, movie2) => { - return movie1.releaseYear - movie2.releaseYear; - }); - const updatedRespObj = { data: { movies: updatedMovieArr } }; - cache.write(ALL_MOVIES_BY_RELEASE_DATE, updatedRespObj); -} - -mutate(ADD_MOVIE, { update: movieUpdate }); - -const respAddMovie = { - data: { - addMovie: { - __typename: 'Movie', - id: '5', - title: 'The Fugitive', - releaseYear: 1993, - genre: 'ACTION', - isFavorite: false, - }, - }, -}; - -const cachePostMut = { - ROOT_QUERY: { - 'movies(sort:{release:ASC})': ['Movie~1', 'Movie~5', 'Movie~4'], // Movie~5 is slotted into the appropriate place - }, - ROOT_MUTATION: { - "addMovie(input:{title:'TheFugitive',releaseYear:1993,genre:ACTION})": - 'Movie~5', - }, - - 'Movie~1': { - id: '1', - title: 'Indiana Jones and the Last Crusade', - genre: 'ACTION', - releaseYear: 1989, - isFavorite: false, - }, - 'Movie~4': { - id: '4', - title: 'Air Force One', - genre: 'ACTION', - releaseYear: 1997, - isFavorite: false, - }, - // Movie~5 added - 'Movie~5': { - id: '5', - title: 'The Fugitive', - genre: 'ACTION', - releaseYear: 1993, - isFavorite: false, - }, -}; diff --git a/documentation/browserCache/normalize-documentation.js b/documentation/browserCache/normalize-documentation.js deleted file mode 100644 index f6ef50a..0000000 --- 
diff --git a/documentation/browserCache/normalize-documentation.js b/documentation/browserCache/normalize-documentation.js
deleted file mode 100644
index f6ef50a..0000000
--- a/documentation/browserCache/normalize-documentation.js
+++ /dev/null
@@ -1,370 +0,0 @@
-//*=========================================================================*//
-/*
-  normalizeResult
-
-  Description: Takes a query, a response object, an obsidianSchema, and a cache
-  and 'normalizes' (flattens) the data, merges it into the cache, and returns
-  the updated cache
-
-  Summary:
-    Breaks response object into various queries
-    Creates an object of hashes and values from the response object
-      {
-        hash: queryStringHashed
-        value: {
-          dataHashes: true,
-          ...,
-          ...
-        }
-      }
-    Recursively adds all base data to the cache
-    Checks to see if the hash exists in the cache and returns a new cache
-*/
-
-normalizeResult(query, result, obsidianSchema, cache)
-
-// INPUT:
-const query = gql`
-  {
-    Country(_id: "4425") {
-      _id
-      name
-      population
-      flag {
-        _id
-        emoji
-      }
-      borders {
-        _id
-        name
-        capital
-      }
-    }
-  }
-`;
-
-const result = {
-  data: {
-    Country: [
-      {
-        _id: '4425',
-        name: 'United States of America',
-        population: 323947000,
-        flag: {
-          _id: '4440',
-          emoji: '🇺🇸'
-        },
-        borders: [
-          {
-            capital: "Mexico City",
-            name: "Mexico",
-            _id: "2741"
-          },
-          {
-            capital: "Ottawa",
-            name: "Canada",
-            _id: "860",
-          }
-        ],
-      },
-    ]
-  }
-};
-
-const obsidianSchema = {
-  returnTypes: {
-    Country: {
-      kind: 'NamedType',
-      type: 'Country',
-    },
-  },
-  argTypes: {
-    Country: { _id: 'ID' },
-  },
-  obsidianTypeSchema: {
-    Country: {
-      borders: { type: 'Country', scalar: false },
-      capital: { type: 'String', scalar: true },
-      flag: { type: 'Flag', scalar: false },
-      name: { type: 'String', scalar: true },
-      population: { type: 'Int', scalar: true },
-      _id: { type: 'ID', scalar: true },
-    },
-    Flag: {
-      emoji: { type: 'String', scalar: true },
-      _id: { type: 'ID', scalar: true },
-    },
-  },
-};
-
-const cache = {};
-
-// PROCESS:
-hashSpecificQuery; // CORE NORMALIZATION FLOW, stores base data and returns queryHash and data
-checkAndInsert; // checks if the query string is in the cache, then caches
-
-// OUTPUT:
-const obsidianReturn = [{
-  data: {
-    Country: [
-      {
-        borders: [
-          { _id: '2741', name: 'Mexico', capital: 'Mexico City' },
-          { _id: '860', name: 'Canada', capital: 'Ottawa' },
-        ],
-        flag: {
-          emoji: '🇺🇸',
-          _id: '4440',
-        },
-        _id: '4425',
-        name: 'United States of America',
-        population: 323947000,
-      },
-    ],
-  },
-}];
-
-//*=========================================================================*//
-/*
-  hashSpecificQuery
-
-  Description: Takes a query and spits out an object composed of
-  hashes for that query and values for that query
-*/
-async function hashSpecificQuery(queryType, fields, returnTypes, query, obsidianTypeSchema, cache)
-
-// INPUT:
-queryType = 'Country'
-
-fields = [
-  {
-    _id: '4425',
-    name: 'United States of America',
-    population: 323947000,
-    flag: {
-      _id: '4440',
-      emoji: '🇺🇸'
-    },
-    borders: [
-      {
-        capital: "Mexico City",
-        name: "Mexico",
-        _id: "2741"
-      },
-      {
-        capital: "Ottawa",
-        name: "Canada",
-        _id: "860",
-      }
-    ],
-  },
-]
-returnTypes
-query
-obsidianTypeSchema
-cache
-
-// PROCESS:
-specificQueryParser;
-hashAndStoreFields;
-
-// OUTPUT:
-hashedQuery = {
-  hash: "Country(_id:\"4425\"){_idnamepopulationflag{_idemoji}borders{_idnamecapital}}",
-  value: {
-    'Country~4425~borders': true,
-    'Country~4425~flag': true,
-    'Country~4425~name': true,
-    'Country~4425~population': true
-  }
-}
-
-//*=========================================================================*//
-/*
-  checkAndInsert
-
-  Description: Checks if the hash exists in the cache, if not then insert
-  the hash and its value into the cache
-
-  Doesn't appear to update previously hashed data
-*/
-async function checkAndInsert(hash, value, cache, expiration = 20)
-
-// INPUT
-hash
-value
-cache
-expiration
-
-// PROCESS
-connectFunc
-
-// OUTPUT
-cache = {
-  "Country(_id:\"4425\"){_idnamepopulationflag{_idemoji}borders{_idnamecapital}}": {
-    "Country~4425~borders": true,
-    "Country~4425~flag": true,
-    "Country~4425~name": true,
-    "Country~4425~population": true
-  },
-  "Country~860~capital": "Ottawa",
-  "Country~860~name": "Canada",
-  "Country~2741~capital": "Mexico City",
-  "Country~2741~name": "Mexico",
-  "Country~4425~flag": "Flag~4440",
-  "Country~4425~name": "United States of America",
-  "Country~4425~population": 323947000,
-  "Flag~4440~emoji": "🇺🇸",
-  "Country~4425~borders": {
-    "Country~2741": true, "Country~860": true
-  },
-}
-
-//*=========================================================================*//
-/*
-  specificQueryParser
-
-  Description: takes a starting index and a query string and returns a
-  minified query and end index
-*/
-specificQueryParser(startIdx, query).output;
-
-// INPUT
-startIdx // starting index of query
-query
-
-// PROCESS:
-
-// OUTPUT:
-output = "Country(_id:\"4425\"){_idnamepopulationflag{_idemoji}borders{_idnamecapital}}"
-
-//*=========================================================================*//
-/*
-  hashAndStoreFields
-
-  Description: Takes a set of fields, generates hashes with them, gives them
-  a value of true, and outputs an object
-*/
-await hashAndStoreFields(queryType, fields, returnTypes, obsidianTypeSchema, cache);
-//INPUT
-queryType
-fields
-returnTypes
-obsidianTypeSchema
-cache
-
-//PROCESS
-hashAndStoreFieldsOfObject
-
-//OUTPUT
-output = {
-  'Country~4425~borders': true,
-  'Country~4425~flag': true,
-  'Country~4425~name': true,
-  'Country~4425~population': true
-}
-
-//*=========================================================================*//
-/*
-  hashAndStoreFieldsOfObject
-
-  Description: Takes in an object of properties and eventually
-  returns an object of hashes with values true
-*/
-async function hashAndStoreFieldsOfObject(typeSchemaName, fields, obsidianTypeSchema, queryType, returnTypes, cache)
-
-// INPUT
-typeSchemaName
-fields = {
-  _id: '4425',
-  name: 'United States of America',
-  population: 323947000,
-  flag: {
-    _id: '4440',
-    emoji: '🇺🇸'
-  },
-  borders: [
-    {
-      capital: "Mexico City",
-      name: "Mexico",
-      _id: "2741"
-    },
-    {
-      capital: "Ottawa",
-      name: "Canada",
-      _id: "860",
-    }
-  ],
-}
-obsidianTypeSchema
-queryType
-returnTypes
-cache
-
-// PROCESS
-oldReduce
-
-// OUTPUT
-hashes = {
-  "Country~4425~name": true,
-  "Country~4425~population": true,
-  "Country~4425~flag": true,
-  "Country~4425~borders": true
-}
-
-//*=========================================================================*//
-/*
-  oldReduce
-
-  Description: Takes a property, creates a hash, and enters it into the
-  hash object
-*/
-async function oldReduce(property)
-
-// INPUT
-property = "name"
-
-// PROCESS
-hashGenerator // generates a hash
-hashAndStoreFields // hashes and stores values within the nested obj
-checkID // returns null or id of a named type (if it exists)
-checkAndInsert // hashes pieces of data with their values
-
-// OUTPUT
-// Adds an element to the hash object
-
-
-//*=========================================================================*//
-/*
-  hashGenerator
-
-  Description: takes various fields and creates a hash
-*/
-async function hashGenerator(typeSchemaName, id, property)
-
-// INPUT
-typeSchemaName
-id
-property
-
-// PROCESS
-
-// OUTPUT
-hash = "Country~4425~name"
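// For illustration only: the OUTPUT above amounts to joining the three
// arguments with '~'. A minimal sketch of such a generator:
const hashGeneratorSketch = (typeSchemaName, id, property) =>
  `${typeSchemaName}~${id}~${property}`;

hashGeneratorSketch('Country', '4425', 'name'); // => 'Country~4425~name'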
-//*=========================================================================*//
-/*
-  checkID
-
-  Description: takes the value associated with a property
-  and returns null or an id
-*/
-
-function checkID(propObj)
-// INPUT
-propObj
-
-// PROCESS
-
-// OUTPUT
-newID = id || null
\ No newline at end of file
diff --git a/documentation/browserCache/query-documentation.js b/documentation/browserCache/query-documentation.js
deleted file mode 100644
index 858a72b..0000000
--- a/documentation/browserCache/query-documentation.js
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * NOTES:
- * 1. This implementation does not handle variables or aliases currently
- * 2. Potential updates needed for implementation:
- *    - gather_hunt.jsx will need to be updated to handle the combining of hunt and gather.
- *    - making simple edits to gather_hunt should be sufficient for handling the cacheRead, cacheWrite, pollInterval flags;
- *    - some combination of newNormalize, newDestructure, and the read and write methods on the cache
- *      would need to be updated to account for wholeQuery caching
- *    - it may make more sense to create dedicated cache methods for whole query caching
- */
-
-function query(query, options) {
-  // where the magic happens
-}
-
-// options object
-const options = {
-  endpoint: '/graphql', // the endpoint where the post request with the query string will be sent; DEFAULT: '/graphql'
-  cacheRead: true, // determines whether the cache should be checked before making a server request; DEFAULT: true
-  cacheWrite: true, // determines whether the response from a server request should be written into the cache; DEFAULT: true
-  pollInterval: null, // if non-null the query will be sent to the server every inputted number of ms; DEFAULT: null
-  wholeQuery: false, // for any cache reads or writes this will conduct wholeQuery writes or retrieval; DEFAULT: false
-};
-
-/**
- * cacheRead
- * - If set to false the query will always be sent to the server; the cache will not be checked.
- * - __typenames will still be inserted into this request
- */
-
-/**
- * cacheWrite
- * - If set to false, the cache will never be updated even if new data is retrieved from the server.
- */
-
-/**
- * pollInterval
- * - null disables this feature
- * - This same query will be sent to the server every inputted number of milliseconds
- * - This query will not check the client-side cache before being sent to the server
- * - The response from the server will be written into cache upon receipt
- */
-
-/**
- * wholeQuery
- * - if enabled the entire query and response will be stored in the cache as one key-value pair
- * - a minified version of the entire query operation string will be stored as the hash key
- * - the response object will be stored as the value without any normalization
- * - the only way to retrieve a cached whole query is to make another query request with an identical
- *   operation query string and the wholeQuery flag set to true
- * - if the wholeQuery flag is true the cacheRead and cacheWrite flags will be ignored.
- * - __typenames will not get inserted into wholeQuery requests - */ - -// WHOLE QUERY EXAMPLE ====================================================================================== - -const ALL_MOVIES = gql` - query AllMovies { - movies { - id - title - actors { - id - firstName - } - } - } -`; -const respAllMovies = { - data: { - movies: [ - { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '2', firstName: 'Sean' }, - ], - }, - { - id: '2', - title: 'Empire Strikes Back', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '3', firstName: 'Mark' }, - ], - }, - { - id: '3', - title: 'Witness', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '4', firstName: 'Patti' }, - ], - }, - { - id: '4', - title: 'Air Force One', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '5', firstName: 'Gary' }, - ], - }, - ], - }, -}; - -const cache = { - ROOT_QUERY: { - 'queryAllMovies{movies{idtitleactors{idfirstName}}}': { - data: { - movies: [ - { - id: '1', - title: 'Indiana Jones and the Last Crusade', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '2', firstName: 'Sean' }, - ], - }, - { - id: '2', - title: 'Empire Strikes Back', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '3', firstName: 'Mark' }, - ], - }, - { - id: '3', - title: 'Witness', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '4', firstName: 'Patti' }, - ], - }, - { - id: '4', - title: 'Air Force One', - actors: [ - { id: '1', firstName: 'Harrison' }, - { id: '5', firstName: 'Gary' }, - ], - }, - ], - }, - }, - }, - ROOT_MUTATION: {}, -}; diff --git a/src/Browser/CacheClassBrowser.js b/src/Browser/CacheClassBrowser.js deleted file mode 100644 index fb58f3f..0000000 --- a/src/Browser/CacheClassBrowser.js +++ /dev/null @@ -1,342 +0,0 @@ -/** @format */ - -import normalizeResult from "./normalize.js"; -import destructureQueries from "./destructure.js"; - -export default class BrowserCache { - constructor( - initialCache = { - ROOT_QUERY: {}, - ROOT_MUTATION: {}, - // match resolvers to types in order to add them in write-through - writeThroughInfo: {}, - }, - ) { - this.storage = initialCache; - this.context = "client"; - } - - // Main functionality methods - async read(queryStr) { - if (typeof queryStr !== "string") { - throw TypeError("input should be a string"); - } - // destructure the query string into an object - const queries = destructureQueries(queryStr).queries; - // breaks out of function if queryStr is a mutation - if (!queries) return undefined; - const responseObject = {}; - // iterate through each query in the input queries object - for (const query in queries) { - // get the entire str query from the name input query and arguments - const queryHash = queries[query].name.concat(queries[query].arguments); - const rootQuery = await this.cacheRead("ROOT_QUERY"); - // match in ROOT_QUERY - if (rootQuery[queryHash]) { - // get the hashs to populate from the existent query in the cache - const arrayHashes = rootQuery[queryHash]; - // Determines responseObject property labels - use alias if applicable, otherwise use name - const respObjProp = queries[query].alias ?? queries[query].name; - // invoke populateAllHashes and add data objects to the response object for each input query - responseObject[respObjProp] = await this.populateAllHashes( - arrayHashes, - queries[query].fields, - ); - if (!responseObject[respObjProp]) return undefined; - - // no match with ROOT_QUERY return null or ... 
- } else { - return undefined; - } - } - return { data: responseObject }; - } - - async writeThrough(queryStr, respObj, deleteFlag, endpoint) { - try { - const queryObj = destructureQueries(queryStr); - const mutationName = queryObj.mutations[0].name; - // check if it's a mutation - if (queryObj.mutations) { - // check to see if the mutation/type has been stored in the cache yet - // if so, make the graphQL call - if (!this.storage.writeThroughInfo.hasOwnProperty(mutationName)) { - respObj = await fetch(endpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - Accept: "application/json", - }, - body: JSON.stringify({ query: queryStr }), - }).then((resp) => resp.json()); - // store the mutation/type in cache - this.storage.writeThroughInfo[mutationName] = {}; - this.storage.writeThroughInfo[mutationName].type = - respObj.data[mutationName].__typename; - this.storage.writeThroughInfo[mutationName].lastId = - respObj.data[mutationName].id; - // below is for situations when the type is already stored - } else { - // construct the response object ourselves - const dummyResponse = await fetch(endpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - Accept: "application/json", - }, - body: JSON.stringify({ query: queryStr }), - }).then((resp) => resp.json()); - this.constructResponseObject(queryObj, respObj, deleteFlag); - } - // same logic for both situations - // normalize the result, invalidate the cache and return the appropriate object - await this.write(queryStr, respObj, deleteFlag); - return respObj; - } - } catch (e) { - console.log(e); - } - } - - async write(queryStr, respObj, deleteFlag) { - const queryObj = destructureQueries(queryStr); - const resFromNormalize = normalizeResult(queryObj, respObj, deleteFlag); - // update the original cache with same reference - for (const hash in resFromNormalize) { - const resp = await this.cacheRead(hash); - if (resFromNormalize[hash] === "DELETED") { - await this.cacheWrite(hash, "DELETED"); - } else if (resp) { - const newObj = Object.assign(resp, resFromNormalize[hash]); - await this.cacheWrite(hash, newObj); - } else { - await this.cacheWrite(hash, resFromNormalize[hash]); - } - } - } - - constructResponseObject(queryObj, respObj, deleteFlag) { - const mutationData = queryObj.mutations[0]; - const mutationName = mutationData.name; - const __typename = this.storage.writeThroughInfo[mutationName].type; - // this.storage.writeThroughInfo[mutationName].type; - respObj.data = {}; - const obj = {}; - respObj.data[mutationName] = obj; - obj.__typename = __typename; - // delete logic - if (deleteFlag) { - // add id and value from the queryObj - let idAndVal = mutationData.arguments; - idAndVal = idAndVal.split(":"); - const id = idAndVal[0].substring(1); - const val = idAndVal[1].substring(0, idAndVal[1].length - 1); - obj[id] = val; - // return out of this function so we don't continue - // onto add/update logic - return respObj; - } - // increment ID for ADD mutations only - obj.id = (++this.storage.writeThroughInfo[mutationName].lastId).toString(); - - // ADD mutation logic - // grab arguments (which is a string) - const argumentsStr = mutationData.arguments; - this.addNonScalarFields(argumentsStr, respObj, mutationData); - this.separateArguments(argumentsStr, respObj, mutationName); - } - - separateArguments(str, respObj, mutationName) { - const startIndex = str.indexOf("{"); - const slicedStr = str.slice(startIndex + 1, str.length - 2); - const argumentPairs = slicedStr.split(","); - for (const 
argumentPair of argumentPairs) { - const argumentKeyAndValue = argumentPair.split(":"); - const argumentKey = argumentKeyAndValue[0]; - let argumentValue = Number(argumentKeyAndValue[1]) - ? Number(argumentKeyAndValue[1]) - : argumentKeyAndValue[1]; - if (typeof argumentValue === "string") { - argumentValue = argumentValue.replace(/\"/g, ""); - } - respObj.data[mutationName][argumentKey] = argumentValue; - } - } - - addNonScalarFields(respObj, mutationData) { - for (const field in mutationData.fields) { - if ( - mutationData.fields[field] !== "scalar" && - mutationData.fields[field] !== "meta" - ) { - respObj.data[mutationData.name][field] = []; - } - } - } - - gc() { - // garbageCollection; garbage collection: removes any inaccessible hashes from the cache - const badHashes = getBadHashes(); - const goodHashes = rootQueryCleaner(badHashes); - const goodHashes2 = getGoodHashes(badHashes, goodHashes); - removeInaccessibleHashes(badHashes, goodHashes2); - } - - // remove hashes that are flagged for deletion and store records of them in a set badHashes for removal inside root queries - getBadHashes() { - const badHashes = new Set(); - for (let key in this.storage) { - if (key === "ROOT_QUERY" || key === "ROOT_MUTATION") continue; - if (this.storage[key] === "DELETED") { - badHashes.add(key); - delete this.storage[key]; - } - } - return badHashes; - } - - // go through root queries, remove all instances of bad hashes, add remaining hashes into goodHashes set - rootQueryCleaner(badHashes) { - const goodHashes = new Set(); - const rootQuery = this.storage["ROOT_QUERY"]; - for (let key in rootQuery) { - if (Array.isArray(rootQuery[key])) { - rootQuery[key] = rootQuery[key].filter((x) => !badHashes.has(x)); - if (rootQuery[key].length === 0) delete rootQuery[key]; - for (let el of rootQuery[key]) goodHashes.add(el); - } else { - badHashes.has(rootQuery[key]) - ? 
-          delete rootQuery[key]
-          : goodHashes.add(rootQuery[key]);
-      }
-    }
-    return goodHashes;
-  }
-
-  // Go through the cache, check good hashes for any nested hashes and add them to goodHashes set
-  getGoodHashes(badHashes, goodHashes) {
-    for (let key in this.storage) {
-      if (key === "ROOT_QUERY" || key === "ROOT_MUTATION") continue;
-      for (let i in this.storage[key]) {
-        if (Array.isArray(this.storage[key][i])) {
-          for (let el of this.storage[key][i]) {
-            if (el.includes("~") && !badHashes.has(el)) {
-              goodHashes.add(el);
-            }
-          }
-        } else if (typeof this.storage[key][i] === "string") {
-          if (
-            this.storage[key][i].includes("~") &&
-            !badHashes.has(this.storage[key][i])
-          ) {
-            goodHashes.add(this.storage[key][i]);
-          }
-        }
-      }
-    }
-    return goodHashes;
-  }
-
-  // Remove inaccessible hashes by checking if they are in the goodHashes set or not
-  removeInaccessibleHashes(badHashes, goodHashes) {
-    for (let key in this.storage) {
-      if (key === "ROOT_QUERY" || key === "ROOT_MUTATION") continue;
-      if (!goodHashes.has(key)) delete this.storage[key];
-      for (let i in this.storage[key]) {
-        if (Array.isArray(this.storage[key][i])) {
-          this.storage[key][i] = this.storage[key][i].filter(
-            (x) => !badHashes.has(x),
-          );
-        } else if (typeof this.storage[key][i] === "string") {
-          if (
-            this.storage[key][i].includes("~") &&
-            badHashes.has(this.storage[key][i])
-          ) {
-            delete this.storage[key][i];
-          }
-        }
-      }
-    }
-  }
-
-  // cache read/write helper methods
-  async cacheRead(hash) {
-    return this.storage[hash];
-  }
-
-  async cacheWrite(hash, value) {
-    this.storage[hash] = value;
-  }
-
-  async cacheDelete(hash) {
-    delete this.storage[hash];
-  }
-
-  async cacheClear() {
-    this.storage = {
-      ROOT_QUERY: {},
-      ROOT_MUTATION: {},
-    };
-  }
-
-  // functionality to stop polling
-  stopPollInterval(interval) {
-    clearInterval(interval);
-  }
-
-  writeWholeQuery(queryStr, respObj) {
-    const hash = queryStr.replace(/\s/g, "");
-    this.cacheWrite(ROOT_QUERY[hash], respObj);
-    return respObj;
-  }
-
-  readWholeQuery(queryStr) {
-    const hash = queryStr.replace(/\s/g, "");
-    const root = this.cacheRead("ROOT_QUERY");
-    if (root[hash]) return { data: root[hash] };
-    return undefined;
-  }
-
-  // specialized helper methods
-  async populateAllHashes(allHashesFromQuery, fields) {
-    // include the hash name for each hash
-    if (!allHashesFromQuery.length) return [];
-    const hyphenIdx = allHashesFromQuery[0].indexOf("~");
-    const typeName = allHashesFromQuery[0].slice(0, hyphenIdx);
-    return allHashesFromQuery.reduce(async (acc, hash) => {
-      // for each hash from the input query, build the response object
-      const readVal = await this.cacheRead(hash);
-      // return undefined if hash has been garbage collected
-      if (readVal === undefined) return undefined;
-      if (readVal === "DELETED") return acc;
-      const dataObj = {};
-      for (const field in fields) {
-        if (readVal[field] === "DELETED") continue;
-        // for each field in the fields input query, add the corresponding value from the cache if the field is not another array of hashes
-        if (readVal[field] === undefined && field !== "__typename") {
-          return undefined;
-        } else if (typeof fields[field] !== "object") {
-          // add the typename for the type
-          if (field === "__typename") {
-            dataObj[field] = typeName;
-          } else dataObj[field] = readVal[field];
-        } else {
-          // case where the field from the input query is an array of hashes, recursively invoke populateAllHashes
-          dataObj[field] = await this.populateAllHashes(
-            readVal[field],
-            fields[field],
-          );
-          if (dataObj[field] === undefined) return undefined;
-        }
-      }
-      // acc is an array within a Response object for each hash
-      try {
-        const resolvedProm = await Promise.resolve(acc);
-        resolvedProm.push(dataObj);
-        return resolvedProm;
-      } catch (error) {
-        return undefined;
-      }
-    }, []);
-  }
-}
diff --git a/src/Browser/FrequencySketch.js b/src/Browser/FrequencySketch.js
new file mode 100644
index 0000000..92143df
--- /dev/null
+++ b/src/Browser/FrequencySketch.js
@@ -0,0 +1,163 @@
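// FrequencySketch is a Count-Min-Sketch-style frequency estimator: each key is
// counted in four 4-bit saturating counters (max 15) chosen by independent
// hashes, frequency() reports the minimum of the four, and all counters are
// periodically halved so stale entries age out. A usage sketch (the key shown
// is illustrative; the W-TinyLFU cache stringifies whatever it tracks):
//
//   const sketch = new FrequencySketch();
//   sketch.updateCapacity(2000);   // size the table before tracking
//   sketch.increment('Movie~4');
//   sketch.increment('Movie~4');
//   sketch.frequency('Movie~4');   // ≈ 2; an estimate, capped at 15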
+export function FrequencySketch() {
+
+  const RESET_MASK = 0x77777777; // 0111 0111 0111 ... clears the top bit of each 4-bit counter when halving
+  const ONE_MASK = 0x11111111;   // 0001 0001 0001 ...
+
+  let sampleSize, blockMask, size;
+  let table = [];
+
+  /**
+   * Initializes and increases the capacity of this FrequencySketch instance
+   * so it can accurately estimate the popularity of data given the maximum
+   * size of the cache. Frequency counts become zero when resizing.
+   *
+   * @param maxSize cache capacity
+   */
+  this.updateCapacity = function(maxSize) {
+    const max = Math.floor(maxSize); // to ensure it's an integer
+    if(table.length >= max) return;
+
+    table = Array(Math.max(nearestPowerOfTwo(max), 8)).fill().map(()=>Array(2).fill(0));
+    sampleSize = (maxSize === 0) ? 10 : (10*max);
+    blockMask = (table.length >>> 3) - 1;
+
+    if (sampleSize <= 0) sampleSize = Number.MAX_SAFE_INTEGER;
+    size = 0;
+  }
+  /**
+   * Returns true if the sketch has not been initialized, indicating updateCapacity
+   * needs to be called before tracking frequencies.
+   */
+  const isNotInitialized = () => {
+    return table.length === 0;
+  }
+  /**
+   * Returns the estimated frequency of an element, up to the maximum (15).
+   *
+   * @param el the element being counted
+   * @return the estimated frequency - required to be nonnegative
+   */
+
+  this.frequency = function(el) {
+    if(isNotInitialized()) return 0;
+    const count = Array(4);
+
+    const blockHash = supphash(hashCode(el));
+    const counterHash = rehash(blockHash);
+    const block = (blockHash & blockMask) << 3;
+
+    for (let i = 0; i < 4; i++) {
+      const h = counterHash >>> (i << 3);
+      const index = (h >>> 1) & 15;
+      const row = index % 2;
+      const offset = h & 1;
+      count[i] = ((table[block+offset+(i<<1)][row] >>> ((index >> 1) << 2)) & 15);
+    }
+    return Math.min(...count);
+  }
+
+  /**
+   * Increment the frequency of the element if it does not exceed the maximum (15)
+   * @param el element to add
+   */
+  this.increment = function(el) {
+    if (isNotInitialized()) return;
+
+    const index = Array(8);
+    const blockHash = supphash(hashCode(el));
+    const counterHash = rehash(blockHash);
+    const block = (blockHash & blockMask) << 3;
+    // note: el is expected to be a string; callers stringify values before counting
+
+    for (let i = 0; i < 4; i++) {
+      const h = counterHash >>> (i << 3);
+      index[i] = (h >>> 1) & 15;
+      const offset = h & 1;
+      index[i + 4] = block + offset + (i << 1);
+    }
+    const incremented =
+      incrementAt(index[4], index[0])
+      | incrementAt(index[5], index[1])
+      | incrementAt(index[6], index[2])
+      | incrementAt(index[7], index[3]);
+    if (incremented && (++size == sampleSize)) {
+      reset();
+    }
+
+  }
+
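  // Layout note: `table` is an array of [int, int] rows, each 32-bit int
  // packing eight 4-bit counters, so a row pair holds 16 counters. A key maps
  // to a block of eight consecutive rows, and each of its four hashes picks
  // one counter inside that block; taking the minimum in frequency() bounds
  // the overestimation that shared counters can cause.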
+  /**
+   * Increments the specified counter by 1 if it is not already at the maximum value (15).
+   *
+   * @param i the table index (16 counters)
+   * @param j the counter to increment
+   * @return if incremented
+   */
+  const incrementAt = (i,j) => {
+    const row = j % 2;
+    const offset = (j >> 1) << 2;
+    const mask = (15 << offset);
+    if ((table[i][row] & mask) != mask) { // if curr counter is not at maximum (15)
+      table[i][row] += (1 << offset);
+      return true;
+    }
+    return false;
+  }
+
+  /** Reduces every counter by half of its original value. */
+  const reset = () => {
+    let count = 0;
+    for (let i = 0; i < table.length; i++) {
+      count += bitCount(table[i][0] & ONE_MASK) + bitCount(table[i][1] & ONE_MASK);
+      table[i][0] = (table[i][0] >>> 1) & RESET_MASK;
+      table[i][1] = (table[i][1] >>> 1) & RESET_MASK;
+    }
+    size = (size - (count >>> 2)) >>> 1;
+  }
+  /** Applies a supplemental hash function for fewer collisions. */
+  const supphash = x => {
+    x ^= x >> 17;
+    x *= 0xed5ad4bb;
+    x ^= x >> 11;
+    x *= 0xac4c1b51;
+    x ^= x >> 15;
+    return x;
+  }
+
+  /** Applies another round of hashing to achieve three-round hashing. */
+  const rehash = x => {
+    x *= 0x31848bab;
+    x ^= x >> 14;
+    return x;
+  }
+
+  const nearestPowerOfTwo = num => {
+    const exp = Math.floor(Math.log2(num));
+    if (Math.pow(2, exp) === num) return num;
+
+    return Math.pow(2, exp+1);
+  }
+
+  const hashCode = (input) => {
+    let hash, code;
+    hash = 0;
+    for (let i = 0; i < input.length; i++) {
+      code = input.charCodeAt(i);
+      hash = ((hash<<5)-hash)+code;
+      hash = hash & hash;
+    }
+    return hash;
+  }
+
+
+  /** bit counting for 32-bit integers (reference: https://graphics.stanford.edu/~seander/bithacks.html) */
+
+  const bitCount = n => {
+    n = n - ((n >> 1) & 0x55555555);
+    n = (n & 0x33333333) + ((n >> 2) & 0x33333333);
+    const count = ((n + (n >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
+    return count;
+  }
+}
\ No newline at end of file
diff --git a/src/Browser/lfuBrowserCache.js b/src/Browser/lfuBrowserCache.js
index 814773c..172f9f8 100644
--- a/src/Browser/lfuBrowserCache.js
+++ b/src/Browser/lfuBrowserCache.js
@@ -1,5 +1,5 @@
 /** @format */
-import { plural } from "https://deno.land/x/deno_plural/mod.ts";
+import { plural } from "https://deno.land/x/deno_plural@2.0.0/mod.ts";
 
 import normalizeResult from "./normalize.js";
 import destructureQueries from "./destructure.js";
@@ -29,14 +29,14 @@ class DoublyLinkedList {
   }
 
   removeNode(node) {
-    let prev = node.prev;
-    let next = node.next;
+    const prev = node.prev;
+    const next = node.next;
     prev.next = next;
     next.prev = prev;
   }
 
   removeTail() {
-    let node = this.tail.prev;
+    const node = this.tail.prev;
     this.removeNode(node);
     return node.key;
   }
@@ -152,7 +152,7 @@ LFUCache.prototype.read = async function (queryStr) {
   return { data: responseObject };
 };
 
-LFUCache.prototype.write = async function (queryStr, respObj, deleteFlag) {
+LFUCache.prototype.write = async function (queryStr, respObj, searchTerms, deleteFlag) {
   let nullFlag = false;
   let deleteMutation = "";
   for(const query in respObj.data) {
@@ -195,6 +195,20 @@ LFUCache.prototype.write = async function (queryStr, respObj, deleteFlag) {
         this.ROOT_QUERY[key].push(hash);
       }
     }
+    /****
+     * if search terms were provided in the wrapper and the query is an
+     * "all"-type query, build out queries in ROOT_QUERY that match the
+     * search terms for each item retrieved from the "all"-type query so
+     * that future single queries can be looked up directly from the cache
+     ****/
+    if (searchTerms && queryStr.slice(8, 11) === 'all'){
+      searchTerms.forEach(el => {
+        const elVal = resFromNormalize[hash][el].replaceAll(' ', '');
+
const hashKey = `one${typeName}(${el}:"${elVal}")`; + if (!this.ROOT_QUERY[hashKey]) this.ROOT_QUERY[hashKey] = []; + this.ROOT_QUERY[hashKey].push(hash); + }); + } } } } @@ -205,13 +219,13 @@ function labelId(obj) { return obj.__typename + "~" + id; } -LFUCache.prototype.cacheDelete = async function (hash) { - let node = this.nodeHash.get(hash); +LFUCache.prototype.cacheDelete = function (hash) { + const node = this.nodeHash.get(hash); this.freqHash.get(node.freq).removeNode(node); this.nodeHash.delete(hash); }; -LFUCache.prototype.cacheClear = async function () { +LFUCache.prototype.cacheClear = function () { this.currentSize = 0; this.leastFreq = 0; this.ROOT_QUERY = {}; diff --git a/src/Browser/lruBrowserCache.js b/src/Browser/lruBrowserCache.js index 42ebbbd..7da6dec 100644 --- a/src/Browser/lruBrowserCache.js +++ b/src/Browser/lruBrowserCache.js @@ -16,8 +16,10 @@ export default function LRUCache(capacity) { this.currentSize = 0; this.ROOT_QUERY = {}; this.ROOT_MUTATION = {}; + // node hash for cache lookup and storage this.nodeHash = new Map(); + // doubly-linked list to keep track of recency and handle eviction this.head = new Node('head', null); this.tail = new Node('tail', null); this.head.next = this.tail; @@ -31,6 +33,7 @@ LRUCache.prototype.removeNode = function (node) { next.prev = prev; }; + LRUCache.prototype.addNode = function (node) { const tempTail = this.tail.prev; tempTail.next = node; @@ -61,15 +64,15 @@ LRUCache.prototype.put = function (key, value) { this.addNode(newNode); this.nodeHash.set(key, newNode); - // check capacity - if over capacity, remove and reassign head node - // if (Object.nodeHash[this.nodeHash].length > capacity) + // check capacity - if over capacity, remove and reassign head node if (this.nodeHash.get(key).size > this.capacity){ const tempHead = this.head.next; this.removeNode(tempHead); - this.nodeHash.delete(tempTail.key); + this.nodeHash.delete(tempHead.key); } } +// read from the cache and generate a response object to be populated with values from cache LRUCache.prototype.read = async function (queryStr) { if (typeof queryStr !== "string") throw TypeError("input should be a string"); // destructure the query string into an object @@ -105,7 +108,7 @@ LRUCache.prototype.read = async function (queryStr) { return { data: responseObject }; }; -LRUCache.prototype.write = async function (queryStr, respObj, deleteFlag) { +LRUCache.prototype.write = async function (queryStr, respObj, searchTerms, deleteFlag) { let nullFlag = false; let deleteMutation = ""; for(const query in respObj.data) { @@ -148,6 +151,20 @@ LRUCache.prototype.write = async function (queryStr, respObj, deleteFlag) { this.ROOT_QUERY[key].push(hash); } } + /**** + * if search terms were provided in the wrapper and the query is an + * "all"-type query, build out queries in ROOT_QUERY that match the + * search terms for each item retrieved from the "all"-type query so + * that future single queries can be looked up directly from the cache + ****/ + if (searchTerms && queryStr.slice(8, 11) === 'all'){ + searchTerms.forEach(el => { + const elVal = resFromNormalize[hash][el].replaceAll(' ', ''); + const hashKey = `one${typeName}(${el}:"${elVal}")`; + if (!this.ROOT_QUERY[hashKey]) this.ROOT_QUERY[hashKey] = []; + this.ROOT_QUERY[hashKey].push(hash); + }); + } } } } @@ -158,6 +175,7 @@ function labelId(obj) { return obj.__typename + "~" + id; } +// fills in placeholder data in response object with values found in cache LRUCache.prototype.populateAllHashes = function ( allHashesFromQuery, 
fields diff --git a/src/Browser/normalize.js b/src/Browser/normalize.js index b14ec50..6c8c04f 100644 --- a/src/Browser/normalize.js +++ b/src/Browser/normalize.js @@ -15,7 +15,7 @@ export default function normalizeResult(queryObj, resultObj, deleteFlag) { ); //iterate thru the different response objects that were mutated -4 +4 // Please do not disturb the mysterious, load-bearing 4. This is its home. const obj = resultObj.data; //checks if the current element is an array if (Array.isArray(obj)) { diff --git a/src/Browser/wTinyLFU Sub-Caches/lruSub-cache.js b/src/Browser/wTinyLFU Sub-Caches/lruSub-cache.js new file mode 100644 index 0000000..5781d5c --- /dev/null +++ b/src/Browser/wTinyLFU Sub-Caches/lruSub-cache.js @@ -0,0 +1,98 @@ +import { plural } from "https://deno.land/x/deno_plural@2.0.0/mod.ts"; + +class Node { + constructor (key, value) { + this.key = key; + this.value = value; + this.next = this.prev = null; + } +} + +export default function LRUCache(capacity) { + this.capacity = capacity; + this.currentSize = 0; + // node hash for cache lookup and storage + this.nodeHash = new Map(); + + // doubly-linked list to keep track of recency and handle eviction + this.head = new Node('head', null); + this.tail = new Node('tail', null); + this.head.next = this.tail; + this.tail.prev = this.head; +} + +LRUCache.prototype.removeNode = function (node) { + const prev = node.prev; + const next = node.next; + prev.next = next; + next.prev = prev; +}; + + +LRUCache.prototype.addNode = function (node) { + const tempTail = this.tail.prev; + tempTail.next = node; + + this.tail.prev = node; + node.next = this.tail; + node.prev = tempTail; +} + +// Like get, but doesn't update anything +LRUCache.prototype.peek = function(key) { + const node = this.nodeHash.get(key); + if (!node) return null; + return node.value; +} + +// Like removeNode, but takes key and deletes from hash +LRUCache.prototype.delete = function (key) { + const node = this.nodeHash.get(key); + const prev = node.prev; + const next = node.next; + prev.next = next; + next.prev = prev; + this.nodeHash.delete(key); +} + +LRUCache.prototype.get = function(key) { + const node = this.nodeHash.get(key); + + // check if node does not exist in nodeHash obj + if (!node) return null; + // update position to most recent in list + this.removeNode(node); + this.addNode(node); + return node.value; +} + +// used by wTinyLFU to get SLRU eviction candidates for TinyLFU decision +LRUCache.prototype.getCandidate = function () { + const tempHead = this.head.next; + this.removeNode(tempHead); + this.nodeHash.delete(tempHead.key); + return {key: tempHead.key, value: tempHead.value}; +} + +LRUCache.prototype.put = function (key, value) { + // create a new node + const newNode = new Node(key, value); + + // remove node from old position + const node = this.nodeHash.get(key); + if (node) this.removeNode(node); + + // add new node to tail + this.addNode(newNode); + this.nodeHash.set(key, newNode); + + // check capacity - if over capacity, remove and reassign head node + if (this.nodeHash.size > this.capacity){ + const tempHead = this.head.next; + this.removeNode(tempHead); + this.nodeHash.delete(tempHead.key); + // return tempHead for use in w-TinyLFU's SLRU cache + return {key: tempHead.key, value: tempHead.value}; + } +} + diff --git a/src/Browser/wTinyLFU Sub-Caches/slruSub-cache.js b/src/Browser/wTinyLFU Sub-Caches/slruSub-cache.js new file mode 100644 index 0000000..62bbb8f --- /dev/null +++ b/src/Browser/wTinyLFU Sub-Caches/slruSub-cache.js @@ -0,0 +1,58 @@ 
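// The segmented LRU below splits its capacity 20/80: new keys land in a small
// probationary segment, and a hit promotes them to the protected segment,
// with the protected segment's evictee demoted back to probationary. Roughly:
//
//   const slru = new SLRUCache(100);   // 20 probationary / 80 protected
//   slru.put('Movie~1', node);         // new entries start probationary
//   slru.get('Movie~1');               // a hit promotes 'Movie~1' to protected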
+import LRUCache from './lruSub-cache.js';
+
+/*****
+* Main SLRU Cache
+*****/
+export default function SLRUCache(capacity) {
+  // Probationary LRU Cache using the LRU structure in lruSub-cache.js
+  this.probationaryLRU = new LRUCache(capacity * .20);
+  // Protected LRU Cache
+  this.protectedLRU = new LRUCache(capacity * .80);
+}
+
+// Get item from cache, updates last access,
+// and promotes existing items to protected
+SLRUCache.prototype.get = function (key) {
+  // get the item from the protectedLRU
+  const protectedItem = this.protectedLRU.get(key);
+  // check to see if the item is in the probationaryLRU
+  const probationaryItem = this.probationaryLRU.peek(key);
+
+  // If the item is in neither segment, return undefined
+  if (protectedItem === null && probationaryItem === null) return;
+
+  // If the item only exists in the protected segment, return that item
+  if (protectedItem !== null) return protectedItem;
+
+  // If the item only exists in the probationary segment, promote to protected and return item
+  // if adding an item to the protectedLRU results in ejection, demote ejected node
+  this.probationaryLRU.delete(key);
+  this.putAndDemote(key, probationaryItem);
+  return probationaryItem;
+}
+
+// add or update item in cache
+SLRUCache.prototype.put = function (key, node) {
+  // if the item is in the protected segment, update it
+  if (this.protectedLRU.nodeHash.get(key)) this.putAndDemote(key, node);
+  else if (this.probationaryLRU.nodeHash.get(key)) {
+    // if the item is in the probationary segment,
+    // promote and update it
+    this.probationaryLRU.delete(key);
+    this.putAndDemote(key, node);
+  }
+  // if in neither, add item to the probationary segment
+  else this.probationaryLRU.put(key, node)
+}
+
+// Check to see if the item exists in the cache without updating access
+SLRUCache.prototype.has = function (key) {
+  return this.protectedLRU.nodeHash.get(key) || this.probationaryLRU.nodeHash.get(key);
+}
+
+// Adds a node to the protectedLRU
+SLRUCache.prototype.putAndDemote = function (key, value) {
+  // if adding an item to the protectedLRU results in ejection, demote ejected node
+  const demoted = this.protectedLRU.put(key, value);
+  if (demoted) this.probationaryLRU.put(demoted.key, demoted.value);
+}
\ No newline at end of file
diff --git a/src/Browser/wTinyLFUBrowserCache.js b/src/Browser/wTinyLFUBrowserCache.js
new file mode 100644
index 0000000..1b54e5f
--- /dev/null
+++ b/src/Browser/wTinyLFUBrowserCache.js
@@ -0,0 +1,222 @@
+import { plural } from "https://deno.land/x/deno_plural@2.0.0/mod.ts";
+
+import normalizeResult from "./normalize.js";
+import destructureQueries from "./destructure.js";
+import SLRUCache from "./wTinyLFU%20Sub-Caches/slruSub-cache.js"
+import LRUCache from "./wTinyLFU%20Sub-Caches/lruSub-cache.js";
+import { FrequencySketch } from './FrequencySketch.js';
+
+/*****
+* Overall w-TinyLFU Cache
+*****/
+export default function WTinyLFUCache (capacity) {
+  this.capacity = capacity;
+  this.ROOT_QUERY = {};
+  this.ROOT_MUTATION = {};
+  this.sketch = new FrequencySketch();
+
+  // initialize window cache with access to frequency sketch
+  this.WLRU = new LRUCache(capacity * .01);
+  this.WLRU.sketch = this.sketch;
+  // initialize segmented main cache with access to frequency sketch
+  this.SLRU = new SLRUCache(capacity * .99);
+  this.SLRU.probationaryLRU.sketch = this.sketch;
+  this.SLRU.protectedLRU.sketch = this.sketch;
+}
+
+WTinyLFUCache.prototype.putAndPromote = async function (key, value) {
+  const WLRUCandidate = this.WLRU.put(key, value);
+  // if adding to the WLRU cache results in an eviction...
+  if (WLRUCandidate) {
+    // if the probationary cache is at capacity...
+    let winner = WLRUCandidate;
+    if (this.SLRU.probationaryLRU.nodeHash.size >= Math.floor(this.SLRU.probationaryLRU.capacity)) {
+      // send the least recently used item in the probationary cache to the TinyLFU
+      const SLRUCandidate = this.SLRU.probationaryLRU.getCandidate();
+      // determine which item will improve the hit-ratio most
+      winner = await this.TinyLFU(WLRUCandidate, SLRUCandidate);
+    }
+    // add the winner to the probationary SLRU
+    this.SLRU.probationaryLRU.put(winner.key, winner.value);
+  }
+}
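// For a concrete sense of the proportions set up in the constructor above:
// with capacity = 2000, the window LRU holds 20 entries (1%), and the
// 1980-entry SLRU splits into a 396-entry probationary segment and a
// 1584-entry protected segment. A window evictee only enters the main cache
// by winning the TinyLFU frequency vote against the probationary segment's
// own eviction candidate.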
+
+// fills in placeholder data in response object with values found in cache
+WTinyLFUCache.prototype.populateAllHashes = function (
+  allHashesFromQuery,
+  fields
+) {
+  if (!allHashesFromQuery.length) return [];
+  // isolate the type of search from the rest of the hash name
+  const hyphenIdx = allHashesFromQuery[0].indexOf("~");
+  const typeName = allHashesFromQuery[0].slice(0, hyphenIdx);
+  const reduction = allHashesFromQuery.reduce(async (acc, hash) => {
+    // for each hash from the input query, build the response object
+    // first, check the SLRU cache
+    let readVal = await this.SLRU.get(hash);
+    // if the hash is not in the SLRU, check the WLRU
+    if (!readVal) readVal = await this.WLRU.get(hash);
+    if (readVal === "DELETED") return acc;
+    if (readVal) this.sketch.increment(JSON.stringify(readVal));
+    if (!readVal) return undefined;
+    const dataObj = {};
+    for (const field in fields) {
+      if (readVal[field] === "DELETED") continue;
+      // for each field in the fields input query, add the corresponding value from the cache
+      // if the field is not another array of hashes
+      if (readVal[field] === undefined && field !== "__typename") {
+        return undefined;
+      }
+      if (typeof fields[field] !== "object") {
+        // add the typename for the type
+        if (field === "__typename") {
+          dataObj[field] = typeName;
+        } else dataObj[field] = readVal[field]; // assign the value from the cache to the key in the response
+      } else {
+        // case where the field from the input query is an array of hashes, recursively invoke populateAllHashes
+        dataObj[field] = await this.populateAllHashes(
+          readVal[field],
+          fields[field]
+        );
+        if (dataObj[field] === undefined) return undefined;
+      }
+    }
+    // acc is an array of response objects for each hash
+    const resolvedProm = await Promise.resolve(acc);
+    resolvedProm.push(dataObj);
+    return resolvedProm;
+  }, []);
+  return reduction;
+};
+
+// read from the cache and generate a response object to be populated with values from cache
+WTinyLFUCache.prototype.read = async function (queryStr) {
+  if (typeof queryStr !== "string") throw TypeError("input should be a string");
+  // destructure the query string into an object
+  const queries = destructureQueries(queryStr).queries;
+  // breaks out of function if queryStr is a mutation
+  if (!queries) return undefined;
+  const responseObject = {};
+  // iterate through each query in the input queries object
+  for (const query in queries) {
+    // get the entire str query from the name input query and arguments
+    const queryHash = queries[query].name.concat(queries[query].arguments);
+    const rootQuery = this.ROOT_QUERY;
+    // match in ROOT_QUERY
+    if (rootQuery[queryHash]) {
+      // get the hashes to populate from the existent query in the cache
+      const arrayHashes = rootQuery[queryHash];
+      // Determines responseObject property labels - use alias if applicable, otherwise use name
+      const respObjProp = queries[query].alias ??
queries[query].name; + // invoke populateAllHashes and add data objects to the response object for each input query + responseObject[respObjProp] = await this.populateAllHashes( + arrayHashes, + queries[query].fields + ); + + if (!responseObject[respObjProp]) return undefined; + + // no match with ROOT_QUERY return null or ... + } else { + return null; + } + } + return { data: responseObject }; +}; + +WTinyLFUCache.prototype.write = async function (queryStr, respObj, searchTerms, deleteFlag) { + let nullFlag = false; + let deleteMutation = ""; + let wasFoundIn = null; + for(const query in respObj.data) { + if(respObj.data[query] === null) nullFlag = true + else if(query.toLowerCase().includes('delete')) deleteMutation = labelId(respObj.data[query]); + } + if(!nullFlag) { + const queryObj = destructureQueries(queryStr); + const resFromNormalize = normalizeResult(queryObj, respObj, deleteFlag); + // update the original cache with same reference + for (const hash in resFromNormalize) { + // first check SLRU + let resp = await this.SLRU.get(hash); + // next, check the window LRU + if (resp) wasFoundIn = 'SLRU' + if (!resp) resp = await this.WLRU.get(hash); + if (resp && !wasFoundIn) wasFoundIn = 'WLRU'; + if (resp) this.sketch.increment(JSON.stringify(resp)); + if (hash === "ROOT_QUERY" || hash === "ROOT_MUTATION") { + if(deleteMutation === "") { + this[hash] = Object.assign(this[hash], resFromNormalize[hash]); + } else { + const typeName = deleteMutation.slice(0, deleteMutation.indexOf('~')); + for(const key in this.ROOT_QUERY) { + if(key.includes(typeName + 's') || key.includes(plural(typeName))) { + for(let i = 0; i < this.ROOT_QUERY[key].length; i++) { + if(this.ROOT_QUERY[key][i] === deleteMutation) { + this.ROOT_QUERY[key].splice(i, 1); + i--; + } + } + } + } + } + } else if (resFromNormalize[hash] === "DELETED") { + // Should we delete directly or do we still need to flag as DELETED + if (wasFoundIn === 'SLRU') await this.SLRU.put(hash, "DELETED"); + else if (wasFoundIn === 'WLRU') await this.WLRU.put(hash, "DELETED"); + } else if (resp) { + const newObj = Object.assign(resp, resFromNormalize[hash]); + // write to the appropriate cache + if (wasFoundIn === 'SLRU') await this.SLRU.put(hash, newObj); + else if (wasFoundIn === 'WLRU') await this.WLRU.put(hash, newObj); + } else { + const typeName = hash.slice(0, hash.indexOf('~')); + await this.putAndPromote(hash, resFromNormalize[hash]); + for(const key in this.ROOT_QUERY) { + if(key.includes(typeName + 's') || key.includes(plural(typeName))) { + this.ROOT_QUERY[key].push(hash); + } + } + /**** + * if search terms were provided in the wrapper and the query is an + * "all"-type query, build out queries in ROOT_QUERY that match the + * search terms for each item retrieved from the "all"-type query so + * that future single queries can be looked up directly from the cache + ****/ + if (searchTerms && queryStr.slice(8, 11) === 'all'){ + searchTerms.forEach(el => { + const elVal = resFromNormalize[hash][el].replaceAll(' ', ''); + const hashKey = `one${typeName}(${el}:"${elVal}")`; + if (!this.ROOT_QUERY[hashKey]) this.ROOT_QUERY[hashKey] = []; + this.ROOT_QUERY[hashKey].push(hash); + }); + } + } + } + } +}; + +// Note: WholeQuery is not a currently-functioning option in Obsidian Wrapper +WTinyLFUCache.prototype.writeWholeQuery = function (queryStr, respObj) { + const hash = queryStr.replace(/\s/g, ""); + this.put(this.ROOT_QUERY[hash], respObj); + return respObj; +}; + +// Note: WholeQuery is not a currently-functioning option in Obsidian 
Wrapper +WTinyLFUCache.prototype.readWholeQuery = function (queryStr) { + const hash = queryStr.replace(/\s/g, ""); + if (this.ROOT_QUERY[hash]) return this.get(this.ROOT_QUERY[hash]); + return undefined; +}; + +/***** +* TinyLFU Admission Policy +*****/ +WTinyLFUCache.prototype.TinyLFU = async function (WLRUCandidate, SLRUCandidate) { + // get the frequency values of both items + const WLRUFreq = await this.sketch.frequency(JSON.stringify(WLRUCandidate.value)); + const SLRUFreq = await this.sketch.frequency(JSON.stringify(SLRUCandidate.value)); + // return the object with the higher frequency, prioritizing items in the window cache, + return WLRUFreq >= SLRUFreq ? WLRUCandidate : SLRUCandidate; +} \ No newline at end of file diff --git a/src/Obsidian.ts b/src/Obsidian.ts index 08f9de2..ea12e66 100644 --- a/src/Obsidian.ts +++ b/src/Obsidian.ts @@ -2,14 +2,12 @@ import { graphql } from 'https://cdn.pika.dev/graphql@15.0.0'; import { renderPlaygroundPage } from 'https://deno.land/x/oak_graphql@0.6.2/graphql-playground-html/render-playground-html.ts'; import { makeExecutableSchema } from 'https://deno.land/x/oak_graphql@0.6.2/graphql-tools/schema/makeExecutableSchema.ts'; import { Cache } from './quickCache.js'; -import LFUCache from './Browser/lfuBrowserCache.js'; import queryDepthLimiter from './DoSSecurity.ts'; import { restructure } from './restructure.ts'; -import { rebuildFromQuery } from './rebuild.js'; import { normalizeObject } from './normalize.ts'; -import { transformResponse, detransformResponse } from './transformResponse.ts'; import { isMutation, invalidateCache } from './invalidateCacheCheck.ts'; import { mapSelectionSet } from './mapSelections.js'; +import { HashTable } from './queryHash.js'; interface Constructable { new (...args: any): T & OakRouter; @@ -28,14 +26,15 @@ export interface ObsidianRouterOptions { resolvers: ResolversProps; context?: (ctx: any) => any; usePlayground?: boolean; - useCache?: boolean; // trivial parameter + useCache?: boolean; redisPort?: number; policy?: string; maxmemory?: string; + searchTerms?: string[]; + persistQueries?: boolean; + hashTableSize?: number; maxQueryDepth?: number; - useQueryCache?: boolean; // trivial parameter - useRebuildCache?: boolean; - customIdentifier?: Array; + customIdentifier?: string[]; mutationTableMap?: Record; // Deno recommended type name } @@ -48,6 +47,9 @@ export interface ResolversProps { // Export developer chosen port for redis database connection // export let redisPortExport: number = 6379; +// tentative fix to get invalidateCacheCheck.ts access to the cache; +export const scope: Record = {}; + /** * * @param param0 @@ -60,112 +62,119 @@ export async function ObsidianRouter({ resolvers, context, usePlayground = false, - useCache = true, + useCache = true, // default to true redisPort = 6379, policy = 'allkeys-lru', maxmemory = '2000mb', + searchTerms = [], // Developer can pass in array of search categories + persistQueries = false, // default to false + hashTableSize = 16, // default to 16 maxQueryDepth = 0, - useQueryCache = true, - useRebuildCache = true, - customIdentifier = ['id', '__typename'], + customIdentifier = ['__typename', '_id'], mutationTableMap = {}, // Developer passes in object where keys are add mutations and values are arrays of affected tables }: ObsidianRouterOptions): Promise { - redisPortExport = redisPort; const router = new Router(); const schema = makeExecutableSchema({ typeDefs, resolvers }); - // const cache = new LFUCache(50); // If using LFU Browser Caching, uncomment line - 
const cache = new Cache(); // If using Redis caching, uncomment line
-  cache.cacheClear();
-  if (policy || maxmemory) {
-    // set redis configurations
-    cache.configSet('maxmemory-policy', policy);
-    cache.configSet('maxmemory', maxmemory);
+
+  let cache, hashTable;
+  if (useCache) {
+    cache = new Cache();
+    scope.cache = cache;
+    cache.connect(redisPort, policy, maxmemory);
+  }
+  if (persistQueries) {
+    hashTable = new HashTable(hashTableSize);
   }
 
   //post
   await router.post(path, async (ctx: any) => {
-
-    const t0 = performance.now(); // Used for demonstration of cache vs. db performance times
     const { response, request } = ctx;
     if (!request.hasBody) return;
+
     try {
-      const contextResult = context ? await context(ctx) : undefined;
+      let queryStr;
       let body = await request.body().value;
+      if (persistQueries && body.hash && !body.query) {
+        const { hash } = body;
+        queryStr = hashTable.get(hash);
+        // if not found in hash table, respond so we can send full query.
+        if (!queryStr) {
+          response.status = 204;
+          return;
+        }
+      } else if (persistQueries && body.hash && body.query) {
+        const { hash, query } = body;
+        hashTable.add(hash, query);
+        queryStr = query;
+      } else if (persistQueries && !body.hash) {
+        throw new Error('Unable to process request because hashed query was not provided');
+      } else if (!persistQueries) {
+        queryStr = body.query;
+      } else {
+        throw new Error('Unable to process request because query argument not provided');
+      }
 
-      const selectedFields = mapSelectionSet(body.query); // Gets requested fields from query and saves into an array
-
-      if (maxQueryDepth) queryDepthLimiter(body.query, maxQueryDepth); // If a security limit is set for maxQueryDepth, invoke queryDepthLimiter, which throws error if query depth exceeds maximum
-      let restructuredBody = { query: restructure(body) }; // Restructure gets rid of variables and fragments from the query
-
-      let cacheQueryValue = await cache.read(body.query); // Parses query string into query key and checks cache for that key
+      const contextResult = context ? await context(ctx) : undefined;
+      // const selectedFields = mapSelectionSet(queryStr); // Gets requested fields from query and saves into an array
+      if (maxQueryDepth) queryDepthLimiter(queryStr, maxQueryDepth); // If a security limit is set for maxQueryDepth, invoke queryDepthLimiter, which throws error if query depth exceeds maximum
+      let restructuredBody = { query: restructure({query: queryStr}) }; // Restructure gets rid of variables and fragments from the query
+
+      // IF WE ARE USING A CACHE
+      if (useCache) {
+
+        let cacheQueryValue = await cache.read(queryStr); // Parses query string into query key and checks cache for that key
+
+        // ON CACHE MISS
+        if (!cacheQueryValue) {
+          // QUERY THE DATABASE
+          const gqlResponse = await (graphql as any)(
+            schema,
+            queryStr,
+            resolvers,
+            contextResult,
+            body.variables || undefined,
+            body.operationName || undefined
+          );
 
-      // Is query in cache?
- if (useCache && useQueryCache && cacheQueryValue) { - let detransformedCacheQueryValue = await detransformResponse( // Returns a nested object representing the original graphQL response object for a given queryKey - restructuredBody.query, - cacheQueryValue, - selectedFields - ); - if (!detransformedCacheQueryValue) { - // cache was evicted if any partial cache is missing, which causes detransformResponse to return undefined - cacheQueryValue = undefined; + // customIdentifier is a default param for Obsidian Router - defaults to ['__typename', '_id] + const normalizedGQLResponse = normalizeObject( // Recursively flattens an arbitrarily nested object into an objects with hash key and hashable object pairs + gqlResponse, + customIdentifier + ); - } else { // Successful cache hit + // If operation is mutation, invalidate relevant responses in cache + if (isMutation(restructuredBody)) { + invalidateCache(normalizedGQLResponse, queryStr, mutationTableMap); + // ELSE, simply write to the cache + } else { + await cache.write(queryStr, normalizedGQLResponse, searchTerms); + } + // AFTER HANDLING THE CACHE, RETURN THE ORIGINAL RESPONSE response.status = 200; - response.body = detransformedCacheQueryValue; // Returns response from cache - const t1 = performance.now(); - console.log( - '%c Obsidian retrieved data from cache and took ' + - (t1 - t0) + - ' milliseconds.', - 'background: #222; color: #00FF00' - ); + response.body = gqlResponse; + return; + // ON CACHE HIT + } else { + response.status = 200; + response.body = cacheQueryValue; // Returns response from cache + return; } - } // If not in cache: - if (useCache && useQueryCache && !cacheQueryValue) { + // IF NOT USING A CACHE + } else { + // DIRECTLY QUERY THE DATABASE const gqlResponse = await (graphql as any)( schema, - body.query, + queryStr, resolvers, contextResult, body.variables || undefined, body.operationName || undefined ); - const normalizedGQLResponse = normalizeObject( // Recursively flattens an arbitrarily nested object into an objects with hash key and hashable object pairs - gqlResponse, - customIdentifier - ); - - if (isMutation(restructuredBody)) { // If operation is mutation, invalidate relevant responses in cache - const queryString = body; - invalidateCache( - normalizedGQLResponse, - queryString.query, - mutationTableMap - ); - } - // If read query: run query, normalize GQL response, transform GQL response, write to cache, and write pieces of normalized GQL response objects - else { - const transformedGQLResponse = transformResponse( - gqlResponse, - customIdentifier - ); - await cache.write(body.query, transformedGQLResponse, false); - for (const key in normalizedGQLResponse) { - await cache.cacheWriteObject(key, normalizedGQLResponse[key]); - } - } response.status = 200; response.body = gqlResponse; // Returns response from database - const t1 = performance.now(); - console.log( - '%c Obsidian received new data and took ' + - (t1 - t0) + - ' milliseconds', - 'background: #222; color: #FFFF00' - ); + return; } } catch (error) { response.status = 400; diff --git a/src/Obsidian_old.ts b/src/Obsidian_old.ts deleted file mode 100644 index 163d22a..0000000 --- a/src/Obsidian_old.ts +++ /dev/null @@ -1,271 +0,0 @@ -import { graphql } from 'https://cdn.pika.dev/graphql@15.0.0'; -import { renderPlaygroundPage } from 'https://deno.land/x/oak_graphql@0.6.2/graphql-playground-html/render-playground-html.ts'; -import { makeExecutableSchema } from 
'https://deno.land/x/oak_graphql@0.6.2/graphql-tools/schema/makeExecutableSchema.ts'; -import LFUCache from './Browser/lfuBrowserCache.js'; -import { Cache } from './quickCache.js'; -import queryDepthLimiter from './DoSSecurity.ts'; -import { restructure } from './restructure.ts'; -import { invalidateCacheCheck, invalidateCache } from './invalidateCacheCheck.js'; -import { normalizeResult, cachePrimaryFields } from './astNormalize.js' -import { rebuildFromQuery } from './rebuild.js' -import { mapSelectionSet } from './mapSelections.js' -import { normalizeObject } from './normalize.ts' -import { transformResult } from './transform_MC.ts' -import { detransformResult } from './detransform_MC.ts' - -interface Constructable { - new(...args: any): T & OakRouter; -} - -interface OakRouter { - post: any; - get: any; - obsidianSchema?: any; -} - -export interface ObsidianRouterOptions { - Router: Constructable; - path?: string; - typeDefs: any; - resolvers: ResolversProps; - context?: (ctx: any) => any; - usePlayground?: boolean; - useCache?: boolean; - redisPort?: number; - policy?: string; - maxmemory?: string; - maxQueryDepth?: number; - useQueryCache?: boolean; - useRebuildCache?: boolean; - customIdentifier?: Array; -} - -export interface ResolversProps { - Query?: any; - Mutation?: any; - [dynamicProperty: string]: any; -} - -// Export developer chosen port for redis database connection // -export let redisPortExport: number = 6379; - -/** - * - * @param param0 - * @returns - */ -export async function ObsidianRouter({ - Router, - path = '/graphql', - typeDefs, - resolvers, - context, - usePlayground = false, - useCache = true, - redisPort = 6379, - policy, - maxmemory, - maxQueryDepth = 0, - useQueryCache = true, - useRebuildCache = true, - customIdentifier = ["id", "__typename"], -}: ObsidianRouterOptions): Promise { - redisPortExport = redisPort; - const router = new Router(); - - const schema = makeExecutableSchema({ typeDefs, resolvers }); - - // If using LFU Browser Caching, the following cache line needs to be uncommented. - //const cache = new LFUCache(50); - - // If using Redis caching, the following lines need to be uncommented. - - const cache = new Cache(); - - // clear redis cache when restarting the server - - cache.cacheClear(); - - // set redis configurations - - if (policy || maxmemory) { - cache.configSet('maxmemory-policy', policy); - cache.configSet('maxmemory', maxmemory); - } - - await router.post(path, async (ctx: any): => { - var t0 = performance.now(); - - const { response, request } = ctx; - - if (request.hasBody) { - try { - const contextResult = context ? await context(ctx) : undefined; - let body = await request.body().value; - - - // If a securty limit is set for maxQueryDepth, invoke queryDepthLimiter - // which throws error if query depth exceeds maximum - if (maxQueryDepth) queryDepthLimiter(body.query, maxQueryDepth); - - // we run restructre to get rid of variables and fragments - - - body = { query: restructure(body) }; - console.log('Unprocessed body', await request.body().value) - console.log('Restructured body', body) - - - const isMutation = await invalidateCacheCheck(body); - if (isMutation) { - const mutationResponse = await (graphql as any)( // returns the response from mutation. 
This can be used to construct hash and check in redis if key already exists - schema, - body.query, - resolvers, - contextResult, - body.variables || undefined, - body.operationName || undefined - ); - console.log('Mutation response', mutationResponse) - await invalidateCache(normalizeObject(mutationResponse, customIdentifier)) - console.log('inside Obsidian.ts isMutation block') - response.body = await mutationResponse; - return; - } - - console.log('past isMutation block in obsidian.ts') - - // Variable to block the normalization of mutations // - let toNormalize = true; - - if (useCache && !isMutation) { - - // Send query off to be destructured and found in Redis if possible // - - let obsidianReturn - if (useQueryCache) { - obsidianReturn = await cache.read(body.query); - } - if (!obsidianReturn && useRebuildCache) { - - const rebuildReturn = await rebuildFromQuery(body.query); - - - obsidianReturn = rebuildReturn - } - - if (obsidianReturn) { - - // detransform MC - obsidianReturn = await detransformResult(body.query, obsidianReturn); - response.status = 200; - response.body = obsidianReturn; - var t1 = performance.now(); - console.log( - '%c Obsidian retrieved data from cache and took ' + - (t1 - t0) + - ' milliseconds.', "background: #222; color: #00FF00" - ); - - if (useQueryCache) { - // transform for big query - obsidianReturn = transformResult(obsidianReturn, customIdentifier); - await cache.write(body.query, obsidianReturn, false); - } - return; - } - } - - // if not in cache, it will fetch a new response from database - const result = await (graphql as any)( - schema, - body.query, - resolvers, - contextResult, - body.variables || undefined, - body.operationName || undefined - ); - - // Send database response to client // - response.status = 200; - response.body = result; - - //cache of whole query completely non normalized - //boolean to allow the full query cache - if (useQueryCache && useCache && !isMutation) { - const transformedResult = transformResult(result, customIdentifier); // MC - await cache.write(body.query, transformedResult, false); - } - - // Normalize response and store in cache // - if (useCache && toNormalize && !result.errors && useRebuildCache && !isMutation) { - - //run to map alias - // let map = mapSelectionSet(body.query) - - // this normalizeds the result and saves to REDIS - let normalized - // uses base id, __typename if given customIdentifer array is not populated - if (customIdentifier.length === 0) { - // normalized = await normalizeObject(response.body, customIdentifier) - return; - - } else { - // this uses the custom identifier if given - normalized = await normalizeObject(response.body, customIdentifier) - for(const key in normalized){ - await cache.cacheWriteObject(key, normalized[key]); - } - //loop thru normalized - // write to Redis - } - - // await cachePrimaryFields(normalized, body.query, map) //[Movie7, Movie15, Movie21]: - } - - var t1 = performance.now(); - console.log( - '%c Obsidian received new data and took ' + (t1 - t0) + ' milliseconds', 'background: #222; color: #FFFF00' - ); - - return; - } catch (error) { - response.status = 200; - response.body = { - data: null, - errors: [ - { - message: error.message ? 
error.message : error, - }, - ], - }; - console.error('Error: ', error.message); - return; - } - } - }); - // serve graphql playground - await router.get(path, async (ctx: any) => { - const { request, response } = ctx; - if (usePlayground) { - const prefersHTML = request.accepts('text/html'); - const optionsObj: any = { - 'schema.polling.enable': false, // enables automatic schema polling - } - - if (prefersHTML) { - - const playground = renderPlaygroundPage({ - endpoint: request.url.origin + path, - subscriptionEndpoint: request.url.origin, - settings: optionsObj - }); - response.status = 200; - response.body = playground; - return; - } - } - }); - - return router; -} diff --git a/src/astNormalize.js b/src/astNormalize.js deleted file mode 100644 index 93d29ff..0000000 --- a/src/astNormalize.js +++ /dev/null @@ -1,155 +0,0 @@ -/** @format */ - -import { gql } from "https://deno.land/x/oak_graphql/mod.ts"; - -import { redisdb } from "./quickCache.js"; -import { Cache } from './quickCache.js'; - -//graphql response is going to be in JSON; -// this is for breaking up AST feilds/parts into the hash -// and taking the response and pairing the resp info with hash -const cache = new Cache(); -//idArray so they can define hash nomenclature -const cacheWriteList = async (hash, array, overwrite = true) => { - if (overwrite) { - await redisdb.del(hash); - } - - array = array.map((element) => JSON.stringify(element)); - await redisdb.rpush(hash, ...array); - return; -}; - -export async function normalizeResult( - gqlResponse, - map, - idArray = ["id", "__typename"] -) { - // console.log('gqlResponse -> ', gqlResponse); - // console.log('map -> ', map); - - // console.log('%c astNormalize.js (normalizeResult) triggered', "background: #222; color: #F42504"); - - const recursiveObjectHashStore = (object, uniqueArray, map) => { - - if (object == null) object = {}; - - const keys = new Set(Object.keys(object)); // keys = ['data'], keys = ['id', '__typename', 'title', ...] 
- - // only the keys 'id' and '__typename' are hashable - const isHashable = uniqueArray.every((element) => keys.has(element)); // can turn this from O(N) to O(1) with Map/Set.has - - if (isHashable) { - let hash = ""; - - uniqueArray.forEach((id) => (hash += "~" + object[id])); //~7~Movie - - // if hash exists as key in Redis, skip code block below and only return hash variable - redisdb.exists(hash) - .then(data => { - if (!data) { - const returnObject = {}; - keys.forEach((key) => { - if (Array.isArray(object[key])) { - returnObject[hash][map[key]] = []; - object[key].forEach((element) => { - returnObject[hash][map[key]].push( - recursiveObjectHashStore(element, uniqueArray, map) - ); - }); - } else if (typeof object[key] == "object" && object[key] !== null) { - returnObject[hash][map[key]] = recursiveObjectHashStore( - object[key], - uniqueArray, - map - ); - } else { - if (!returnObject[hash]) { - returnObject[hash] = {}; - } - returnObject[hash][map[key]] = object[key]; - } - }); - // console.log('Returned returnObject', returnObject); - // console.log('cacheWriteObject called from astNormalize.js passing in hash and some object') - // console.log('some object being passed in', Object.values(returnObject)[0]) - cache.cacheWriteObject(hash, Object.values(returnObject)[0]); - // console.log('hash -->', hash) - // console.log('Object.keys(returnObject)[0] -->', Object.keys(returnObject)[0]) - // console.log('returnObject ->', returnObject) - } - }) - .catch(err => { - console.log('err occured when checking if hash in redis: ', err) - }) - - return hash; - } else { - //if object isn't hashable - let returnObject = {}; - Object.keys(object).forEach((key) => { - if (Array.isArray(object[key])) { - returnObject[key] = []; - object[key].forEach((element) => { - returnObject[key].push( - recursiveObjectHashStore(element, uniqueArray, map) - ); - }); - } else if (typeof object[key] == "object") { - returnObject[key] = recursiveObjectHashStore( - object[key], - uniqueArray, - map - ); - } else { - returnObject[key] = object[key]; - } - }); - - return returnObject; - } - //define hash from idArray (loop through, concatenate all items into one string) - //define query hash from name, - }; - - return await recursiveObjectHashStore(gqlResponse, idArray, map); -} - -export const cachePrimaryFields = async ( - normalizedResult, - queryString, - map -) => { - let ast = gql(queryString); - - const primaryFieldsArray = ast.definitions[0].selectionSet.selections; - - const expectedResultKeys = []; - const objectOfHashs = {}; - for (const primaryField of primaryFieldsArray) { - let title = primaryField.name.value; - if (primaryField.alias) { - title = primaryField.alias.value; - } else { - title = primaryField.name.value; - } - expectedResultKeys.push(title); - - let hashName = ""; - hashName = - hashName + - primaryField.name.value + - JSON.stringify(primaryField.arguments) + - JSON.stringify(primaryField.directives); - - objectOfHashs[hashName] = normalizedResult.data[title]; - - if (!Array.isArray(normalizedResult.data[title])) { - normalizedResult.data[title] = [normalizedResult.data[title]]; - } - - await cacheWriteList(hashName, normalizedResult.data[title]); - } - - return objectOfHashs; -}; diff --git a/src/invalidateCacheCheck.ts b/src/invalidateCacheCheck.ts index abffc24..5dc0c02 100644 --- a/src/invalidateCacheCheck.ts +++ b/src/invalidateCacheCheck.ts @@ -1,10 +1,11 @@ /** @format */ -import { gql } from 'https://deno.land/x/oak_graphql/mod.ts'; -import { visit } from 
'https://deno.land/x/graphql_deno/mod.ts'; -import { redisdb, Cache } from './quickCache.js'; +import { gql } from "https://deno.land/x/oak_graphql@0.6.4/mod.ts"; +import { visit } from "https://deno.land/x/graphql_deno@v15.0.0/mod.ts"; +import { scope } from './Obsidian.ts'; +import { Cache } from './quickCache.js'; import { deepEqual } from './utils.js'; -const cache = new Cache(); +// const cache = new Cache(); /** * @param {any} gqlQuery - Object containing the query string @@ -57,14 +58,14 @@ export async function invalidateCache( // That's why the for loop is needed for (const redisKey in normalizedMutation) { normalizedData = normalizedMutation[redisKey]; - cachedVal = await cache.cacheReadObject(redisKey); + cachedVal = await scope.cache.cacheReadObject(redisKey); // if response objects from mutation and cache are deeply equal then we delete it from cache because it infers that it's a delete mutation if ( (cachedVal !== undefined && deepEqual(normalizedData, cachedVal)) || isDelete(queryString) ) { - await cache.cacheDelete(redisKey); + await scope.cache.cacheDelete(redisKey); } else { // Otherwise it's an update or add mutation because response objects from mutation and cache don't match. @@ -77,19 +78,19 @@ export async function invalidateCache( const staleRefs: Array = mutationTableMap[mutationType]; // Grabs array of affected data tables from dev specified mutationTableMap - const rootQueryContents = await redisdb.hgetall('ROOT_QUERY'); // Creates array of all query keys and values in ROOT_QUERY from Redis + const rootQueryContents = await scope.cache.redis.hgetall('ROOT_QUERY'); // Creates array of all query keys and values in ROOT_QUERY from Redis for (let j = 0; j < staleRefs.length; j++) { // Checks for all query keys that refer to the affected tables and deletes them from Redis for (let i = 0; i < rootQueryContents.length; i += 2) { if ( staleRefs[j] === rootQueryContents[i].slice(0, staleRefs[j].length) ) { - redisdb.hdel('ROOT_QUERY', rootQueryContents[i]); + scope.cache.redis.hdel('ROOT_QUERY', rootQueryContents[i]); } } } } - await cache.cacheWriteObject(redisKey, normalizedData); // Adds or updates reference in redis cache + await scope.cache.cacheWriteObject(redisKey, normalizedData); // Adds or updates reference in redis cache } } } diff --git a/src/mapSelections.js b/src/mapSelections.js index 1229dbd..9439f11 100644 --- a/src/mapSelections.js +++ b/src/mapSelections.js @@ -4,9 +4,9 @@ import { gql } from 'https://deno.land/x/oak_graphql/mod.ts'; export function mapSelectionSet(query) { // Gets fields from query and stores all in an array - used to selectively query cache - let selectionKeysMap = {}; - let ast = gql(query); - let selections = ast.definitions[0].selectionSet.selections; + const selectionKeysMap = {}; + const ast = gql(query); + const selections = ast.definitions[0].selectionSet.selections; const tableName = selections[0].name.value; const recursiveMap = (recurseSelections) => { diff --git a/src/normalize.ts b/src/normalize.ts index 467341d..06b3766 100644 --- a/src/normalize.ts +++ b/src/normalize.ts @@ -62,11 +62,9 @@ export const hashMaker = ( hashableKeys: Array ): string => { let hash = ''; - let value = ''; - for (const hashableKey of hashableKeys) { - value = '~'; - value += hashableObject[hashableKey]; - hash += value; + for (let i = 0; i < hashableKeys.length; i++) { + hash += hashableObject[hashableKeys[i]]; + if (i < hashableKeys.length - 1) hash += '~' } return hash; }; diff --git a/src/queryHash.js b/src/queryHash.js new file mode 100644 
index 0000000..c5f59ea --- /dev/null +++ b/src/queryHash.js @@ -0,0 +1,79 @@ +// Create hash table +class Node { + constructor(key, str) { + this.value = {key, str}; + this.next = null; + } +} + +class LinkedList { + constructor() { + this.head = null; + this.tail = null; + } + + // adds a node to the end of the linked list + addNode(key, str) { + if (this.head === null) { + this.head = new Node(key, str); + this.tail = this.head; + } else { + this.tail.next = new Node(key, str); + this.tail = this.tail.next + } + } + + // finds a node from the SHA256-hashed queryStr and returns the queryStr + getNode(key) { + if (this.head === null) return undefined; + let currNode = this.head; + while (currNode) { + if (currNode.value.key === key) return currNode.value.str; + else currNode = currNode.next; + } + return undefined; + } +} + +export class HashTable { + constructor(size) { + this.SIZE = size; + this.table = new Array(this.SIZE); + } + + // adds a value to the hashTable + add(sha256Str, str) { + const index = hashSlingingSlasher(sha256Str, this.SIZE); + // if there is nothing at that index of the hash table + if (!this.table[index]) { + // initialize a new linked list and add a node to it + this.table[index] = new LinkedList(); + this.table[index].addNode(sha256Str, str); + // if there is already a linked list at that index + } else { + // add a new node + this.table[index].addNode(sha256Str, str); + } + } + + // gets the queryStr given the SHA256-Hashed queryStr + get(key) { + + const index = hashSlingingSlasher(key, this.SIZE); + if (!this.table[index]) return undefined; + return this.table[index].getNode(key); + } + +} + +// hashing function +function hashSlingingSlasher(string, size) { + let hash = 0; + if (string.length === 0) return hash; + for (let i = 0; i < string.length; i++) { + const letter = string.charCodeAt(i); + hash = ((hash << 5) - hash) + letter; + hash = hash & hash; // Convert to 32bit integer + } + return Math.abs(hash) % size; +} \ No newline at end of file diff --git a/src/quickCache.js b/src/quickCache.js index d5c8e70..f2a72f9 100644 --- a/src/quickCache.js +++ b/src/quickCache.js @@ -1,22 +1,12 @@ /** @format */ -import 'https://deno.land/x/dotenv/load.ts'; -import { connect } from 'https://deno.land/x/redis/mod.ts'; -import { gql } from 'https://deno.land/x/oak_graphql/mod.ts'; -import { print, visit } from 'https://deno.land/x/graphql_deno/mod.ts'; - -let redis; -const context = window.Deno ? 'server' : 'client'; - -if (context === 'server') { - redis = await connect({ - hostname: Deno.env.get('REDIS_HOST'), - port: 6379, - }); -} -//this is being exported so we can flush db in invalidateCacheCheck +import "https://deno.land/x/dotenv@v3.2.2/load.ts"; +import { connect } from "https://deno.land/x/redis@v0.29.2/mod.ts"; +import { gql } from "https://deno.land/x/oak_graphql@0.6.4/mod.ts"; +import { print, visit } from "https://deno.land/x/graphql_deno@v15.0.0/mod.ts"; +import { destructureQueries } from './Browser/destructure.js'; + -export const redisdb = redis; export class Cache { constructor( initialCache = { @@ -24,70 +14,121 @@ export class Cache { ROOT_MUTATION: {}, } ) { - this.storage = initialCache; - this.context = window.Deno ? 
'server' : 'client'; + this.ROOT_QUERY = initialCache.ROOT_QUERY; + this.ROOT_MUTATION = initialCache.ROOT_MUTATION; } - // set cache configurations - async configSet(parameter, value) { - return await redis.configSet(parameter, value); + // METHOD TO CONNECT TO CACHE + async connect(port, policy, maxmemory) { + this.redis = await connect({ + hostname: Deno.env.get('REDIS_HOST'), + port: port, + }); + console.log('connecting to redis'); + this.cacheClear(); + this.redis.configSet('maxmemory-policy', policy); + this.redis.configSet('maxmemory', maxmemory); } - // Main functionality methods below - // for reading the inital query + // METHOD TO READ FROM REDIS CACHE & RESTRUCTURE THE DATA async read(queryStr) { - //the queryStr it gets is the JSON stringified - const returnedValue = await this.cacheRead(queryStr); - - if (('returnedValue', returnedValue)) { - return JSON.parse(returnedValue); - } else { - return undefined; + // destructure the query string into an object + const queries = destructureQueries(queryStr).queries; + if (!queries) return; + const responseObject = {}; + // iterate through each query in the input object + for (const query in queries) { + const queryHash = queries[query].name.concat(queries[query].arguments); + if (this.ROOT_QUERY[queryHash]) { + const hashArray = this.ROOT_QUERY[queryHash]; + const respObjProp = queries[query].alias ?? queries[query].name; + // invoke populateAllHashes to add data object to the response object + responseObject[respObjProp] = await this.populateAllHashes(hashArray, queries[query].fields); + if (!responseObject[respObjProp]) return; + } else { + return null; + } } + return { data: responseObject }; } - async write(queryStr, respObj, deleteFlag) { - // update the original cache with same reference - const cacheHash = this.createQueryKey(queryStr); - await this.cacheWrite(cacheHash, JSON.stringify(respObj)); - } - - //will overwrite a list at the given hash by default - //if you pass a false value to overwrite, it will append the list items to the end - //Probably be used in normalize - cacheWriteList = async (hash, array, overwrite = true) => { - if (overwrite) { - await redis.del(hash); - } - array = array.map((element) => JSON.stringify(element)); - await redis.rpush(hash, ...array); + populateAllHashes(allHashes, fields){ + if (!allHashes.length) return []; + const tildeInd = allHashes[0].indexOf('~'); + const typeName = allHashes[0].slice(0, tildeInd); + const reduction = allHashes.reduce(async (acc, hash) => { + const readStr = await this.redis.get(hash); + const readVal = await JSON.parse(readStr); + if (!readVal) return; + const dataObj = {}; + // iterate over the fields object to populate with data from cache + for (const field in fields) { + if (typeof fields[field] !== 'object') { + if (field === '__typename') { + dataObj[field] = typeName; + } else { + dataObj[field] = readVal[field] || 'n/a'; + } + } else { + // if the field from the input query is an array of hashes, recursively invoke + dataObj[field] = await this.populateAllHashes(readVal[field], fields[field]); + if (dataObj[field] === undefined) return; + } + } + // at this point acc should be an array of response objects for each hash + const resolvedProm = await Promise.resolve(acc); + resolvedProm.push(dataObj); + return resolvedProm; + }, []); + return reduction; }; - cacheReadList = async (hash) => { - let cachedArray = await redis.lrange(hash, 0, -1); - cachedArray = cachedArray.map((element) => JSON.parse(element)); + // METHOD TO WRITE TO REDIS CACHE + async 
write(queryStr, respObj, searchTerms, deleteFlag) {
+    const hash = this.createQueryKey(queryStr);
+    const array = Object.keys(respObj);
+    // isolate the type of query - 'person,' 'book,' etc.
+    const tildeInd = array[0].indexOf('~');
+    const typeName = array[0].slice(0, tildeInd);
+    // store the array of keys to ROOT_QUERY
+    this.ROOT_QUERY[hash] = array;
+    // write each item in the array to the cache
+    for (let i = 0; i < array.length; i++) {
+      await this.redis.set(array[i], JSON.stringify(respObj[array[i]]));
+      // if using searchTerms, iterate through those and also store each item
+      // according to those terms in ROOT_QUERY
+      if (searchTerms.length && queryStr.slice(8, 11) === 'all') {
+        searchTerms.forEach(el => {
+          const elVal = respObj[array[i]][el].replaceAll(' ', '');
+          const hashKey = `one${typeName}(${el}:"${elVal}")`;
+          if (!this.ROOT_QUERY[hashKey]) this.ROOT_QUERY[hashKey] = [];
+          this.ROOT_QUERY[hashKey].push(array[i]);
+        })
+      }
+    }
+  }

-    return cachedArray;
-  };

+  // CURRENTLY BEING UTILIZED BY invalidateCacheCheck.ts, WHICH IS A FILE THAT SHOULD BE REFACTORED IN A FUTURE ITERATION
   cacheWriteObject = async (hash, obj) => {
     let entries = Object.entries(obj).flat();
     entries = entries.map((entry) => JSON.stringify(entry)); // adding as nested strings? take out one layer for clarity.
-    await redis.hset(hash, ...entries);
+    await this.redis.hset(hash, ...entries);
   };

+  // CURRENTLY BEING UTILIZED BY invalidateCacheCheck.ts, WHICH IS A FILE THAT SHOULD BE REFACTORED IN A FUTURE ITERATION
   cacheReadObject = async (hash, fields = []) => {
     // Checks for the fields requested, then queries cache for those specific keys in the hashes
     if (fields.length !== 0) {
       const fieldObj = {};
       for (const field of fields) {
-        const rawCacheValue = await redisdb.hget(hash, JSON.stringify(field));
+        const rawCacheValue = await this.redisdb.hget(hash, JSON.stringify(field));
         fieldObj[field] = JSON.parse(rawCacheValue);
       }
       return fieldObj;
     } else {
-      let objArray = await redisdb.hgetall(hash);
+      let objArray = await this.redisdb.hgetall(hash);
       if (objArray.length == 0) return undefined;
       let parsedArray = objArray.map((entry) => JSON.parse(entry));
@@ -103,43 +144,8 @@ export class Cache {
     }
   };

-  createBigHash(inputfromQuery) {
-    let ast = gql(inputfromQuery);
-
-    let returned = visit(ast, { enter: print(ast) });
-    let finalReturn = print(returned);
-    return JSON.stringify(finalReturn);
-  }
-
-  async cacheRead(queryStr) {
-    if (this.context === 'client') {
-      return this.storage[queryStr];
-    } else {
-      if (queryStr === 'ROOT_QUERY' || queryStr === 'ROOT_MUTATION') {
-        const hasRootQuery = await redis.get('ROOT_QUERY');
-
-        if (!hasRootQuery) {
-          await redis.set('ROOT_QUERY', JSON.stringify({}));
-        }
-        const hasRootMutation = await redis.get('ROOT_MUTATION');
-
-        if (!hasRootMutation) {
-          await redis.set('ROOT_MUTATION', JSON.stringify({}));
-        }
-      }
-      // use cacheQueryKey to create a key with object name and inputs to save in cache
-      const queryKey = this.createQueryKey(queryStr);
-      const cacheResponse = await redis.hget('ROOT_QUERY', queryKey);
-
-      if (!cacheResponse === undefined) return;
-      return JSON.parse(cacheResponse);
-    }
-  }
-
  /* Creates a string to search the cache or add as a key in the cache.
- If GraphQL query string is query{plants(input:{maintenance:"Low"}) name id ...} - returned queryKey will be plants:maintenance:Low */ createQueryKey(queryStr) { // traverses AST and gets object name, and any filter keys in the query @@ -150,7 +156,7 @@ export class Cache { if (ast.definitions[0].operation === 'mutation') return queryKey; if (ast.definitions[0].selectionSet.selections[0].arguments.length) { const fieldsArray = - ast.definitions[0].selectionSet.selections[0].arguments[0].value.fields; + ast.definitions[0].selectionSet.selections[0].arguments; const resultsObj = {}; fieldsArray.forEach((el) => { const name = el.name.value; @@ -158,49 +164,24 @@ export class Cache { resultsObj[name] = value; }); - for (let key in resultsObj) { - queryKey += `:${key}:${resultsObj[key]}`; + let parens = '' // name:"Yoda" + for (const key in resultsObj) { + parens += `${key}:"${resultsObj[key]}"`; } + queryKey = queryKey + '(' + parens + ')'; } return queryKey; } - async cacheWrite(hash, value) { - // writes value to object cache or JSON.stringified value to redis cache - if (this.context === 'client') { - this.storage[hash] = value; - } else { - value = JSON.stringify(value); - await redis.hset('ROOT_QUERY', hash, value); - } - } - - async cacheWriteList(hash, array) { - await redis.rpush(hash, ...array); - } - - async cacheReadList(hash) { - let cachedArray = await redis.lrange(hash, 0, -1); - return cachedArray; - } async cacheDelete(hash) { - // deletes the hash/value pair on either object cache or redis cache - if (this.context === 'client') { - delete this.storage[hash]; - } else await redis.del(hash); + await this.redis.del(hash); } + async cacheClear() { - // erases either object cache or redis cache - if (this.context === 'client') { - this.storage = { ROOT_QUERY: {}, ROOT_MUTATION: {} }; - } else { - await redis.flushdb((err, successful) => { - if (err) console.log('redis error', err); - console.log(successful, 'clear'); - }); - await redis.hset('ROOT_QUERY', 'blank', JSON.stringify({})); - await redis.set('ROOT_MUTATION', 'blank', JSON.stringify({})); - } + await this.redis.flushdb((err, successful) => { + if (err) console.log('redis error', err); + console.log(successful, 'clear'); + }); } // functionality to stop polling diff --git a/src/rebuild.js b/src/rebuild.js deleted file mode 100644 index 4766a63..0000000 --- a/src/rebuild.js +++ /dev/null @@ -1,142 +0,0 @@ -/** @format */ - -import { redisdb } from './quickCache.js'; -import { gql } from 'https://deno.land/x/oak_graphql/mod.ts'; - -let localCacheObject = {}; -const cacheReadList = async (hash) => { - //await redisdb.lrange(hash, 0, -1); - - let redisList = await redisdb.lrange(hash, 0, -1); - - //if (redisList.length===0) return undefined; - let cachedArray = redisList.map((element) => JSON.parse(element)); - localCacheObject[hash] = cachedArray; - - return cachedArray; -}; - -const cacheReadObject = async (hash, field) => { - if (field) { - if (localCacheObject[hash] && localCacheObject[hash][field]) { - return localCacheObject[hash][field]; - } else { - } - let returnValue = await redisdb.hget(hash, JSON.stringify(field)); - if (returnValue === undefined) return undefined; - - if (!localCacheObject[hash]) { - localCacheObject[hash] = {}; - } - if (localCacheObject[hash]) { - localCacheObject[hash][field] = JSON.parse(returnValue); - } - return JSON.parse(returnValue); - } else { - if (localCacheObject[hash]) { - return localCacheObject[hash]; - } else { - } - let objArray = await redisdb.hgetall(hash); - if 
(objArray.length == 0) return undefined; - let parsedArray = objArray.map((entry) => JSON.parse(entry)); - - if (parsedArray.length % 2 !== 0) { - return undefined; - } - let returnObj = {}; - for (let i = 0; i < parsedArray.length; i += 2) { - returnObj[parsedArray[i]] = parsedArray[i + 1]; - } - - localCacheObject[hash] = returnObj; - return returnObj; - } -}; - -export const rebuildFromQuery = async (restructuredQuery) => { - localCacheObject = {}; - let ast = gql(restructuredQuery); - - const primaryFieldsArray = ast.definitions[0].selectionSet.selections; - const primaryFieldResponseObject = {}; - for (const primaryField of primaryFieldsArray) { - const hashName = - primaryField.name.value + - JSON.stringify(primaryField.arguments) + - JSON.stringify(primaryField.directives); - const responseKeyName = primaryField.alias - ? primaryField.alias.value - : primaryField.name.value; - - let fieldsArray = primaryField.selectionSet.selections; - - const retrievedArray = await cacheReadList(hashName); - - if (retrievedArray.length === 0) return undefined; - const entry = await rebuildArrays(retrievedArray, fieldsArray); - if (entry === undefined) return undefined; - - primaryFieldResponseObject[responseKeyName] = entry; - } - - const rebuiltResponse = { data: primaryFieldResponseObject }; - - return rebuiltResponse; -}; - -const rebuildArrays = async (cachedArray, queryArray) => { - const returnArray = []; - for (const cachedHash of cachedArray) { - let returnObject = {}; - - for (const queryField of queryArray) { - let objKey; - let nameyName; - if (queryField.kind == 'InlineFragment') { - let __typeof = await cacheReadObject(cachedHash, '__typeof'); - if (__typeof == queryField.typeCondition.name.value) { - } - } - if (queryField.name && queryField.name.value) { - objKey = queryField.name.value; - nameyName = queryField.name.value; - } - if (queryField.alias && queryField.alias.value) { - objKey = queryField.alias.value; - } - - const fieldValue = await cacheReadObject(cachedHash, nameyName); - - if (fieldValue === undefined) return undefined; - - if (Array.isArray(fieldValue)) { - returnObject[objKey] = await rebuildArrays( - fieldValue, - queryField.selectionSet.selections - ); - if (returnObject[objKey] === undefined) return undefined; - } else { - if (queryField.selectionSet == undefined) { - returnObject[objKey] = fieldValue; - } else { - //its not undefined because its an inline fragment - - if (returnObject.__typename === queryField.typeCondition.name.value) { - let inlines = await rebuildArrays( - [cachedHash], - queryField.selectionSet.selections - ); - if (inlines == undefined) return undefined; - - returnObject = { ...returnObject, ...inlines[0] }; - } - } - } - } - - returnArray.push(returnObject); - } - - return returnArray; -}; diff --git a/src/restructure.ts b/src/restructure.ts index d89de51..0d4cb62 100644 --- a/src/restructure.ts +++ b/src/restructure.ts @@ -1,6 +1,6 @@ -import { gql } from 'https://deno.land/x/oak_graphql/mod.ts'; +import { gql } from "https://deno.land/x/oak_graphql@0.6.4/mod.ts"; -import { print, visit } from 'https://deno.land/x/graphql_deno/mod.ts'; +import { print, visit } from "https://deno.land/x/graphql_deno@v15.0.0/mod.ts"; /** * The restructure function: diff --git a/src/transformResponse.ts b/src/transformResponse.ts deleted file mode 100644 index 367cd13..0000000 --- a/src/transformResponse.ts +++ /dev/null @@ -1,151 +0,0 @@ -import { - isHashableObject, - containsHashableObject, - hashMaker, -} from './normalize.ts'; -import { GenericObject } 
from './normalize.ts'; -import { Cache } from './quickCache.js'; -const cache = new Cache(); - -const isArrayOfHashableObjects = ( - arrayOfObjects: Array, - hashableKeys: Array -): boolean => { - if (Array.isArray(arrayOfObjects)) { - return arrayOfObjects.every((object) => { - return containsHashableObject(object, hashableKeys); - }); - } - return false; -}; - -/* ----------------------------------------------------------------*/ -/** transformResponse - * Returns a nested object representing an object of references, where the references are hashes in Redis. The responseObject input must: - * 1) Contain hashable object(s) - * 2) have a first key of 'data', as should all GraphQL response objects - * 3) have an inner array of data response objects corresponding to the GraphQL fields - * - * @param {GenericObject} responseObject GraphQL response Object for large read query - * @param {array} hashableKeys Array of hashable keys - * @return {GenericObject} Nested object representing an object of references, where the references are hashes in Redis - */ -export const transformResponse = ( - responseObject: any, - hashableKeys: Array -): GenericObject => { - const result: GenericObject = {}; - - if (responseObject.data) { - return transformResponse(responseObject.data, hashableKeys); - } else if (isHashableObject(responseObject, hashableKeys)) { - return result; - } else { - for (const key in responseObject) { - if (isArrayOfHashableObjects(responseObject[key], hashableKeys)) { - for (const element of responseObject[key]) { - let hash = hashMaker(element, hashableKeys); - result[hash] = transformResponse(element, hashableKeys); - } - } - } - } - return result; -}; - -/* ----------------------------------------------------------------*/ -/** detransformResponse - * Returns a nested object representing the original graphQL response object for a given queryKey - * @param {String} queryKey String representing the stringified GraphQL query for a big read query, which should have been saved as a key in Redis - * @param {GenericObject} transformedValue Nested object representing of references, where the references are hashes in Redis - * @return {GenericObject} Nested object representing the original graphQL response object for a given queryKey - */ -export const detransformResponse = async ( - queryString: String, - transformedValue: any, - selectedFields: Array -): Promise => { - // remove all text within parentheses aka '(input: ...)' - queryString = queryString.replace(/\(([^)]+)\)/, ''); - // save Regex matches for line break followed by '{' - const matches = [...queryString.matchAll(/\n([^\n]+)\{/g)]; - - // get fields of query - const tableNames: Array = []; - matches.forEach((match) => { - tableNames.push(match[1].trim()); - }); - // fields ends up as array of just the fields ("plants" in the demo case); - // define recursiveDetransform function body for use later - const recursiveDetransform = async ( - transformedValue: any, - tableNames: Array, - selectedFields: Array, - depth: number = 0 - ): Promise => { - const keys = Object.keys(transformedValue); - let result: any = {}; - let currDepth = depth; - - // base case: transformedValue is innermost object aka empty object - if (Object.keys(transformedValue).length === 0) { - return result; - } else { - let currTable: string = tableNames[currDepth]; - result[currTable] = []; - - for (let hash in transformedValue) { - const redisValue: GenericObject = await cache.cacheReadObject( - hash, - selectedFields - ); - - // edge case in which our 
eviction strategy has pushed partial Cache data out of Redis - if (!redisValue) { - return 'cacheEvicted'; - } - - result[currTable].push(redisValue); - - let recursiveResult = await recursiveDetransform( - transformedValue[hash], - tableNames, - selectedFields, - (depth = currDepth + 1) - ); - - // edge case in which our eviction strategy has pushed partial Cache data out of Redis, for recursive call - if (recursiveResult === 'cacheEvicted') { - return 'cacheEvicted'; - // normal case with no cache eviction - } else { - result[currTable][result[currTable].length - 1] = Object.assign( - result[currTable][result[currTable].length - 1], - recursiveResult - ); - } - } - return result; - } - }; - - // actually call recursiveDetransform - // Formats Redis cache value into GraphQL response syntax. cacheReadObject is called and returns only fields that are present in selectedFields - let detransformedResult: any = { data: {} }; - const detransformedSubresult = await recursiveDetransform( - transformedValue, - tableNames, - selectedFields - ); - if (detransformedSubresult === 'cacheEvicted') { - detransformedResult = undefined; - } else { - detransformedResult.data = await recursiveDetransform( - transformedValue, - tableNames, - selectedFields - ); - } - - return detransformedResult; -}; diff --git a/test_files/rhum_test_files/garbage_collection_test.ts b/test_files/rhum_test_files/garbage_collection_test.ts deleted file mode 100644 index 178c433..0000000 --- a/test_files/rhum_test_files/garbage_collection_test.ts +++ /dev/null @@ -1,61 +0,0 @@ -import { Rhum } from 'https://deno.land/x/rhum@v1.1.11/mod.ts'; -import Cache from '../../src/Browser/CacheClassBrowser.js'; -import { test } from '../test_variables/garbage_collection_variables.ts'; - -Rhum.testPlan('CacheClassBrowser garbage collection', () => { - Rhum.testSuite('getBadHashes()', () => { - Rhum.testCase( - 'should return a badHashes set that contains all the hashes that are flagged for deletion', - async () => { - const cache = new Cache(test.cache); - const result = Array.from(cache.getBadHashes()); - Rhum.asserts.assertEquals(result, test.badHashesSet); - } - ); - }); - Rhum.testSuite('rootQueryCleaner()', () => { - Rhum.testCase( - 'should return (partial) goodHashes set from the root queries after removing bad hashes', - async () => { - const cache = new Cache(test.cache); - const badHashes = new Set(test.badHashesSet); - const result = Array.from(cache.rootQueryCleaner(badHashes)).sort(); - Rhum.asserts.assertEquals(result, test.goodHashesSet.sort()); - } - ); - Rhum.testCase( - 'should clean up the root queries by removing bad hashes', - async () => { - const cache = new Cache(test.cache); - const badHashes = new Set(test.badHashesSet); - cache.rootQueryCleaner(badHashes); - Rhum.asserts.assertEquals(cache.storage.ROOT_QUERY, test.cleanedRootQuery); - } - ) - }); - Rhum.testSuite('getGoodHashes()', () => { - Rhum.testCase( - 'should return (complete) goodHashes set after checking goodHashes for nested hashes and adding them to the goodHashes set', - async () => { - const cache = new Cache(test.cache); - const badHashes = new Set(test.badHashesSet); - const goodHashes = new Set(test.goodHashesSet); - const result = Array.from(cache.getGoodHashes(badHashes, goodHashes)).sort(); - Rhum.asserts.assertEquals(result, test.getGoodHashes.sort()); - } - ); - }); - Rhum.testSuite('removeInaccessibleHashes()', () => { - Rhum.testCase( - 'should remove inaccessible hashes from cache', - async () => { - const cache = new Cache(test.cache); - 
const badHashes = new Set(test.badHashesSet);
-        const goodHashes = new Set(test.getGoodHashes);
-        cache.removeInaccessibleHashes(badHashes, goodHashes);
-        Rhum.asserts.assertEquals(cache.storage, test.removeInaccessibleHashes);
-      }
-    );
-  });
-});
-
-Rhum.run();
\ No newline at end of file
diff --git a/test_files/rhum_test_files/wTinyLFU_test.js b/test_files/rhum_test_files/wTinyLFU_test.js
new file mode 100644
index 0000000..0a77285
--- /dev/null
+++ b/test_files/rhum_test_files/wTinyLFU_test.js
@@ -0,0 +1,93 @@
+import WTinyLFUCache from "../test_variables/wTinyLFU_variables.js";
+import { Rhum } from 'https://deno.land/x/rhum@v1.1.11/mod.ts';
+
+Rhum.testPlan('WTinyLFU cache functionality', () => {
+  Rhum.testSuite('WTinyLFU Initialization', () => {
+    Rhum.testCase('should initialize with correct capacities', () => {
+      const cache = new WTinyLFUCache(1000);
+      Rhum.asserts.assertEquals(cache.capacity, 1000);
+      Rhum.asserts.assertEquals(cache.WLRU.capacity, 10);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.capacity, 198);
+      Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.capacity, 792);
+    });
+  })
+  Rhum.testSuite('Window cache functionality', () => {
+    Rhum.testCase('should add new item to the windowLRU when adding to WTLFU cache', () => {
+      const cache = new WTinyLFUCache(100);
+      cache.putAndPromote('one', 1);
+      Rhum.asserts.assertEquals(cache.WLRU.get('one'), 1);
+    });
+    Rhum.testCase('should move items ejected from windowLRU into the probationaryLRU cache', async () => {
+      const cache = new WTinyLFUCache(100);
+      await cache.putAndPromote('one', 1);
+      await cache.putAndPromote('two', 2);
+      Rhum.asserts.assertEquals(cache.WLRU.get('one'), null);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.peek('one'), 1);
+      Rhum.asserts.assertEquals(cache.WLRU.get('two'), 2);
+    })
+    Rhum.testCase('should promote items from probationaryLRU to the protectedLRU when accessed', async () => {
+      const cache = new WTinyLFUCache(100);
+      await cache.putAndPromote('one', 1);
+      await cache.putAndPromote('two', 2);
+      Rhum.asserts.assertEquals(cache.SLRU.get('one'), 1);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('one'), null);
+      Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.peek('one'), 1);
+    })
+    Rhum.testCase('should demote items ejected from protectedLRU to probationary LRU', async () => {
+      const cache = new WTinyLFUCache(100);
+      cache.SLRU.protectedLRU.capacity = 1;
+      cache.SLRU.protectedLRU.put('one', 1);
+      await cache.SLRU.putAndDemote('two', 2);
+      Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.get('one'), null);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('one'), 1);
+      Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.get('two'), 2);
+    })
+    Rhum.testCase('should move highest frequency item into full probationary cache', async () => {
+      const cache = new WTinyLFUCache(100);
+      cache.SLRU.probationaryLRU.capacity = 1;
+      await cache.putAndPromote('one', 1);
+      await cache.putAndPromote('two', 2);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('one'), 1);
+      cache.sketch['one'] = 3;
+      cache.sketch['two'] = 2;
+      await cache.putAndPromote('three', 3);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('one'), 1);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('two'), null);
+      Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('three'), null);
+      Rhum.asserts.assertEquals(cache.WLRU.get('one'), null);
+      Rhum.asserts.assertEquals(cache.WLRU.get('two'), null);
+      Rhum.asserts.assertEquals(cache.WLRU.get('three'), 3);
+    })
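// [Editor's aside - not part of this patch] The capacities asserted in the
// initialization test above follow from the splits hard-coded in
// wTinyLFU_variables.js: 1% of total capacity goes to the window LRU and 99% to the
// segmented main cache, which is itself split 20% probationary / 80% protected.
// For a total capacity of 1000:
//
//   WLRU:          1000 * 0.01         = 10
//   probationary: (1000 * 0.99) * 0.20 = 198
//   protected:    (1000 * 0.99) * 0.80 = 792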
Rhum.testCase('should evict least recently used item from WLRU', async () => { + const cache = new WTinyLFUCache(200); + await cache.WLRU.put('one', 1); + await cache.WLRU.put('two', 2); + await cache.WLRU.put('three', 3); + Rhum.asserts.assertEquals(cache.WLRU.get('one'), null); + Rhum.asserts.assertEquals(cache.WLRU.get('two'), 2); + Rhum.asserts.assertEquals(cache.WLRU.get('three'), 3); + }) + Rhum.testCase('should evict least recently used item from ProbationaryLRU', async () => { + const cache = new WTinyLFUCache(100); + cache.SLRU.probationaryLRU.capacity = 2; + await cache.SLRU.probationaryLRU.put('one', 1); + await cache.SLRU.probationaryLRU.put('two', 2); + await cache.SLRU.probationaryLRU.put('three', 3); + Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('one'), null); + Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('two'), 2); + Rhum.asserts.assertEquals(cache.SLRU.probationaryLRU.get('three'), 3); + }) + Rhum.testCase('should evict least recently used item from ProtectedLRU', async () => { + const cache = new WTinyLFUCache(100); + cache.SLRU.protectedLRU.capacity = 2; + await cache.SLRU.protectedLRU.put('one', 1); + await cache.SLRU.protectedLRU.put('two', 2); + await cache.SLRU.protectedLRU.put('three', 3); + Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.get('one'), null); + Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.get('two'), 2); + Rhum.asserts.assertEquals(cache.SLRU.protectedLRU.get('three'), 3); + }) + }) +}); + +Rhum.run(); \ No newline at end of file diff --git a/test_files/test_variables/wTinyLFU_variables.js b/test_files/test_variables/wTinyLFU_variables.js new file mode 100644 index 0000000..1ba8236 --- /dev/null +++ b/test_files/test_variables/wTinyLFU_variables.js @@ -0,0 +1,196 @@ +// import { FrequencySketch } from '../../src/Browser/FrequencySketch.js' + +/***** +* Overall w-TinyLFU Cache +*****/ +export default function WTinyLFUCache (capacity) { + this.capacity = capacity; + this.sketch = {}; + + // initialize window cache with access to frequency sketch + this.WLRU = new LRUCache(capacity * .01); + this.WLRU.sketch = this.sketch; + // initialize segmented main cache with access to frequency sketch + this.SLRU = new SLRUCache(capacity * .99); + this.SLRU.probationaryLRU.sketch = this.sketch; + this.SLRU.protectedLRU.sketch = this.sketch; +} + +WTinyLFUCache.prototype.putAndPromote = async function (key, value) { + const WLRUCandidate = this.WLRU.put(key, value); + // if adding to the WLRU cache results in an eviction... + if (WLRUCandidate) { + // if the probationary cache is at capacity... + let winner = WLRUCandidate; + if (this.SLRU.probationaryLRU.nodeHash.size >= Math.floor(this.SLRU.probationaryLRU.capacity)) { + // send the last accessed item in the probationary cache to the TinyLFU + const SLRUCandidate = this.SLRU.probationaryLRU.getCandidate(); + // determine which item will improve the hit-ratio most + winner = await this.TinyLFU(WLRUCandidate, SLRUCandidate); + } + // add the winner to the probationary SLRU + this.SLRU.probationaryLRU.put(winner.key, winner.value); + } +} + +WTinyLFUCache.prototype.TinyLFU = function (WLRUCandidate, SLRUCandidate) { + // get the frequency values of both items + const WLRUFreq = this.sketch[WLRUCandidate.key]; + const SLRUFreq = this.sketch[SLRUCandidate.key]; + // return the object with the higher frequency, prioritizing items in the window cache, + return WLRUFreq >= SLRUFreq ? 
WLRUCandidate : SLRUCandidate;
+}
+
+/*****
+* Main SLRU Cache
+*****/
+function SLRUCache(capacity) {
+  // Probationary LRU Cache using existing LRU structure in lruBrowserCache.js
+  this.probationaryLRU = new LRUCache(capacity * .20);
+  // Protected LRU Cache
+  this.protectedLRU = new LRUCache(capacity * .80);
+}
+
+// Get item from cache, updates last access,
+// and promotes existing items to protected
+SLRUCache.prototype.get = function (key) {
+  // get the item from the protectedLRU
+  const protectedItem = this.protectedLRU.get(key);
+  // check to see if the item is in the probationaryLRU
+  const probationaryItem = this.probationaryLRU.peek(key);
+
+  // If the item is in neither segment, return undefined
+  if (protectedItem === null && probationaryItem === null) return;
+
+  // If the item only exists in the protected segment, return that item
+  if (protectedItem !== null) return protectedItem;
+
+  // If the item only exists in the probationary segment, promote to protected and return item
+  // if adding an item to the protectedLRU results in ejection, demote ejected node
+  this.probationaryLRU.delete(key);
+  this.putAndDemote(key, probationaryItem);
+  return probationaryItem;
+}
+
+// add or update item in cache
+SLRUCache.prototype.put = function (key, node) {
+  // if the item is in the protected segment, update it
+  if (this.protectedLRU.nodeHash.get(key)) this.putAndDemote(key, node);
+  else if (this.probationaryLRU.nodeHash.get(key)) {
+    // if the item is in the probationary segment,
+    // promote and update it
+    this.probationaryLRU.delete(key);
+    this.putAndDemote(key, node);
+  }
+  // if in neither, add item to the probationary segment
+  else this.probationaryLRU.put(key, node)
+}
+
+// Check to see if the item exists in the cache without updating access
+SLRUCache.prototype.has = function (key) {
+  return this.protectedLRU.nodeHash.get(key) || this.probationaryLRU.nodeHash.get(key);
+}
+
+// Adds a node to the protectedLRU
+SLRUCache.prototype.putAndDemote = function (key, value) {
+  // if adding an item to the protectedLRU results in ejection, demote ejected node
+  const demoted = this.protectedLRU.put(key, value);
+  if (demoted) this.probationaryLRU.put(demoted.key, demoted.value);
+}
+
+class Node {
+  constructor (key, value) {
+    this.key = key;
+    this.value = value;
+    this.next = this.prev = null;
+  }
+}
+
+function LRUCache(capacity) {
+  this.capacity = capacity;
+  this.currentSize = 0;
+  // node hash for cache lookup and storage
+  this.nodeHash = new Map();
+
+  // doubly-linked list to keep track of recency and handle eviction
+  this.head = new Node('head', null);
+  this.tail = new Node('tail', null);
+  this.head.next = this.tail;
+  this.tail.prev = this.head;
+}
+
+LRUCache.prototype.removeNode = function (node) {
+  const prev = node.prev;
+  const next = node.next;
+  prev.next = next;
+  next.prev = prev;
+};
+
+
+LRUCache.prototype.addNode = function (node) {
+  const tempTail = this.tail.prev;
+  tempTail.next = node;
+
+  this.tail.prev = node;
+  node.next = this.tail;
+  node.prev = tempTail;
+}
+
+// Like get, but doesn't update anything
+LRUCache.prototype.peek = function(key) {
+  const node = this.nodeHash.get(key);
+  if (!node) return null;
+  return node.value;
+}
+
+// Like removeNode, but takes key and deletes from hash
+LRUCache.prototype.delete = function (key) {
+  const node = this.nodeHash.get(key);
+  if (!node) return; // guard: the key may not be present
+  const prev = node.prev;
+  const next = node.next;
+  prev.next = next;
+  next.prev = prev;
+  this.nodeHash.delete(key);
+}
+
+LRUCache.prototype.get = function(key) {
const node = this.nodeHash.get(key); + + // check if node does not exist in nodeHash obj + if (!node) return null; + // update position to most recent in list + this.removeNode(node); + this.addNode(node); + return node.value; +} + +// used by wTinyLFU to get SLRU eviction candidates for TinyLFU decision +LRUCache.prototype.getCandidate = function () { + const tempHead = this.head.next; + this.removeNode(tempHead); + this.nodeHash.delete(tempHead.key); + return {key: tempHead.key, value: tempHead.value}; +} + +LRUCache.prototype.put = function (key, value) { + // create a new node + const newNode = new Node(key, value); + + // remove node from old position + const node = this.nodeHash.get(key); + if (node) this.removeNode(node); + + // add new node to tail + this.addNode(newNode); + this.nodeHash.set(key, newNode); + + // check capacity - if over capacity, remove and reassign head node + if (this.nodeHash.size > this.capacity){ + const tempHead = this.head.next; + this.removeNode(tempHead); + this.nodeHash.delete(tempHead.key); + // return tempHead for use in w-TinyLFU's SLRU cache + return {key: tempHead.key, value: tempHead.value}; + } +} +
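// [Editor's aside - illustrative sketch, not part of this patch] A minimal usage
// example of the W-TinyLFU cache defined above, assuming it is imported from this
// test_variables module; the keys and values here are hypothetical:
//
//   import WTinyLFUCache from './test_files/test_variables/wTinyLFU_variables.js';
//
//   const cache = new WTinyLFUCache(100);            // window: 1, probationary: ~19, protected: ~79
//   await cache.putAndPromote('a', { id: 1 });       // new items land in the window LRU
//   await cache.putAndPromote('b', { id: 2 });       // window (capacity 1) evicts 'a' -> probationary
//   cache.SLRU.get('a');                             // probationary hit promotes 'a' to protected
//   console.log(cache.SLRU.protectedLRU.peek('a'));  // -> { id: 1 }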