diff options
Diffstat (limited to 'background_scripts/completion_search.coffee')
| -rw-r--r-- | background_scripts/completion_search.coffee | 97 |
1 file changed, 45 insertions, 52 deletions
diff --git a/background_scripts/completion_search.coffee b/background_scripts/completion_search.coffee index eb27c076..46533833 100644 --- a/background_scripts/completion_search.coffee +++ b/background_scripts/completion_search.coffee @@ -1,9 +1,12 @@ CompletionSearch = debug: true + inTransit: {} + completionCache: new SimpleCache 2 * 60 * 60 * 1000, 5000 # Two hour, 5000 entries. + engineCache:new SimpleCache 1000 * 60 * 60 * 1000 # 1000 hours. - # The amount of time to wait for new requests before launching the HTTP request. The intention is to cut - # down on the number of HTTP requests we issue. + # The amount of time to wait for new requests before launching the current request (for example, if the user + # is still typing). delay: 100 get: (searchUrl, url, callback) -> @@ -15,13 +18,11 @@ CompletionSearch = xhr.onreadystatechange = -> if xhr.readyState == 4 - callback(if xhr.status == 200 then xhr else null) + callback if xhr.status == 200 then xhr else null - # Look up the completion engine for this searchUrl. Because of DummyCompletionEngine, above, we know there - # will always be a match. Imagining that there may be many completion engines, and knowing that this is - # called for every query, we cache the result. + # Look up the completion engine for this searchUrl. Because of DummyCompletionEngine, we know there will + # always be a match. lookupEngine: (searchUrl) -> - @engineCache ?= new SimpleCache 30 * 60 * 60 * 1000 # 30 hours (these are small, we can keep them longer). if @engineCache.has searchUrl @engineCache.get searchUrl else @@ -29,7 +30,7 @@ CompletionSearch = engine = new engine() return @engineCache.set searchUrl, engine if engine.match searchUrl - # True if we have a completion engine for this search URL, undefined otherwise. + # True if we have a completion engine for this search URL, false otherwise. 
haveCompletionEngine: (searchUrl) -> not @lookupEngine(searchUrl).dummy @@ -39,17 +40,19 @@ CompletionSearch = # - queryTerms are the query terms. # - callback will be applied to a list of suggestion strings (which may be an empty list, if anything goes # wrong). + # + # If no callback is provided, then we're to provide suggestions only if we can do so synchronously (ie. + # from a cache). In this case we just return the results. Returns null if we cannot service the request + # synchronously. + # complete: (searchUrl, queryTerms, callback = null) -> - query = queryTerms.join "" + query = queryTerms.join(" ").toLowerCase() - # If no callback is provided, then we're to provide suggestions only if we can do so synchronously (ie. - # from a cache). In this case we just return the results. Return null if we cannot service the request - # synchronously. returnResultsOnlyFromCache = not callback? callback ?= (suggestions) -> suggestions - # We don't complete single characters: the results are usually useless. - return callback [] unless 1 < query.length + # We don't complete queries which are too short: the results are usually useless. + return callback [] unless 3 < query.length # We don't complete regular URLs or Javascript URLs. return callback [] if 1 == queryTerms.length and Utils.isUrl query @@ -57,26 +60,18 @@ CompletionSearch = # Cache completions. However, completions depend upon both the searchUrl and the query terms. So we need # to generate a key. We mix in some junk generated by pwgen. A key clash might be possible, but - # vanishingly unlikely. + # is vanishingly unlikely. junk = "//Zi?ei5;o//" completionCacheKey = searchUrl + junk + queryTerms.map((s) -> s.toLowerCase()).join junk - @completionCache ?= new SimpleCache 60 * 60 * 1000, 2000 # One hour, 2000 entries. + if @completionCache.has completionCacheKey - if returnResultsOnlyFromCache - return callback @completionCache.get completionCacheKey - else - # We add a short delay, even for a cache hit. 
This avoids an ugly flicker when the additional - # suggestions are posted. - Utils.setTimeout 50, => - console.log "hit", completionCacheKey if @debug - callback @completionCache.get completionCacheKey - return + console.log "hit", completionCacheKey if @debug + return callback @completionCache.get completionCacheKey # If the user appears to be typing a continuation of the characters of the most recent query, then we can # re-use the previous suggestions. if @mostRecentQuery? and @mostRecentSuggestions? - reusePreviousSuggestions = do (query) => - query = queryTerms.join(" ").toLowerCase() + reusePreviousSuggestions = do => # Verify that the previous query is a prefix of the current query. return false unless 0 == query.indexOf @mostRecentQuery.toLowerCase() # Ensure that every previous suggestion contains the text of the new query. @@ -93,44 +88,42 @@ CompletionSearch = # That's all of the caches we can try. Bail if the caller is looking for synchronous results. return callback null if returnResultsOnlyFromCache - fetchSuggestions = (engine, callback) => - url = engine.getUrl queryTerms - query = queryTerms.join(" ").toLowerCase() - @get searchUrl, url, (xhr = null) => - # Parsing the response may fail if we receive an unexpected or an unexpectedly-formatted response. In - # all cases, we fall back to the catch clause, below. Therefore, we "fail safe" in the case of - # incorrect or out-of-date completion engines. - try - suggestions = engine.parse xhr - # Filter out the query itself. It's not adding anything. - suggestions = (suggestion for suggestion in suggestions when suggestion.toLowerCase() != query) - console.log "GET", url if @debug - catch - suggestions = [] - # We cache failures too, but remove them after just thirty minutes. - Utils.setTimeout 30 * 60 * 1000, => @completionCache.set completionCacheKey, null - console.log "fail", url if @debug - - callback suggestions - # We pause in case the user is still typing. 
Utils.setTimeout @delay, handler = @mostRecentHandler = => if handler == @mostRecentHandler @mostRecentHandler = null - # Share duplicate requests. First fetch the suggestions... - @inTransit ?= {} + # Elide duplicate requests. First fetch the suggestions... @inTransit[completionCacheKey] ?= new AsyncDataFetcher (callback) => - fetchSuggestions @lookupEngine(searchUrl), callback + engine = @lookupEngine searchUrl + url = engine.getUrl queryTerms + + @get searchUrl, url, (xhr = null) => + # Parsing the response may fail if we receive an unexpected or an unexpectedly-formatted response. + # In all cases, we fall back to the catch clause, below. Therefore, we "fail safe" in the case of + # incorrect or out-of-date completion engines. + try + suggestions = engine.parse xhr + # Filter out the query itself. It's not adding anything. + suggestions = (suggestion for suggestion in suggestions when suggestion.toLowerCase() != query) + console.log "GET", url if @debug + catch + suggestions = [] + # We cache failures too, but remove them after just thirty minutes. + Utils.setTimeout 30 * 60 * 1000, => @completionCache.set completionCacheKey, null + console.log "fail", url if @debug + + callback suggestions # ... then use the suggestions. @inTransit[completionCacheKey].use (suggestions) => - @mostRecentQuery = queryTerms.join " " + @mostRecentQuery = query @mostRecentSuggestions = suggestions callback @completionCache.set completionCacheKey, suggestions delete @inTransit[completionCacheKey] - # Cancel any pending (ie. blocked on @delay) queries. Does not cancel in-flight queries. + # Cancel any pending (ie. blocked on @delay) queries. Does not cancel in-flight queries. This is called + # whenever the user is typing. cancel: -> if @mostRecentHandler? @mostRecentHandler = null |
