author     Stephen Blott  2015-05-04 06:35:22 +0100
committer  Stephen Blott  2015-05-04 09:49:27 +0100
commit     136b132c81ae4f7a3b296109427fd757ca05dacf (patch)
tree       6e0a60512017799e78c22627ed3763b60ef6bd9c
parent     776f617ece5d333fe70df903982a18d65fc2776a (diff)
download   vimium-136b132c81ae4f7a3b296109427fd757ca05dacf.tar.bz2
Search completion; tweak scoring and synchronization.
-rw-r--r--  background_scripts/completion.coffee      121
-rw-r--r--  background_scripts/search_engines.coffee   85
-rw-r--r--  tests/unit_tests/completion_test.coffee     4
-rw-r--r--  tests/unit_tests/settings_test.coffee       2
4 files changed, 111 insertions(+), 101 deletions(-)
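
The central scoring change in the completion.coffee hunks below weights search-engine suggestions by the length of the last query term (capped at ten characters) and then decays the score geometrically so the completion engine's own ordering is preserved. A minimal standalone sketch of that weighting; the helper name scoreSearchSuggestions is illustrative and not part of the patch:

    # Sketch only: reproduces the weighting from the completion.coffee hunk below.
    scoreSearchSuggestions = (queryTerms, searchSuggestions) ->
      # Base the score on the length of the last query term, capped at 10 characters.
      characterCount = queryTerms[queryTerms.length - 1].length
      score = 0.6 * (Math.min(characterCount, 10.0) / 10.0)
      for suggestion in searchSuggestions
        scored = { suggestion, relevancy: score }
        score *= 0.9  # Decay so the engine's own ordering is retained.
        scored

    # With a three-character final term, scores run 0.18, 0.162, 0.1458, ...
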
diff --git a/background_scripts/completion.coffee b/background_scripts/completion.coffee
index fee4778a..37e9ea6b 100644
--- a/background_scripts/completion.coffee
+++ b/background_scripts/completion.coffee
@@ -335,42 +335,67 @@ class SearchEngineCompleter
else
(string) -> Utils.createSearchUrl string.split /\s+/
- type = if description? then description else "search"
+ haveDescription = description? and 0 < description.trim().length
+ type = if haveDescription then description else "search"
searchUrl = if custom then url else Settings.get "searchUrl"
- query = queryTerms[(if custom then 1 else 0)..].join " "
# For custom search engines, we add an auto-selected suggestion.
if custom
- title = if description? then query else queryTerms[0] + ": " + query
+ query = queryTerms[1..].join " "
+ title = if haveDescription then query else keyword + ": " + query
suggestions.push @mkSuggestion false, queryTerms, type, mkUrl(query), title, @computeRelevancy, 1
suggestions[0].autoSelect = true
queryTerms = queryTerms[1..]
- onComplete suggestions, (onComplete) =>
+ if queryTerms.length == 0
+ return onComplete suggestions
+
+ onComplete suggestions, (existingSuggestions, onComplete) =>
suggestions = []
# For custom search-engine queries, this adds suggestions only if we have a completer. For other queries,
# this adds suggestions for the default search engine (if we have a completer for that).
- SearchEngines.complete searchUrl, queryTerms, (searchSuggestions = []) =>
-
- # Scoring:
- # - The score does not depend upon the actual suggestion (so, it does not depend upon word relevancy).
- # We assume that the completion engine has already factored that in.
- # - The score is higher if the query is longer. The idea is that search suggestions are more likely
- # to be relevant if, after typing quite some number of characters, the user hasn't yet found a
- # useful suggestion from another completer.
- # - Scores are weighted such that they retain the ordering provided by the completion engine.
- characterCount = query.length - queryTerms.length + 1
- score = 0.8 * (Math.min(characterCount, 12.0)/12.0)
-
- for suggestion in searchSuggestions
- suggestions.push @mkSuggestion true, queryTerms, type, mkUrl(suggestion), suggestion, @computeRelevancy, score
- score *= 0.9
- if custom
- for suggestion in suggestions
- suggestion.reinsertPrefix = "#{keyword} " if suggestion.insertText
-
- onComplete suggestions
+ # Scoring:
+ # - The score does not depend upon the actual suggestion (so, it does not depend upon word
+ # relevancy). We assume that the completion engine has already factored that in. Also, completion
+ # engines often handle spelling mistakes, in which case we wouldn't find the query terms in the
+ # suggestion anyway.
+ # - The score is based on the length of the last query term. The idea is that the user is already
+ # happy with the earlier terms.
+ # - The score is higher if the last query term is longer. The idea is that search suggestions are more
+ # likely to be relevant if, after typing some number of characters, the user hasn't yet found
+ # a useful suggestion from another completer.
+ # - Scores are weighted such that they retain the order provided by the completion engine.
+ characterCount = queryTerms[queryTerms.length - 1].length
+ score = 0.6 * (Math.min(characterCount, 10.0)/10.0)
+
+ if 0 < existingSuggestions.length
+ existingSuggestionMinScore = existingSuggestions[existingSuggestions.length-1].relevancy
+ if score < existingSuggestionMinScore and MultiCompleter.maxResults <= existingSuggestions.length
+ # No suggestion we propose will have a high enough score to beat the existing suggestions, so bail
+ # immediately.
+ return onComplete []
+
+ # We pause in case the user is still typing.
+ Utils.setTimeout 250, handler = @mostRecentHandler = =>
+ return onComplete [] if handler != @mostRecentHandler # Bail if another completion has begun.
+
+ SearchEngines.complete searchUrl, queryTerms, (searchSuggestions = []) =>
+ for suggestion in searchSuggestions
+ suggestions.push @mkSuggestion true, queryTerms, type, mkUrl(suggestion), suggestion, @computeRelevancy, score
+ score *= 0.9
+
+ if custom
+ # For custom search engines, we need to tell the front end to insert the search engine's keyword
+ # when copying a suggestion into the vomnibar.
+ suggestion.reinsertPrefix = "#{keyword} " for suggestion in suggestions
+
+ # We keep at least three suggestions (if possible) and at most six. We keep more than three only if
+ # there are enough slots. The idea is that these suggestions shouldn't wholly displace suggestions
+ # from other completers. That would potentially be a problem because there is no relationship
+ # between the relevancy scores produced here and those produced by other completers.
+ count = Math.min 6, Math.max 3, MultiCompleter.maxResults - existingSuggestions.length
+ onComplete suggestions[...count]
mkSuggestion: (insertText, args...) ->
suggestion = new Suggestion args...
@@ -412,8 +437,10 @@ class SearchEngineCompleter
# A completer which calls filter() on many completers, aggregates the results, ranks them, and returns the top
# 10. Queries from the vomnibar frontend script come through a multi completer.
class MultiCompleter
+ @maxResults: 10
+
constructor: (@completers) ->
- @maxResults = 10
+ @maxResults = MultiCompleter.maxResults
refresh: ->
completer.refresh?() for completer in @completers
@@ -429,36 +456,30 @@ class MultiCompleter
suggestions = []
completersFinished = 0
continuation = null
+ # Call filter() on every source completer and wait for them all to finish before returning results.
+ # At most one of the completers (SearchEngineCompleter) may pass a continuation function, which will be
+ # called after the results of all of the other completers have been posted. Any additional results
+ # from this continuation will be added to the existing results and posted later. We don't call the
+ # continuation if another query is already waiting.
for completer in @completers
- # Call filter() on every source completer and wait for them all to finish before returning results.
- # At most one of the completers (SearchEngineCompleter) may pass a continuation function, which will be
- # called asynchronously after the results of all of the other completers have been posted. Any
- # additional results from this continuation will be added to the existing results and posted. We don't
- # call the continuation if another query is already waiting.
- completer.filter queryTerms, (newSuggestions, cont = null) =>
- # Allow completers to execute concurrently.
+ do (completer) =>
Utils.nextTick =>
- suggestions = suggestions.concat newSuggestions
- continuation = cont if cont?
- completersFinished += 1
- if completersFinished >= @completers.length
- onComplete @prepareSuggestions(suggestions), keepAlive: continuation?
- onDone = =>
+ completer.filter queryTerms, (newSuggestions, cont = null) =>
+ suggestions = suggestions.concat newSuggestions
+ continuation = cont if cont?
+ if @completers.length <= ++completersFinished
+ shouldRunContinuation = continuation? and not @mostRecentQuery
+ onComplete @prepareSuggestions(queryTerms, suggestions), keepAlive: shouldRunContinuation
+ # Allow subsequent queries to begin.
@filterInProgress = false
- @filter @mostRecentQuery.queryTerms, @mostRecentQuery.onComplete if @mostRecentQuery
- # We add a very short delay. It is possible for all of this processing to have been handled
- # pretty-much synchronously, which would have prevented any newly-arriving queries from
- # registering.
- Utils.setTimeout 10, =>
- if continuation? and not @mostRecentQuery
- continuation (newSuggestions) =>
- onComplete @prepareSuggestions suggestions.concat(newSuggestions)
- onDone()
+ if shouldRunContinuation
+ continuation suggestions, (newSuggestions) =>
+ onComplete @prepareSuggestions queryTerms, suggestions.concat(newSuggestions)
else
- onDone()
+ @filter @mostRecentQuery.queryTerms, @mostRecentQuery.onComplete if @mostRecentQuery
- prepareSuggestions: (suggestions) ->
- suggestion.computeRelevancy @queryTerms for suggestion in suggestions
+ prepareSuggestions: (queryTerms, suggestions) ->
+ suggestion.computeRelevancy queryTerms for suggestion in suggestions
suggestions.sort (a, b) -> b.relevancy - a.relevancy
suggestions = suggestions[0...@maxResults]
suggestion.generateHtml() for suggestion in suggestions
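
The synchronization tweak in SearchEngineCompleter above pauses briefly before fetching and abandons the fetch if a newer query has registered a handler in the meantime. A sketch of that most-recent-handler debounce, using plain setTimeout in place of Vimium's Utils.setTimeout (which takes the delay first); the class and method names here are illustrative only:

    # Sketch of the debounce pattern from the hunk above: only the handler
    # registered by the most recent query proceeds; stale handlers bail.
    class DebouncedFetcher
      fetch: (queryTerms, onComplete) ->
        handler = @mostRecentHandler = =>
          # A newer query has since registered its own handler; give up.
          return onComplete [] if handler != @mostRecentHandler
          @doFetch queryTerms, onComplete
        setTimeout handler, 250  # Pause briefly in case the user is still typing.

      # Stand-in for the real network fetch (SearchEngines.complete in the patch).
      doFetch: (queryTerms, onComplete) -> onComplete []
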
diff --git a/background_scripts/search_engines.coffee b/background_scripts/search_engines.coffee
index abf8c86e..3ddbe742 100644
--- a/background_scripts/search_engines.coffee
+++ b/background_scripts/search_engines.coffee
@@ -114,38 +114,16 @@ completionEngines = [
]
SearchEngines =
- cancel: (searchUrl, callback = null) ->
- @requests[searchUrl]?.abort()
- delete @requests[searchUrl]
- callback? null
-
- # Perform an HTTP GET.
get: (searchUrl, url, callback) ->
- @requests ?= {} # Maps a searchUrl to any outstanding HTTP request for that search engine.
- @cancel searchUrl
-
- # We cache the results of the most-recent 100 successfully XMLHttpRequests with a ten-second (ie. very
- # short) expiry.
- @requestCache ?= new SimpleCache 10 * 1000, 100
-
- if @requestCache.has url
- callback @requestCache.get url
- return
-
- @requests[searchUrl] = xhr = new XMLHttpRequest()
+ xhr = new XMLHttpRequest()
xhr.open "GET", url, true
- xhr.timeout = 750
- xhr.ontimeout = => @cancel searchUrl, callback
- xhr.onerror = => @cancel searchUrl, callback
+ xhr.timeout = 1000
+ xhr.ontimeout = xhr.onerror = -> callback null
xhr.send()
- xhr.onreadystatechange = =>
+ xhr.onreadystatechange = ->
if xhr.readyState == 4
- @requests[searchUrl] = null
- if xhr.status == 200
- callback @requestCache.set url, xhr
- else
- callback null
+ callback(if xhr.status == 200 then xhr else null)
# Look up the search-completion engine for this searchUrl. Because of DummySearchEngine, above, we know
# there will always be a match. Imagining that there may be many completion engines, and knowing that this
@@ -176,7 +154,7 @@ SearchEngines =
return callback [] if Utils.hasJavascriptPrefix queryTerms[0]
# Cache completions. However, completions depend upon both the searchUrl and the query terms. So we need
- # to generate a key. We mix in some nonsense generated by pwgen. A key clash is possible, but vanishingly
+ # to generate a key. We mix in some junk generated by pwgen. A key clash is possible, but vanishingly
# unlikely.
junk = "//Zi?ei5;o//"
completionCacheKey = searchUrl + junk + queryTerms.join junk
@@ -184,26 +162,37 @@ SearchEngines =
if @completionCache.has completionCacheKey
return callback @completionCache.get completionCacheKey
- engine = @lookupEngine searchUrl
- url = engine.getUrl queryTerms
- query = queryTerms.join(" ").toLowerCase()
- @get searchUrl, url, (xhr = null) =>
- # Parsing the response may fail if we receive an unexpected or an unexpectedly-formatted response. In
- # all cases, we fall back to the catch clause, below.
- try
- suggestions = engine.parse xhr
- # Make sure we really do have an iterable of strings.
- suggestions = (suggestion for suggestion in suggestions when "string" == typeof suggestion)
- # Filter out the query itself. It's not adding anything.
- suggestions = (suggestion for suggestion in suggestions when suggestion.toLowerCase() != query)
- # We keep at most three suggestions, the top three.
- callback @completionCache.set completionCacheKey, suggestions[...3]
- catch
- callback @completionCache.set completionCacheKey, callback []
- # We cache failures, but remove them after just ten minutes. This (it is hoped) avoids repeated
- # XMLHttpRequest failures over a short period of time.
- removeCompletionCacheKey = => @completionCache.set completionCacheKey, null
- setTimeout removeCompletionCacheKey, 10 * 60 * 1000 # Ten minutes.
+ fetchSuggestions = (callback) =>
+ engine = @lookupEngine searchUrl
+ url = engine.getUrl queryTerms
+ query = queryTerms.join(" ").toLowerCase()
+ @get searchUrl, url, (xhr = null) =>
+ # Parsing the response may fail if we receive an unexpected or an unexpectedly-formatted response. In
+ # all cases, we fall back to the catch clause, below.
+ try
+ suggestions = engine.parse xhr
+ # Make sure we really do have an iterable of strings.
+ suggestions = (suggestion for suggestion in suggestions when "string" == typeof suggestion)
+ # Filter out the query itself. It's not adding anything.
+ suggestions = (suggestion for suggestion in suggestions when suggestion.toLowerCase() != query)
+ catch
+ suggestions = []
+ # We cache failures, but remove them after just ten minutes. This (it is hoped) avoids repeated
+ # XMLHttpRequest failures over a short period of time.
+ removeCompletionCacheKey = => @completionCache.set completionCacheKey, null
+ setTimeout removeCompletionCacheKey, 10 * 60 * 1000 # Ten minutes.
+
+ callback suggestions
+
+ # Don't allow duplicate identical active requests. This can happen, for example, when the user enters or
+ # removes a space, or when they enter a character and immediately delete it.
+ @inTransit ?= {}
+ unless @inTransit[completionCacheKey]?.push callback
+ queue = @inTransit[completionCacheKey] = []
+ fetchSuggestions (suggestions) =>
+ callback @completionCache.set completionCacheKey, suggestions
+ delete @inTransit[completionCacheKey]
+ callback suggestions for callback in queue
root = exports ? window
root.SearchEngines = SearchEngines
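
The inTransit bookkeeping added above coalesces identical concurrent completion requests: the first caller performs the fetch, later callers with the same cache key are queued, and all of them receive the same result. A standalone sketch of that pattern (the dedupedFetch name and the module-level inTransit object are illustrative assumptions):

    # Sketch of the request coalescing used in search_engines.coffee above.
    inTransit = {}

    dedupedFetch = (key, fetch, callback) ->
      # If a request for this key is already outstanding, just queue the callback;
      # Array::push returns the new (truthy) length, so the unless body is skipped.
      unless inTransit[key]?.push callback
        queue = inTransit[key] = []
        fetch (result) ->
          callback result
          delete inTransit[key]
          queuedCallback result for queuedCallback in queue
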
diff --git a/tests/unit_tests/completion_test.coffee b/tests/unit_tests/completion_test.coffee
index 39437b52..56fcc456 100644
--- a/tests/unit_tests/completion_test.coffee
+++ b/tests/unit_tests/completion_test.coffee
@@ -239,10 +239,10 @@ context "search engines",
setup ->
searchEngines = "foo: bar?q=%s\n# comment\nbaz: qux?q=%s baz description"
Settings.set 'searchEngines', searchEngines
- @completer = new CustomSearchEngineCompleter()
+ @completer = new SearchEngineCompleter()
# note, I couldn't just call @completer.refresh() here as I couldn't set root.Settings without errors
# workaround is below, would be good for someone that understands the testing system better than me to improve
- @completer.searchEngines = CustomSearchEngineCompleter.getSearchEngines()
+ @completer.searchEngines = SearchEngineCompleter.getSearchEngines()
should "return search engine suggestion without description", ->
results = filterCompleter(@completer, ["foo", "hello"])
diff --git a/tests/unit_tests/settings_test.coffee b/tests/unit_tests/settings_test.coffee
index a67b69fa..346c98da 100644
--- a/tests/unit_tests/settings_test.coffee
+++ b/tests/unit_tests/settings_test.coffee
@@ -73,7 +73,7 @@ context "settings",
should "set search engines, retrieve them correctly and check that they have been parsed correctly", ->
searchEngines = "foo: bar?q=%s\n# comment\nbaz: qux?q=%s baz description"
Settings.set 'searchEngines', searchEngines
- result = CustomSearchEngineCompleter.getSearchEngines()
+ result = SearchEngineCompleter.getSearchEngines()
assert.equal Object.keys(result).length, 2
assert.equal "bar?q=%s", result["foo"].url
assert.isFalse result["foo"].description
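
The tests above exercise the renamed SearchEngineCompleter.getSearchEngines, which parses the searchEngines setting one engine per line in the form "keyword: url optional description", skipping comments. A rough sketch consistent with the assertions in settings_test.coffee; the real implementation may differ:

    # Rough sketch only, reconstructed from the test assertions above.
    parseSearchEngines = (configText) ->
      engines = {}
      for line in configText.split "\n"
        line = line.trim()
        continue if line.length == 0 or line[0] == "#"
        [keyword, url, description...] = line.split /\s+/
        continue unless keyword and url and keyword[keyword.length - 1] == ":"
        keyword = keyword[0...keyword.length - 1]  # Drop the trailing ":".
        engines[keyword] =
          url: url
          description: (if description.length then description.join(" ") else false)
      engines

    # parseSearchEngines "foo: bar?q=%s\n# comment\nbaz: qux?q=%s baz description"
    # => foo: { url: "bar?q=%s", description: false }
    #    baz: { url: "qux?q=%s", description: "baz description" }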