-rw-r--r--   background_scripts/completion.coffee   24
-rw-r--r--   lib/utils.coffee                        37
2 files changed, 39 insertions, 22 deletions
diff --git a/background_scripts/completion.coffee b/background_scripts/completion.coffee
index bae73b8d..c83066a6 100644
--- a/background_scripts/completion.coffee
+++ b/background_scripts/completion.coffee
@@ -413,7 +413,6 @@ class TabCompleter
 
 class SearchEngineCompleter
   @debug: false
-  searchEngines: null
   previousSuggestions: null
 
   cancel: ->
@@ -422,7 +421,7 @@ class SearchEngineCompleter
   # This looks up the custom search engine and, if one is found, notes it and removes its keyword from the
   # query terms.
   preprocessRequest: (request) ->
-    @searchEngines.use (engines) =>
+    SearchEngines.use (engines) =>
       { queryTerms, query } = request
       extend request, searchEngines: engines, keywords: key for own key of engines
       keyword = queryTerms[0]
@@ -436,26 +435,7 @@ class SearchEngineCompleter
 
   refresh: (port) ->
     @previousSuggestions = {}
-    # Parse the search-engine configuration.
-    @searchEngines = new AsyncDataFetcher (callback) ->
-      engines = {}
-      for line in Settings.get("searchEngines").split "\n"
-        line = line.trim()
-        continue if /^[#"]/.test line
-        tokens = line.split /\s+/
-        continue unless 2 <= tokens.length
-        keyword = tokens[0].split(":")[0]
-        url = tokens[1]
-        description = tokens[2..].join(" ") || "search (#{keyword})"
-        continue unless Utils.hasFullUrlPrefix url
-        engines[keyword] =
-          keyword: keyword
-          searchUrl: url
-          description: description
-          searchUrlPrefix: url.split("%s")[0]
-
-      callback engines
-
+    SearchEngines.refreshAndUse Settings.get("searchEngines"), (engines) ->
       # Let the front-end vomnibar know the search-engine keywords. It needs to know them so that, when the
       # query goes from "w" to "w ", the vomnibar can synchronously launch the next filter() request (which
       # avoids an ugly delay/flicker).
diff --git a/lib/utils.coffee b/lib/utils.coffee
index 65e26b7a..cb7b4d5c 100644
--- a/lib/utils.coffee
+++ b/lib/utils.coffee
@@ -228,6 +228,42 @@ Utils =
   # Like Nodejs's nextTick.
   nextTick: (func) -> @setTimeout 0, func
 
+# Utility for parsing and using the custom search-engine configuration. We re-use the previous parse if the
+# search-engine configuration is unchanged.
+SearchEngines =
+  previousSearchEngines: null
+  searchEngines: null
+
+  refresh: (searchEngines) ->
+    unless @previousSearchEngines? and searchEngines == @previousSearchEngines
+      @previousSearchEngines = searchEngines
+      @searchEngines = new AsyncDataFetcher (callback) ->
+        engines = {}
+        for line in searchEngines.split "\n"
+          line = line.trim()
+          continue if /^[#"]/.test line
+          tokens = line.split /\s+/
+          continue unless 2 <= tokens.length
+          keyword = tokens[0].split(":")[0]
+          url = tokens[1]
+          description = tokens[2..].join(" ") || "search (#{keyword})"
+          continue unless Utils.hasFullUrlPrefix url
+          engines[keyword] =
+            keyword: keyword
+            searchUrl: url
+            description: description
+            searchUrlPrefix: url.split("%s")[0]
+
+        callback engines
+
+  # Use the parsed search-engine configuration, possibly asynchronously.
+  use: (callback) ->
+    @searchEngines.use callback
+
+  # Both set (refresh) the search-engine configuration and use it at the same time.
+  refreshAndUse: (searchEngines, callback) ->
+    @refresh searchEngines
+    @use callback
 
 # This creates a new function out of an existing function, where the new function takes fewer arguments. This
 # allows us to pass around functions instead of functions + a partial list of arguments.
@@ -325,6 +361,7 @@ class JobRunner
 
 root = exports ? window
 root.Utils = Utils
+root.SearchEngines = SearchEngines
 root.SimpleCache = SimpleCache
 root.AsyncDataFetcher = AsyncDataFetcher
 root.JobRunner = JobRunner
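
For reference, each line of the configuration parsed above defines one search engine: a keyword, a search URL containing "%s", and an optional description; lines beginning with "#" or a double quote are treated as comments. Below is a minimal sketch of how the new SearchEngines utility could be exercised from the background page, where lib/utils.coffee is loaded. The keywords and URLs are illustrative only and are not part of this change.

# Illustrative configuration in the format SearchEngines.refresh expects.
exampleConfig = """
  # Comment lines (starting with "#" or a double quote) are skipped.
  w https://www.wikipedia.org/w/index.php?search=%s Wikipedia
  g https://www.google.com/search?q=%s
  """

SearchEngines.refreshAndUse exampleConfig, (engines) ->
  # engines.w is parsed as:
  #   keyword: "w"
  #   searchUrl: "https://www.wikipedia.org/w/index.php?search=%s"
  #   description: "Wikipedia"
  #   searchUrlPrefix: "https://www.wikipedia.org/w/index.php?search="
  # engines.g has no description tokens, so its description falls back to "search (g)".
  console.log key for own key of engines

Because refresh re-uses the previous parse when the configuration string is unchanged, calling refreshAndUse on every vomnibar activation only re-parses after the user edits the searchEngines setting.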
