  1. /**
  2. * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.1.2
  3. * Copyright (C) 2017 Oliver Nightingale
  4. * @license MIT
  5. */
  6. ;(function(){
  7. /**
  8. * A convenience function for configuring and constructing
  9. * a new lunr Index.
  10. *
  11. * A lunr.Builder instance is created and the pipeline setup
  12. * with a trimmer, stop word filter and stemmer.
  13. *
  14. * This builder object is yielded to the configuration function
  15. * that is passed as a parameter, allowing the list of fields
  16. * and other builder parameters to be customised.
  17. *
  18. * All documents _must_ be added within the passed config function.
  19. *
  20. * @example
  21. * var idx = lunr(function () {
  22. * this.field('title')
  23. * this.field('body')
  24. * this.ref('id')
  25. *
  26. * documents.forEach(function (doc) {
  27. * this.add(doc)
  28. * }, this)
  29. * })
  30. *
  31. * @see {@link lunr.Builder}
  32. * @see {@link lunr.Pipeline}
  33. * @see {@link lunr.trimmer}
  34. * @see {@link lunr.stopWordFilter}
  35. * @see {@link lunr.stemmer}
  36. * @namespace {function} lunr
  37. */
  38. var lunr = function (config) {
  39. var builder = new lunr.Builder
  40. builder.pipeline.add(
  41. lunr.trimmer,
  42. lunr.stopWordFilter,
  43. lunr.stemmer
  44. )
  45. builder.searchPipeline.add(
  46. lunr.stemmer
  47. )
  48. config.call(builder, builder)
  49. return builder.build()
  50. }
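/*
 * Illustrative usage sketch (not part of the library source): building an
 * index with the convenience function above and then searching it. The
 * document shape and field names are assumptions for the example.
 *
 *   var documents = [
 *     { id: 1, title: 'Searching', body: 'A guide to full text search' },
 *     { id: 2, title: 'Indexing', body: 'How documents are added to the index' }
 *   ]
 *
 *   var idx = lunr(function () {
 *     this.ref('id')
 *     this.field('title')
 *     this.field('body')
 *
 *     documents.forEach(function (doc) {
 *       this.add(doc)
 *     }, this)
 *   })
 *
 *   idx.search('search') // => array of { ref, score, matchData } results
 */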
  51. lunr.version = "2.1.2"
  52. /*!
  53. * lunr.utils
  54. * Copyright (C) 2017 Oliver Nightingale
  55. */
  56. /**
  57. * A namespace containing utils for the rest of the lunr library
  58. */
  59. lunr.utils = {}
  60. /**
  61. * Print a warning message to the console.
  62. *
  63. * @param {String} message The message to be printed.
  64. * @memberOf Utils
  65. */
  66. lunr.utils.warn = (function (global) {
  67. /* eslint-disable no-console */
  68. return function (message) {
  69. if (global.console && console.warn) {
  70. console.warn(message)
  71. }
  72. }
  73. /* eslint-enable no-console */
  74. })(this)
  75. /**
  76. * Convert an object to a string.
  77. *
  78. * In the case of `null` and `undefined` the function returns
  79. * the empty string, in all other cases the result of calling
  80. * `toString` on the passed object is returned.
  81. *
  82. * @param {Any} obj The object to convert to a string.
  83. * @return {String} string representation of the passed object.
  84. * @memberOf Utils
  85. */
  86. lunr.utils.asString = function (obj) {
  87. if (obj === void 0 || obj === null) {
  88. return ""
  89. } else {
  90. return obj.toString()
  91. }
  92. }
  93. lunr.FieldRef = function (docRef, fieldName) {
  94. this.docRef = docRef
  95. this.fieldName = fieldName
  96. this._stringValue = fieldName + lunr.FieldRef.joiner + docRef
  97. }
  98. lunr.FieldRef.joiner = "/"
  99. lunr.FieldRef.fromString = function (s) {
  100. var n = s.indexOf(lunr.FieldRef.joiner)
  101. if (n === -1) {
  102. throw "malformed field ref string"
  103. }
  104. var fieldRef = s.slice(0, n),
  105. docRef = s.slice(n + 1)
  106. return new lunr.FieldRef (docRef, fieldRef)
  107. }
  108. lunr.FieldRef.prototype.toString = function () {
  109. return this._stringValue
  110. }
  111. /**
  112. * A function to calculate the inverse document frequency for
  113. * a posting. This is shared between the builder and the index
  114. *
  115. * @private
  116. * @param {object} posting - The posting for a given term
  117. * @param {number} documentCount - The total number of documents.
  118. */
  119. lunr.idf = function (posting, documentCount) {
  120. var documentsWithTerm = 0
  121. for (var fieldName in posting) {
122. if (fieldName == '_index') continue // Ignore the term index, it's not a field
  123. documentsWithTerm += Object.keys(posting[fieldName]).length
  124. }
  125. var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)
  126. return Math.log(1 + Math.abs(x))
  127. }
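/*
 * For reference, the formula implemented above is a BM25-style inverse
 * document frequency: idf = ln(1 + |(N - n + 0.5) / (n + 0.5)|), where N is
 * the total number of documents and n is the number of documents containing
 * the term. Rare terms therefore receive a higher idf than common ones.
 */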
  128. /**
  129. * A token wraps a string representation of a token
  130. * as it is passed through the text processing pipeline.
  131. *
  132. * @constructor
  133. * @param {string} [str=''] - The string token being wrapped.
  134. * @param {object} [metadata={}] - Metadata associated with this token.
  135. */
  136. lunr.Token = function (str, metadata) {
  137. this.str = str || ""
  138. this.metadata = metadata || {}
  139. }
  140. /**
  141. * Returns the token string that is being wrapped by this object.
  142. *
  143. * @returns {string}
  144. */
  145. lunr.Token.prototype.toString = function () {
  146. return this.str
  147. }
  148. /**
  149. * A token update function is used when updating or optionally
  150. * when cloning a token.
  151. *
  152. * @callback lunr.Token~updateFunction
  153. * @param {string} str - The string representation of the token.
  154. * @param {Object} metadata - All metadata associated with this token.
  155. */
  156. /**
  157. * Applies the given function to the wrapped string token.
  158. *
  159. * @example
  160. * token.update(function (str, metadata) {
  161. * return str.toUpperCase()
  162. * })
  163. *
  164. * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.
  165. * @returns {lunr.Token}
  166. */
  167. lunr.Token.prototype.update = function (fn) {
  168. this.str = fn(this.str, this.metadata)
  169. return this
  170. }
  171. /**
  172. * Creates a clone of this token. Optionally a function can be
  173. * applied to the cloned token.
  174. *
  175. * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.
  176. * @returns {lunr.Token}
  177. */
  178. lunr.Token.prototype.clone = function (fn) {
  179. fn = fn || function (s) { return s }
  180. return new lunr.Token (fn(this.str, this.metadata), this.metadata)
  181. }
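/*
 * Illustrative sketch: cloning a token and transforming the clone leaves the
 * original untouched (the values here are made up for the example).
 *
 *   var token = new lunr.Token('walking', { position: [0, 7] })
 *   var upper = token.clone(function (str) { return str.toUpperCase() })
 *
 *   token.toString() // => 'walking'
 *   upper.toString() // => 'WALKING'
 */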
  182. /*!
  183. * lunr.tokenizer
  184. * Copyright (C) 2017 Oliver Nightingale
  185. */
  186. /**
  187. * A function for splitting a string into tokens ready to be inserted into
  188. * the search index. Uses `lunr.tokenizer.separator` to split strings, change
  189. * the value of this property to change how strings are split into tokens.
  190. *
  191. * This tokenizer will convert its parameter to a string by calling `toString` and
192. * then will split this string on characters matching `lunr.tokenizer.separator`.
  193. * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
  194. *
  195. * @static
  196. * @param {?(string|object|object[])} obj - The object to convert into tokens
  197. * @returns {lunr.Token[]}
  198. */
  199. lunr.tokenizer = function (obj) {
  200. if (obj == null || obj == undefined) {
  201. return []
  202. }
  203. if (Array.isArray(obj)) {
  204. return obj.map(function (t) {
  205. return new lunr.Token(lunr.utils.asString(t).toLowerCase())
  206. })
  207. }
  208. var str = obj.toString().trim().toLowerCase(),
  209. len = str.length,
  210. tokens = []
  211. for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
  212. var char = str.charAt(sliceEnd),
  213. sliceLength = sliceEnd - sliceStart
  214. if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {
  215. if (sliceLength > 0) {
  216. tokens.push(
  217. new lunr.Token (str.slice(sliceStart, sliceEnd), {
  218. position: [sliceStart, sliceLength],
  219. index: tokens.length
  220. })
  221. )
  222. }
  223. sliceStart = sliceEnd + 1
  224. }
  225. }
  226. return tokens
  227. }
  228. /**
  229. * The separator used to split a string into tokens. Override this property to change the behaviour of
230. * `lunr.tokenizer` when tokenizing strings. By default this splits on whitespace and hyphens.
  231. *
  232. * @static
  233. * @see lunr.tokenizer
  234. */
  235. lunr.tokenizer.separator = /[\s\-]+/
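/*
 * Illustrative sketch: tokenizing with the default separator, then overriding
 * it, e.g. to also split on commas (the comma rule is an assumption for the
 * example, not a library default).
 *
 *   lunr.tokenizer('Foo bar-baz').map(String) // => ['foo', 'bar', 'baz']
 *
 *   lunr.tokenizer.separator = /[\s\-,]+/
 *   lunr.tokenizer('foo,bar baz').map(String) // => ['foo', 'bar', 'baz']
 */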
  236. /*!
  237. * lunr.Pipeline
  238. * Copyright (C) 2017 Oliver Nightingale
  239. */
  240. /**
  241. * lunr.Pipelines maintain an ordered list of functions to be applied to all
242. * tokens in documents entering the search index and queries being run against
  243. * the index.
  244. *
  245. * An instance of lunr.Index created with the lunr shortcut will contain a
  246. * pipeline with a stop word filter and an English language stemmer. Extra
  247. * functions can be added before or after either of these functions or these
  248. * default functions can be removed.
  249. *
  250. * When run the pipeline will call each function in turn, passing a token, the
  251. * index of that token in the original list of all tokens and finally a list of
  252. * all the original tokens.
  253. *
  254. * The output of functions in the pipeline will be passed to the next function
  255. * in the pipeline. To exclude a token from entering the index the function
  256. * should return undefined, the rest of the pipeline will not be called with
  257. * this token.
  258. *
  259. * For serialisation of pipelines to work, all functions used in an instance of
  260. * a pipeline should be registered with lunr.Pipeline. Registered functions can
  261. * then be loaded. If trying to load a serialised pipeline that uses functions
  262. * that are not registered an error will be thrown.
  263. *
  264. * If not planning on serialising the pipeline then registering pipeline functions
  265. * is not necessary.
  266. *
  267. * @constructor
  268. */
  269. lunr.Pipeline = function () {
  270. this._stack = []
  271. }
  272. lunr.Pipeline.registeredFunctions = Object.create(null)
  273. /**
  274. * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token
  275. * string as well as all known metadata. A pipeline function can mutate the token string
  276. * or mutate (or add) metadata for a given token.
  277. *
  278. * A pipeline function can indicate that the passed token should be discarded by returning
  279. * null. This token will not be passed to any downstream pipeline functions and will not be
  280. * added to the index.
  281. *
  282. * Multiple tokens can be returned by returning an array of tokens. Each token will be passed
283. * to any downstream pipeline functions and all returned tokens will be added to the index.
  284. *
  285. * Any number of pipeline functions may be chained together using a lunr.Pipeline.
  286. *
  287. * @interface lunr.PipelineFunction
  288. * @param {lunr.Token} token - A token from the document being processed.
  289. * @param {number} i - The index of this token in the complete list of tokens for this document/field.
  290. * @param {lunr.Token[]} tokens - All tokens for this document/field.
  291. * @returns {(?lunr.Token|lunr.Token[])}
  292. */
  293. /**
  294. * Register a function with the pipeline.
  295. *
  296. * Functions that are used in the pipeline should be registered if the pipeline
  297. * needs to be serialised, or a serialised pipeline needs to be loaded.
  298. *
  299. * Registering a function does not add it to a pipeline, functions must still be
  300. * added to instances of the pipeline for them to be used when running a pipeline.
  301. *
302. * @param {lunr.PipelineFunction} fn - The function to register.
  303. * @param {String} label - The label to register this function with
  304. */
  305. lunr.Pipeline.registerFunction = function (fn, label) {
  306. if (label in this.registeredFunctions) {
  307. lunr.utils.warn('Overwriting existing registered function: ' + label)
  308. }
  309. fn.label = label
  310. lunr.Pipeline.registeredFunctions[fn.label] = fn
  311. }
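/*
 * Illustrative sketch: defining and registering a custom pipeline function.
 * The function name and behaviour are assumptions for the example.
 *
 *   var capitaliser = function (token) {
 *     return token.update(function (str) { return str.toUpperCase() })
 *   }
 *
 *   lunr.Pipeline.registerFunction(capitaliser, 'capitaliser')
 *
 *   // registering alone does not run it; it must still be added to a
 *   // pipeline instance, e.g. inside the lunr(...) config function:
 *   //   this.pipeline.add(capitaliser)
 */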
  312. /**
  313. * Warns if the function is not registered as a Pipeline function.
  314. *
  315. * @param {lunr.PipelineFunction} fn - The function to check for.
  316. * @private
  317. */
  318. lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {
  319. var isRegistered = fn.label && (fn.label in this.registeredFunctions)
  320. if (!isRegistered) {
  321. lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn)
  322. }
  323. }
  324. /**
  325. * Loads a previously serialised pipeline.
  326. *
  327. * All functions to be loaded must already be registered with lunr.Pipeline.
  328. * If any function from the serialised data has not been registered then an
  329. * error will be thrown.
  330. *
  331. * @param {Object} serialised - The serialised pipeline to load.
  332. * @returns {lunr.Pipeline}
  333. */
  334. lunr.Pipeline.load = function (serialised) {
  335. var pipeline = new lunr.Pipeline
  336. serialised.forEach(function (fnName) {
  337. var fn = lunr.Pipeline.registeredFunctions[fnName]
  338. if (fn) {
  339. pipeline.add(fn)
  340. } else {
  341. throw new Error('Cannot load unregistered function: ' + fnName)
  342. }
  343. })
  344. return pipeline
  345. }
  346. /**
  347. * Adds new functions to the end of the pipeline.
  348. *
  349. * Logs a warning if the function has not been registered.
  350. *
  351. * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.
  352. */
  353. lunr.Pipeline.prototype.add = function () {
  354. var fns = Array.prototype.slice.call(arguments)
  355. fns.forEach(function (fn) {
  356. lunr.Pipeline.warnIfFunctionNotRegistered(fn)
  357. this._stack.push(fn)
  358. }, this)
  359. }
  360. /**
  361. * Adds a single function after a function that already exists in the
  362. * pipeline.
  363. *
  364. * Logs a warning if the function has not been registered.
  365. *
  366. * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
  367. * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
  368. */
  369. lunr.Pipeline.prototype.after = function (existingFn, newFn) {
  370. lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
  371. var pos = this._stack.indexOf(existingFn)
  372. if (pos == -1) {
  373. throw new Error('Cannot find existingFn')
  374. }
  375. pos = pos + 1
  376. this._stack.splice(pos, 0, newFn)
  377. }
  378. /**
  379. * Adds a single function before a function that already exists in the
  380. * pipeline.
  381. *
  382. * Logs a warning if the function has not been registered.
  383. *
  384. * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
  385. * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
  386. */
  387. lunr.Pipeline.prototype.before = function (existingFn, newFn) {
  388. lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
  389. var pos = this._stack.indexOf(existingFn)
  390. if (pos == -1) {
  391. throw new Error('Cannot find existingFn')
  392. }
  393. this._stack.splice(pos, 0, newFn)
  394. }
  395. /**
  396. * Removes a function from the pipeline.
  397. *
  398. * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.
  399. */
  400. lunr.Pipeline.prototype.remove = function (fn) {
  401. var pos = this._stack.indexOf(fn)
  402. if (pos == -1) {
  403. return
  404. }
  405. this._stack.splice(pos, 1)
  406. }
  407. /**
  408. * Runs the current list of functions that make up the pipeline against the
  409. * passed tokens.
  410. *
  411. * @param {Array} tokens The tokens to run through the pipeline.
  412. * @returns {Array}
  413. */
  414. lunr.Pipeline.prototype.run = function (tokens) {
  415. var stackLength = this._stack.length
  416. for (var i = 0; i < stackLength; i++) {
  417. var fn = this._stack[i]
  418. tokens = tokens.reduce(function (memo, token, j) {
  419. var result = fn(token, j, tokens)
  420. if (result === void 0 || result === '') return memo
  421. return memo.concat(result)
  422. }, [])
  423. }
  424. return tokens
  425. }
  426. /**
  427. * Convenience method for passing a string through a pipeline and getting
  428. * strings out. This method takes care of wrapping the passed string in a
  429. * token and mapping the resulting tokens back to strings.
  430. *
  431. * @param {string} str - The string to pass through the pipeline.
  432. * @returns {string[]}
  433. */
  434. lunr.Pipeline.prototype.runString = function (str) {
  435. var token = new lunr.Token (str)
  436. return this.run([token]).map(function (t) {
  437. return t.toString()
  438. })
  439. }
  440. /**
  441. * Resets the pipeline by removing any existing processors.
  442. *
  443. */
  444. lunr.Pipeline.prototype.reset = function () {
  445. this._stack = []
  446. }
  447. /**
  448. * Returns a representation of the pipeline ready for serialisation.
  449. *
  450. * Logs a warning if the function has not been registered.
  451. *
  452. * @returns {Array}
  453. */
  454. lunr.Pipeline.prototype.toJSON = function () {
  455. return this._stack.map(function (fn) {
  456. lunr.Pipeline.warnIfFunctionNotRegistered(fn)
  457. return fn.label
  458. })
  459. }
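/*
 * Illustrative sketch: serialising a pipeline and loading it again. Only
 * functions registered with lunr.Pipeline survive the round trip.
 *
 *   var pipeline = new lunr.Pipeline
 *   pipeline.add(lunr.trimmer, lunr.stopWordFilter, lunr.stemmer)
 *
 *   var serialised = pipeline.toJSON() // => ['trimmer', 'stopWordFilter', 'stemmer']
 *   var restored = lunr.Pipeline.load(serialised)
 *
 *   restored.runString('walking') // => ['walk']
 */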
  460. /*!
  461. * lunr.Vector
  462. * Copyright (C) 2017 Oliver Nightingale
  463. */
  464. /**
  465. * A vector is used to construct the vector space of documents and queries. These
  466. * vectors support operations to determine the similarity between two documents or
  467. * a document and a query.
  468. *
  469. * Normally no parameters are required for initializing a vector, but in the case of
  470. * loading a previously dumped vector the raw elements can be provided to the constructor.
  471. *
472. * For performance reasons vectors are implemented with a flat array, where an element's
  473. * index is immediately followed by its value. E.g. [index, value, index, value]. This
  474. * allows the underlying array to be as sparse as possible and still offer decent
  475. * performance when being used for vector calculations.
  476. *
  477. * @constructor
  478. * @param {Number[]} [elements] - The flat list of element index and element value pairs.
  479. */
  480. lunr.Vector = function (elements) {
  481. this._magnitude = 0
  482. this.elements = elements || []
  483. }
  484. /**
  485. * Calculates the position within the vector to insert a given index.
  486. *
  487. * This is used internally by insert and upsert. If there are duplicate indexes then
  488. * the position is returned as if the value for that index were to be updated, but it
489. * is the caller's responsibility to check whether there is a duplicate at that index.
490. *
491. * @param {Number} index - The element index whose insert position should be found.
  492. * @returns {Number}
  493. */
  494. lunr.Vector.prototype.positionForIndex = function (index) {
  495. // For an empty vector the tuple can be inserted at the beginning
  496. if (this.elements.length == 0) {
  497. return 0
  498. }
  499. var start = 0,
  500. end = this.elements.length / 2,
  501. sliceLength = end - start,
  502. pivotPoint = Math.floor(sliceLength / 2),
  503. pivotIndex = this.elements[pivotPoint * 2]
  504. while (sliceLength > 1) {
  505. if (pivotIndex < index) {
  506. start = pivotPoint
  507. }
  508. if (pivotIndex > index) {
  509. end = pivotPoint
  510. }
  511. if (pivotIndex == index) {
  512. break
  513. }
  514. sliceLength = end - start
  515. pivotPoint = start + Math.floor(sliceLength / 2)
  516. pivotIndex = this.elements[pivotPoint * 2]
  517. }
  518. if (pivotIndex == index) {
  519. return pivotPoint * 2
  520. }
  521. if (pivotIndex > index) {
  522. return pivotPoint * 2
  523. }
  524. if (pivotIndex < index) {
  525. return (pivotPoint + 1) * 2
  526. }
  527. }
  528. /**
  529. * Inserts an element at an index within the vector.
  530. *
  531. * Does not allow duplicates, will throw an error if there is already an entry
  532. * for this index.
  533. *
  534. * @param {Number} insertIdx - The index at which the element should be inserted.
  535. * @param {Number} val - The value to be inserted into the vector.
  536. */
  537. lunr.Vector.prototype.insert = function (insertIdx, val) {
  538. this.upsert(insertIdx, val, function () {
  539. throw "duplicate index"
  540. })
  541. }
  542. /**
  543. * Inserts or updates an existing index within the vector.
  544. *
  545. * @param {Number} insertIdx - The index at which the element should be inserted.
  546. * @param {Number} val - The value to be inserted into the vector.
  547. * @param {function} fn - A function that is called for updates, the existing value and the
  548. * requested value are passed as arguments
  549. */
  550. lunr.Vector.prototype.upsert = function (insertIdx, val, fn) {
  551. this._magnitude = 0
  552. var position = this.positionForIndex(insertIdx)
  553. if (this.elements[position] == insertIdx) {
  554. this.elements[position + 1] = fn(this.elements[position + 1], val)
  555. } else {
  556. this.elements.splice(position, 0, insertIdx, val)
  557. }
  558. }
  559. /**
  560. * Calculates the magnitude of this vector.
  561. *
  562. * @returns {Number}
  563. */
  564. lunr.Vector.prototype.magnitude = function () {
  565. if (this._magnitude) return this._magnitude
  566. var sumOfSquares = 0,
  567. elementsLength = this.elements.length
  568. for (var i = 1; i < elementsLength; i += 2) {
  569. var val = this.elements[i]
  570. sumOfSquares += val * val
  571. }
  572. return this._magnitude = Math.sqrt(sumOfSquares)
  573. }
  574. /**
  575. * Calculates the dot product of this vector and another vector.
  576. *
  577. * @param {lunr.Vector} otherVector - The vector to compute the dot product with.
  578. * @returns {Number}
  579. */
  580. lunr.Vector.prototype.dot = function (otherVector) {
  581. var dotProduct = 0,
  582. a = this.elements, b = otherVector.elements,
  583. aLen = a.length, bLen = b.length,
  584. aVal = 0, bVal = 0,
  585. i = 0, j = 0
  586. while (i < aLen && j < bLen) {
  587. aVal = a[i], bVal = b[j]
  588. if (aVal < bVal) {
  589. i += 2
  590. } else if (aVal > bVal) {
  591. j += 2
  592. } else if (aVal == bVal) {
  593. dotProduct += a[i + 1] * b[j + 1]
  594. i += 2
  595. j += 2
  596. }
  597. }
  598. return dotProduct
  599. }
  600. /**
  601. * Calculates the cosine similarity between this vector and another
  602. * vector.
  603. *
  604. * @param {lunr.Vector} otherVector - The other vector to calculate the
  605. * similarity with.
  606. * @returns {Number}
  607. */
  608. lunr.Vector.prototype.similarity = function (otherVector) {
  609. return this.dot(otherVector) / (this.magnitude() * otherVector.magnitude())
  610. }
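/*
 * Illustrative sketch: the flat [index, value, index, value, ...] layout and
 * the cosine similarity of two hand-built vectors.
 *
 *   var a = new lunr.Vector()
 *   a.insert(0, 1)
 *   a.insert(3, 1) // a.elements => [0, 1, 3, 1]
 *
 *   var b = new lunr.Vector([0, 1, 3, 1])
 *
 *   a.similarity(b) // => 1, the vectors point in the same direction
 */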
  611. /**
  612. * Converts the vector to an array of the elements within the vector.
  613. *
  614. * @returns {Number[]}
  615. */
  616. lunr.Vector.prototype.toArray = function () {
  617. var output = new Array (this.elements.length / 2)
  618. for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) {
  619. output[j] = this.elements[i]
  620. }
  621. return output
  622. }
  623. /**
  624. * A JSON serializable representation of the vector.
  625. *
  626. * @returns {Number[]}
  627. */
  628. lunr.Vector.prototype.toJSON = function () {
  629. return this.elements
  630. }
  631. /* eslint-disable */
  632. /*!
  633. * lunr.stemmer
  634. * Copyright (C) 2017 Oliver Nightingale
  635. * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
  636. */
  637. /**
  638. * lunr.stemmer is an english language stemmer, this is a JavaScript
  639. * implementation of the PorterStemmer taken from http://tartarus.org/~martin
  640. *
  641. * @static
  642. * @implements {lunr.PipelineFunction}
  643. * @param {lunr.Token} token - The string to stem
  644. * @returns {lunr.Token}
  645. * @see {@link lunr.Pipeline}
  646. */
  647. lunr.stemmer = (function(){
  648. var step2list = {
  649. "ational" : "ate",
  650. "tional" : "tion",
  651. "enci" : "ence",
  652. "anci" : "ance",
  653. "izer" : "ize",
  654. "bli" : "ble",
  655. "alli" : "al",
  656. "entli" : "ent",
  657. "eli" : "e",
  658. "ousli" : "ous",
  659. "ization" : "ize",
  660. "ation" : "ate",
  661. "ator" : "ate",
  662. "alism" : "al",
  663. "iveness" : "ive",
  664. "fulness" : "ful",
  665. "ousness" : "ous",
  666. "aliti" : "al",
  667. "iviti" : "ive",
  668. "biliti" : "ble",
  669. "logi" : "log"
  670. },
  671. step3list = {
  672. "icate" : "ic",
  673. "ative" : "",
  674. "alize" : "al",
  675. "iciti" : "ic",
  676. "ical" : "ic",
  677. "ful" : "",
  678. "ness" : ""
  679. },
  680. c = "[^aeiou]", // consonant
  681. v = "[aeiouy]", // vowel
  682. C = c + "[^aeiouy]*", // consonant sequence
  683. V = v + "[aeiou]*", // vowel sequence
  684. mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0
  685. meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1
  686. mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1
  687. s_v = "^(" + C + ")?" + v; // vowel in stem
  688. var re_mgr0 = new RegExp(mgr0);
  689. var re_mgr1 = new RegExp(mgr1);
  690. var re_meq1 = new RegExp(meq1);
  691. var re_s_v = new RegExp(s_v);
  692. var re_1a = /^(.+?)(ss|i)es$/;
  693. var re2_1a = /^(.+?)([^s])s$/;
  694. var re_1b = /^(.+?)eed$/;
  695. var re2_1b = /^(.+?)(ed|ing)$/;
  696. var re_1b_2 = /.$/;
  697. var re2_1b_2 = /(at|bl|iz)$/;
  698. var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$");
  699. var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$");
  700. var re_1c = /^(.+?[^aeiou])y$/;
  701. var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
  702. var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
  703. var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
  704. var re2_4 = /^(.+?)(s|t)(ion)$/;
  705. var re_5 = /^(.+?)e$/;
  706. var re_5_1 = /ll$/;
  707. var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$");
  708. var porterStemmer = function porterStemmer(w) {
  709. var stem,
  710. suffix,
  711. firstch,
  712. re,
  713. re2,
  714. re3,
  715. re4;
  716. if (w.length < 3) { return w; }
  717. firstch = w.substr(0,1);
  718. if (firstch == "y") {
  719. w = firstch.toUpperCase() + w.substr(1);
  720. }
  721. // Step 1a
  722. re = re_1a
  723. re2 = re2_1a;
  724. if (re.test(w)) { w = w.replace(re,"$1$2"); }
  725. else if (re2.test(w)) { w = w.replace(re2,"$1$2"); }
  726. // Step 1b
  727. re = re_1b;
  728. re2 = re2_1b;
  729. if (re.test(w)) {
  730. var fp = re.exec(w);
  731. re = re_mgr0;
  732. if (re.test(fp[1])) {
  733. re = re_1b_2;
  734. w = w.replace(re,"");
  735. }
  736. } else if (re2.test(w)) {
  737. var fp = re2.exec(w);
  738. stem = fp[1];
  739. re2 = re_s_v;
  740. if (re2.test(stem)) {
  741. w = stem;
  742. re2 = re2_1b_2;
  743. re3 = re3_1b_2;
  744. re4 = re4_1b_2;
  745. if (re2.test(w)) { w = w + "e"; }
  746. else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); }
  747. else if (re4.test(w)) { w = w + "e"; }
  748. }
  749. }
  750. // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)
  751. re = re_1c;
  752. if (re.test(w)) {
  753. var fp = re.exec(w);
  754. stem = fp[1];
  755. w = stem + "i";
  756. }
  757. // Step 2
  758. re = re_2;
  759. if (re.test(w)) {
  760. var fp = re.exec(w);
  761. stem = fp[1];
  762. suffix = fp[2];
  763. re = re_mgr0;
  764. if (re.test(stem)) {
  765. w = stem + step2list[suffix];
  766. }
  767. }
  768. // Step 3
  769. re = re_3;
  770. if (re.test(w)) {
  771. var fp = re.exec(w);
  772. stem = fp[1];
  773. suffix = fp[2];
  774. re = re_mgr0;
  775. if (re.test(stem)) {
  776. w = stem + step3list[suffix];
  777. }
  778. }
  779. // Step 4
  780. re = re_4;
  781. re2 = re2_4;
  782. if (re.test(w)) {
  783. var fp = re.exec(w);
  784. stem = fp[1];
  785. re = re_mgr1;
  786. if (re.test(stem)) {
  787. w = stem;
  788. }
  789. } else if (re2.test(w)) {
  790. var fp = re2.exec(w);
  791. stem = fp[1] + fp[2];
  792. re2 = re_mgr1;
  793. if (re2.test(stem)) {
  794. w = stem;
  795. }
  796. }
  797. // Step 5
  798. re = re_5;
  799. if (re.test(w)) {
  800. var fp = re.exec(w);
  801. stem = fp[1];
  802. re = re_mgr1;
  803. re2 = re_meq1;
  804. re3 = re3_5;
  805. if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {
  806. w = stem;
  807. }
  808. }
  809. re = re_5_1;
  810. re2 = re_mgr1;
  811. if (re.test(w) && re2.test(w)) {
  812. re = re_1b_2;
  813. w = w.replace(re,"");
  814. }
  815. // and turn initial Y back to y
  816. if (firstch == "y") {
  817. w = firstch.toLowerCase() + w.substr(1);
  818. }
  819. return w;
  820. };
  821. return function (token) {
  822. return token.update(porterStemmer);
  823. }
  824. })();
  825. lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')
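/*
 * Illustrative sketch: stemming individual tokens with the Porter stemmer
 * defined above.
 *
 *   lunr.stemmer(new lunr.Token('fishing')).toString()  // => 'fish'
 *   lunr.stemmer(new lunr.Token('national')).toString() // => 'nation'
 */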
  826. /*!
  827. * lunr.stopWordFilter
  828. * Copyright (C) 2017 Oliver Nightingale
  829. */
  830. /**
  831. * lunr.generateStopWordFilter builds a stopWordFilter function from the provided
  832. * list of stop words.
  833. *
  834. * The built in lunr.stopWordFilter is built using this generator and can be used
  835. * to generate custom stopWordFilters for applications or non English languages.
  836. *
837. * @param {Array} stopWords The list of stop words to filter with
  838. * @returns {lunr.PipelineFunction}
  839. * @see lunr.Pipeline
  840. * @see lunr.stopWordFilter
  841. */
  842. lunr.generateStopWordFilter = function (stopWords) {
  843. var words = stopWords.reduce(function (memo, stopWord) {
  844. memo[stopWord] = stopWord
  845. return memo
  846. }, {})
  847. return function (token) {
  848. if (token && words[token.toString()] !== token.toString()) return token
  849. }
  850. }
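/*
 * Illustrative sketch: generating a custom stop word filter, e.g. for domain
 * specific noise words (the word list is an assumption for the example).
 *
 *   var customStopWordFilter = lunr.generateStopWordFilter(['foo', 'bar'])
 *   lunr.Pipeline.registerFunction(customStopWordFilter, 'customStopWordFilter')
 *
 *   customStopWordFilter(new lunr.Token('foo'))   // => undefined, token is dropped
 *   customStopWordFilter(new lunr.Token('hello')) // => the token, unchanged
 */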
  851. /**
  852. * lunr.stopWordFilter is an English language stop word list filter, any words
  853. * contained in the list will not be passed through the filter.
  854. *
  855. * This is intended to be used in the Pipeline. If the token does not pass the
  856. * filter then undefined will be returned.
  857. *
  858. * @implements {lunr.PipelineFunction}
859. * @param {lunr.Token} token - A token to check for being a stop word.
  860. * @returns {lunr.Token}
  861. * @see {@link lunr.Pipeline}
  862. */
  863. lunr.stopWordFilter = lunr.generateStopWordFilter([
  864. 'a',
  865. 'able',
  866. 'about',
  867. 'across',
  868. 'after',
  869. 'all',
  870. 'almost',
  871. 'also',
  872. 'am',
  873. 'among',
  874. 'an',
  875. 'and',
  876. 'any',
  877. 'are',
  878. 'as',
  879. 'at',
  880. 'be',
  881. 'because',
  882. 'been',
  883. 'but',
  884. 'by',
  885. 'can',
  886. 'cannot',
  887. 'could',
  888. 'dear',
  889. 'did',
  890. 'do',
  891. 'does',
  892. 'either',
  893. 'else',
  894. 'ever',
  895. 'every',
  896. 'for',
  897. 'from',
  898. 'get',
  899. 'got',
  900. 'had',
  901. 'has',
  902. 'have',
  903. 'he',
  904. 'her',
  905. 'hers',
  906. 'him',
  907. 'his',
  908. 'how',
  909. 'however',
  910. 'i',
  911. 'if',
  912. 'in',
  913. 'into',
  914. 'is',
  915. 'it',
  916. 'its',
  917. 'just',
  918. 'least',
  919. 'let',
  920. 'like',
  921. 'likely',
  922. 'may',
  923. 'me',
  924. 'might',
  925. 'most',
  926. 'must',
  927. 'my',
  928. 'neither',
  929. 'no',
  930. 'nor',
  931. 'not',
  932. 'of',
  933. 'off',
  934. 'often',
  935. 'on',
  936. 'only',
  937. 'or',
  938. 'other',
  939. 'our',
  940. 'own',
  941. 'rather',
  942. 'said',
  943. 'say',
  944. 'says',
  945. 'she',
  946. 'should',
  947. 'since',
  948. 'so',
  949. 'some',
  950. 'than',
  951. 'that',
  952. 'the',
  953. 'their',
  954. 'them',
  955. 'then',
  956. 'there',
  957. 'these',
  958. 'they',
  959. 'this',
  960. 'tis',
  961. 'to',
  962. 'too',
  963. 'twas',
  964. 'us',
  965. 'wants',
  966. 'was',
  967. 'we',
  968. 'were',
  969. 'what',
  970. 'when',
  971. 'where',
  972. 'which',
  973. 'while',
  974. 'who',
  975. 'whom',
  976. 'why',
  977. 'will',
  978. 'with',
  979. 'would',
  980. 'yet',
  981. 'you',
  982. 'your'
  983. ])
  984. lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')
  985. /*!
  986. * lunr.trimmer
  987. * Copyright (C) 2017 Oliver Nightingale
  988. */
  989. /**
  990. * lunr.trimmer is a pipeline function for trimming non word
  991. * characters from the beginning and end of tokens before they
  992. * enter the index.
  993. *
994. * This implementation may not work correctly for non-Latin
995. * characters and should either be removed or adapted for use
996. * with languages that use non-Latin scripts.
  997. *
  998. * @static
  999. * @implements {lunr.PipelineFunction}
  1000. * @param {lunr.Token} token The token to pass through the filter
  1001. * @returns {lunr.Token}
  1002. * @see lunr.Pipeline
  1003. */
  1004. lunr.trimmer = function (token) {
  1005. return token.update(function (s) {
  1006. return s.replace(/^\W+/, '').replace(/\W+$/, '')
  1007. })
  1008. }
  1009. lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')
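/*
 * Illustrative sketch: the trimmer strips leading and trailing non-word
 * characters but leaves interior punctuation alone.
 *
 *   lunr.trimmer(new lunr.Token('"hello!"')).toString() // => 'hello'
 *   lunr.trimmer(new lunr.Token("it's")).toString()     // => "it's"
 */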
  1010. /*!
  1011. * lunr.TokenSet
  1012. * Copyright (C) 2017 Oliver Nightingale
  1013. */
  1014. /**
  1015. * A token set is used to store the unique list of all tokens
  1016. * within an index. Token sets are also used to represent an
  1017. * incoming query to the index, this query token set and index
  1018. * token set are then intersected to find which tokens to look
  1019. * up in the inverted index.
  1020. *
  1021. * A token set can hold multiple tokens, as in the case of the
  1022. * index token set, or it can hold a single token as in the
  1023. * case of a simple query token set.
  1024. *
  1025. * Additionally token sets are used to perform wildcard matching.
  1026. * Leading, contained and trailing wildcards are supported, and
  1027. * from this edit distance matching can also be provided.
  1028. *
1029. * Token sets are implemented as a minimal finite state automaton,
  1030. * where both common prefixes and suffixes are shared between tokens.
  1031. * This helps to reduce the space used for storing the token set.
  1032. *
  1033. * @constructor
  1034. */
  1035. lunr.TokenSet = function () {
  1036. this.final = false
  1037. this.edges = {}
  1038. this.id = lunr.TokenSet._nextId
  1039. lunr.TokenSet._nextId += 1
  1040. }
  1041. /**
  1042. * Keeps track of the next, auto increment, identifier to assign
  1043. * to a new tokenSet.
  1044. *
  1045. * TokenSets require a unique identifier to be correctly minimised.
  1046. *
  1047. * @private
  1048. */
  1049. lunr.TokenSet._nextId = 1
  1050. /**
  1051. * Creates a TokenSet instance from the given sorted array of words.
  1052. *
  1053. * @param {String[]} arr - A sorted array of strings to create the set from.
  1054. * @returns {lunr.TokenSet}
  1055. * @throws Will throw an error if the input array is not sorted.
  1056. */
  1057. lunr.TokenSet.fromArray = function (arr) {
  1058. var builder = new lunr.TokenSet.Builder
  1059. for (var i = 0, len = arr.length; i < len; i++) {
  1060. builder.insert(arr[i])
  1061. }
  1062. builder.finish()
  1063. return builder.root
  1064. }
  1065. /**
  1066. * Creates a token set from a query clause.
  1067. *
  1068. * @private
  1069. * @param {Object} clause - A single clause from lunr.Query.
  1070. * @param {string} clause.term - The query clause term.
  1071. * @param {number} [clause.editDistance] - The optional edit distance for the term.
  1072. * @returns {lunr.TokenSet}
  1073. */
  1074. lunr.TokenSet.fromClause = function (clause) {
  1075. if ('editDistance' in clause) {
  1076. return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)
  1077. } else {
  1078. return lunr.TokenSet.fromString(clause.term)
  1079. }
  1080. }
  1081. /**
  1082. * Creates a token set representing a single string with a specified
  1083. * edit distance.
  1084. *
  1085. * Insertions, deletions, substitutions and transpositions are each
  1086. * treated as an edit distance of 1.
  1087. *
  1088. * Increasing the allowed edit distance will have a dramatic impact
  1089. * on the performance of both creating and intersecting these TokenSets.
  1090. * It is advised to keep the edit distance less than 3.
  1091. *
  1092. * @param {string} str - The string to create the token set from.
  1093. * @param {number} editDistance - The allowed edit distance to match.
1094. * @returns {lunr.TokenSet}
  1095. */
  1096. lunr.TokenSet.fromFuzzyString = function (str, editDistance) {
  1097. var root = new lunr.TokenSet
  1098. var stack = [{
  1099. node: root,
  1100. editsRemaining: editDistance,
  1101. str: str
  1102. }]
  1103. while (stack.length) {
  1104. var frame = stack.pop()
  1105. // no edit
  1106. if (frame.str.length > 0) {
  1107. var char = frame.str.charAt(0),
  1108. noEditNode
  1109. if (char in frame.node.edges) {
  1110. noEditNode = frame.node.edges[char]
  1111. } else {
  1112. noEditNode = new lunr.TokenSet
  1113. frame.node.edges[char] = noEditNode
  1114. }
  1115. if (frame.str.length == 1) {
  1116. noEditNode.final = true
  1117. } else {
  1118. stack.push({
  1119. node: noEditNode,
  1120. editsRemaining: frame.editsRemaining,
  1121. str: frame.str.slice(1)
  1122. })
  1123. }
  1124. }
  1125. // deletion
  1126. // can only do a deletion if we have enough edits remaining
  1127. // and if there are characters left to delete in the string
  1128. if (frame.editsRemaining > 0 && frame.str.length > 1) {
  1129. var char = frame.str.charAt(1),
  1130. deletionNode
  1131. if (char in frame.node.edges) {
  1132. deletionNode = frame.node.edges[char]
  1133. } else {
  1134. deletionNode = new lunr.TokenSet
  1135. frame.node.edges[char] = deletionNode
  1136. }
  1137. if (frame.str.length <= 2) {
  1138. deletionNode.final = true
  1139. } else {
  1140. stack.push({
  1141. node: deletionNode,
  1142. editsRemaining: frame.editsRemaining - 1,
  1143. str: frame.str.slice(2)
  1144. })
  1145. }
  1146. }
  1147. // deletion
  1148. // just removing the last character from the str
  1149. if (frame.editsRemaining > 0 && frame.str.length == 1) {
  1150. frame.node.final = true
  1151. }
  1152. // substitution
  1153. // can only do a substitution if we have enough edits remaining
  1154. // and if there are characters left to substitute
  1155. if (frame.editsRemaining > 0 && frame.str.length >= 1) {
  1156. if ("*" in frame.node.edges) {
  1157. var substitutionNode = frame.node.edges["*"]
  1158. } else {
  1159. var substitutionNode = new lunr.TokenSet
  1160. frame.node.edges["*"] = substitutionNode
  1161. }
  1162. if (frame.str.length == 1) {
  1163. substitutionNode.final = true
  1164. } else {
  1165. stack.push({
  1166. node: substitutionNode,
  1167. editsRemaining: frame.editsRemaining - 1,
  1168. str: frame.str.slice(1)
  1169. })
  1170. }
  1171. }
  1172. // insertion
  1173. // can only do insertion if there are edits remaining
  1174. if (frame.editsRemaining > 0) {
  1175. if ("*" in frame.node.edges) {
  1176. var insertionNode = frame.node.edges["*"]
  1177. } else {
  1178. var insertionNode = new lunr.TokenSet
  1179. frame.node.edges["*"] = insertionNode
  1180. }
  1181. if (frame.str.length == 0) {
  1182. insertionNode.final = true
  1183. } else {
  1184. stack.push({
  1185. node: insertionNode,
  1186. editsRemaining: frame.editsRemaining - 1,
  1187. str: frame.str
  1188. })
  1189. }
  1190. }
  1191. // transposition
  1192. // can only do a transposition if there are edits remaining
  1193. // and there are enough characters to transpose
  1194. if (frame.editsRemaining > 0 && frame.str.length > 1) {
  1195. var charA = frame.str.charAt(0),
  1196. charB = frame.str.charAt(1),
  1197. transposeNode
  1198. if (charB in frame.node.edges) {
  1199. transposeNode = frame.node.edges[charB]
  1200. } else {
  1201. transposeNode = new lunr.TokenSet
  1202. frame.node.edges[charB] = transposeNode
  1203. }
  1204. if (frame.str.length == 1) {
  1205. transposeNode.final = true
  1206. } else {
  1207. stack.push({
  1208. node: transposeNode,
  1209. editsRemaining: frame.editsRemaining - 1,
  1210. str: charA + frame.str.slice(2)
  1211. })
  1212. }
  1213. }
  1214. }
  1215. return root
  1216. }
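/*
 * Illustrative sketch: a fuzzy token set built from 'cat' with an edit
 * distance of 1 matches the term itself plus single-edit variants when
 * intersected with a corpus token set (the word list is an assumption for
 * the example; fromArray requires sorted input).
 *
 *   var corpus = lunr.TokenSet.fromArray(['bat', 'cart', 'cat', 'dog'])
 *   var fuzzy = lunr.TokenSet.fromFuzzyString('cat', 1)
 *
 *   corpus.intersect(fuzzy).toArray() // => ['bat', 'cart', 'cat'] in some order
 */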
  1217. /**
  1218. * Creates a TokenSet from a string.
  1219. *
  1220. * The string may contain one or more wildcard characters (*)
  1221. * that will allow wildcard matching when intersecting with
  1222. * another TokenSet.
  1223. *
  1224. * @param {string} str - The string to create a TokenSet from.
  1225. * @returns {lunr.TokenSet}
  1226. */
  1227. lunr.TokenSet.fromString = function (str) {
  1228. var node = new lunr.TokenSet,
  1229. root = node,
  1230. wildcardFound = false
  1231. /*
  1232. * Iterates through all characters within the passed string
  1233. * appending a node for each character.
  1234. *
  1235. * As soon as a wildcard character is found then a self
  1236. * referencing edge is introduced to continually match
  1237. * any number of any characters.
  1238. */
  1239. for (var i = 0, len = str.length; i < len; i++) {
  1240. var char = str[i],
  1241. final = (i == len - 1)
  1242. if (char == "*") {
  1243. wildcardFound = true
  1244. node.edges[char] = node
  1245. node.final = final
  1246. } else {
  1247. var next = new lunr.TokenSet
  1248. next.final = final
  1249. node.edges[char] = next
  1250. node = next
  1251. // TODO: is this needed anymore?
  1252. if (wildcardFound) {
  1253. node.edges["*"] = root
  1254. }
  1255. }
  1256. }
  1257. return root
  1258. }
  1259. /**
  1260. * Converts this TokenSet into an array of strings
  1261. * contained within the TokenSet.
  1262. *
  1263. * @returns {string[]}
  1264. */
  1265. lunr.TokenSet.prototype.toArray = function () {
  1266. var words = []
  1267. var stack = [{
  1268. prefix: "",
  1269. node: this
  1270. }]
  1271. while (stack.length) {
  1272. var frame = stack.pop(),
  1273. edges = Object.keys(frame.node.edges),
  1274. len = edges.length
  1275. if (frame.node.final) {
  1276. words.push(frame.prefix)
  1277. }
  1278. for (var i = 0; i < len; i++) {
  1279. var edge = edges[i]
  1280. stack.push({
  1281. prefix: frame.prefix.concat(edge),
  1282. node: frame.node.edges[edge]
  1283. })
  1284. }
  1285. }
  1286. return words
  1287. }
  1288. /**
  1289. * Generates a string representation of a TokenSet.
  1290. *
  1291. * This is intended to allow TokenSets to be used as keys
  1292. * in objects, largely to aid the construction and minimisation
  1293. * of a TokenSet. As such it is not designed to be a human
  1294. * friendly representation of the TokenSet.
  1295. *
  1296. * @returns {string}
  1297. */
  1298. lunr.TokenSet.prototype.toString = function () {
  1299. // NOTE: Using Object.keys here as this.edges is very likely
  1300. // to enter 'hash-mode' with many keys being added
  1301. //
  1302. // avoiding a for-in loop here as it leads to the function
  1303. // being de-optimised (at least in V8). From some simple
  1304. // benchmarks the performance is comparable, but allowing
  1305. // V8 to optimize may mean easy performance wins in the future.
  1306. if (this._str) {
  1307. return this._str
  1308. }
  1309. var str = this.final ? '1' : '0',
  1310. labels = Object.keys(this.edges).sort(),
  1311. len = labels.length
  1312. for (var i = 0; i < len; i++) {
  1313. var label = labels[i],
  1314. node = this.edges[label]
  1315. str = str + label + node.id
  1316. }
  1317. return str
  1318. }
  1319. /**
  1320. * Returns a new TokenSet that is the intersection of
  1321. * this TokenSet and the passed TokenSet.
  1322. *
  1323. * This intersection will take into account any wildcards
  1324. * contained within the TokenSet.
  1325. *
1326. * @param {lunr.TokenSet} b - Another TokenSet to intersect with.
  1327. * @returns {lunr.TokenSet}
  1328. */
  1329. lunr.TokenSet.prototype.intersect = function (b) {
  1330. var output = new lunr.TokenSet,
  1331. frame = undefined
  1332. var stack = [{
  1333. qNode: b,
  1334. output: output,
  1335. node: this
  1336. }]
  1337. while (stack.length) {
  1338. frame = stack.pop()
  1339. // NOTE: As with the #toString method, we are using
  1340. // Object.keys and a for loop instead of a for-in loop
  1341. // as both of these objects enter 'hash' mode, causing
  1342. // the function to be de-optimised in V8
  1343. var qEdges = Object.keys(frame.qNode.edges),
  1344. qLen = qEdges.length,
  1345. nEdges = Object.keys(frame.node.edges),
  1346. nLen = nEdges.length
  1347. for (var q = 0; q < qLen; q++) {
  1348. var qEdge = qEdges[q]
  1349. for (var n = 0; n < nLen; n++) {
  1350. var nEdge = nEdges[n]
  1351. if (nEdge == qEdge || qEdge == '*') {
  1352. var node = frame.node.edges[nEdge],
  1353. qNode = frame.qNode.edges[qEdge],
  1354. final = node.final && qNode.final,
  1355. next = undefined
  1356. if (nEdge in frame.output.edges) {
  1357. // an edge already exists for this character
  1358. // no need to create a new node, just set the finality
  1359. // bit unless this node is already final
  1360. next = frame.output.edges[nEdge]
  1361. next.final = next.final || final
  1362. } else {
  1363. // no edge exists yet, must create one
  1364. // set the finality bit and insert it
  1365. // into the output
  1366. next = new lunr.TokenSet
  1367. next.final = final
  1368. frame.output.edges[nEdge] = next
  1369. }
  1370. stack.push({
  1371. qNode: qNode,
  1372. output: next,
  1373. node: node
  1374. })
  1375. }
  1376. }
  1377. }
  1378. }
  1379. return output
  1380. }
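/*
 * Illustrative sketch: intersecting an index-like token set with a wildcard
 * query token set (the word list is an assumption for the example).
 *
 *   var corpus = lunr.TokenSet.fromArray(['cat', 'cater', 'dog'])
 *   var query = lunr.TokenSet.fromString('cat*')
 *
 *   corpus.intersect(query).toArray() // => ['cat', 'cater'] in some order
 */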
  1381. lunr.TokenSet.Builder = function () {
  1382. this.previousWord = ""
  1383. this.root = new lunr.TokenSet
  1384. this.uncheckedNodes = []
  1385. this.minimizedNodes = {}
  1386. }
  1387. lunr.TokenSet.Builder.prototype.insert = function (word) {
  1388. var node,
  1389. commonPrefix = 0
  1390. if (word < this.previousWord) {
  1391. throw new Error ("Out of order word insertion")
  1392. }
  1393. for (var i = 0; i < word.length && i < this.previousWord.length; i++) {
  1394. if (word[i] != this.previousWord[i]) break
  1395. commonPrefix++
  1396. }
  1397. this.minimize(commonPrefix)
  1398. if (this.uncheckedNodes.length == 0) {
  1399. node = this.root
  1400. } else {
  1401. node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child
  1402. }
  1403. for (var i = commonPrefix; i < word.length; i++) {
  1404. var nextNode = new lunr.TokenSet,
  1405. char = word[i]
  1406. node.edges[char] = nextNode
  1407. this.uncheckedNodes.push({
  1408. parent: node,
  1409. char: char,
  1410. child: nextNode
  1411. })
  1412. node = nextNode
  1413. }
  1414. node.final = true
  1415. this.previousWord = word
  1416. }
  1417. lunr.TokenSet.Builder.prototype.finish = function () {
  1418. this.minimize(0)
  1419. }
  1420. lunr.TokenSet.Builder.prototype.minimize = function (downTo) {
  1421. for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {
  1422. var node = this.uncheckedNodes[i],
  1423. childKey = node.child.toString()
  1424. if (childKey in this.minimizedNodes) {
  1425. node.parent.edges[node.char] = this.minimizedNodes[childKey]
  1426. } else {
  1427. // Cache the key for this node since
  1428. // we know it can't change anymore
  1429. node.child._str = childKey
  1430. this.minimizedNodes[childKey] = node.child
  1431. }
  1432. this.uncheckedNodes.pop()
  1433. }
  1434. }
  1435. /*!
  1436. * lunr.Index
  1437. * Copyright (C) 2017 Oliver Nightingale
  1438. */
  1439. /**
  1440. * An index contains the built index of all documents and provides a query interface
  1441. * to the index.
  1442. *
  1443. * Usually instances of lunr.Index will not be created using this constructor, instead
  1444. * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be
  1445. * used to load previously built and serialized indexes.
  1446. *
  1447. * @constructor
  1448. * @param {Object} attrs - The attributes of the built search index.
  1449. * @param {Object} attrs.invertedIndex - An index of term/field to document reference.
1450. * @param {Object<string, lunr.Vector>} attrs.fieldVectors - Field vectors keyed by field reference.
1451. * @param {lunr.TokenSet} attrs.tokenSet - A set of all corpus tokens.
  1452. * @param {string[]} attrs.fields - The names of indexed document fields.
  1453. * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.
  1454. */
  1455. lunr.Index = function (attrs) {
  1456. this.invertedIndex = attrs.invertedIndex
  1457. this.fieldVectors = attrs.fieldVectors
  1458. this.tokenSet = attrs.tokenSet
  1459. this.fields = attrs.fields
  1460. this.pipeline = attrs.pipeline
  1461. }
  1462. /**
  1463. * A result contains details of a document matching a search query.
  1464. * @typedef {Object} lunr.Index~Result
  1465. * @property {string} ref - The reference of the document this result represents.
1466. * @property {number} score - A number representing how similar this document is to the query; higher scores indicate a better match.
  1467. * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.
  1468. */
  1469. /**
  1470. * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple
  1471. * query language which itself is parsed into an instance of lunr.Query.
  1472. *
  1473. * For programmatically building queries it is advised to directly use lunr.Query, the query language
  1474. * is best used for human entered text rather than program generated text.
  1475. *
  1476. * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported
1477. * and will be combined with OR, e.g. `hello world` will match documents that contain either 'hello'
  1478. * or 'world', though those that contain both will rank higher in the results.
  1479. *
  1480. * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can
  1481. * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding
  1482. * wildcards will increase the number of documents that will be found but can also have a negative
  1483. * impact on query performance, especially with wildcards at the beginning of a term.
  1484. *
  1485. * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term
  1486. * hello in the title field will match this query. Using a field not present in the index will lead
  1487. * to an error being thrown.
  1488. *
  1489. * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term
  1490. * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported
1491. * to provide fuzzy matching, e.g. `hello~2` will match documents containing 'hello' within an edit distance of 2.
  1492. * Avoid large values for edit distance to improve query performance.
  1493. *
  1494. * To escape special characters the backslash character '\' can be used, this allows searches to include
  1495. * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead
  1496. * of attempting to apply a boost of 2 to the search term "foo".
  1497. *
  1498. * @typedef {string} lunr.Index~QueryString
  1499. * @example <caption>Simple single term query</caption>
  1500. * hello
  1501. * @example <caption>Multiple term query</caption>
  1502. * hello world
  1503. * @example <caption>term scoped to a field</caption>
  1504. * title:hello
  1505. * @example <caption>term with a boost of 10</caption>
  1506. * hello^10
  1507. * @example <caption>term with an edit distance of 2</caption>
  1508. * hello~2
  1509. */
  1510. /**
  1511. * Performs a search against the index using lunr query syntax.
  1512. *
  1513. * Results will be returned sorted by their score, the most relevant results
  1514. * will be returned first.
  1515. *
  1516. * For more programmatic querying use lunr.Index#query.
  1517. *
  1518. * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.
  1519. * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.
  1520. * @returns {lunr.Index~Result[]}
  1521. */
  1522. lunr.Index.prototype.search = function (queryString) {
  1523. return this.query(function (query) {
  1524. var parser = new lunr.QueryParser(queryString, query)
  1525. parser.parse()
  1526. })
  1527. }
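/*
 * Illustrative sketch: searching with the query string syntax described
 * above, assuming an index with a 'title' field built as in the earlier
 * example.
 *
 *   idx.search('hello')          // single term
 *   idx.search('title:hello^10') // field scoped term with a boost of 10
 *   idx.search('hello~1 world')  // fuzzy term OR-ed with a plain term
 */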
  1528. /**
  1529. * A query builder callback provides a query object to be used to express
  1530. * the query to perform on the index.
  1531. *
  1532. * @callback lunr.Index~queryBuilder
  1533. * @param {lunr.Query} query - The query object to build up.
  1534. * @this lunr.Query
  1535. */
  1536. /**
  1537. * Performs a query against the index using the yielded lunr.Query object.
  1538. *
  1539. * If performing programmatic queries against the index, this method is preferred
  1540. * over lunr.Index#search so as to avoid the additional query parsing overhead.
  1541. *
  1542. * A query object is yielded to the supplied function which should be used to
  1543. * express the query to be run against the index.
  1544. *
  1545. * Note that although this function takes a callback parameter it is _not_ an
  1546. * asynchronous operation, the callback is just yielded a query object to be
  1547. * customized.
  1548. *
  1549. * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.
  1550. * @returns {lunr.Index~Result[]}
  1551. */
  1552. lunr.Index.prototype.query = function (fn) {
  1553. // for each query clause
  1554. // * process terms
  1555. // * expand terms from token set
  1556. // * find matching documents and metadata
  1557. // * get document vectors
  1558. // * score documents
  1559. var query = new lunr.Query(this.fields),
  1560. matchingFields = Object.create(null),
  1561. queryVectors = Object.create(null)
  1562. fn.call(query, query)
  1563. for (var i = 0; i < query.clauses.length; i++) {
  1564. /*
  1565. * Unless the pipeline has been disabled for this term, which is
  1566. * the case for terms with wildcards, we need to pass the clause
  1567. * term through the search pipeline. A pipeline returns an array
  1568. * of processed terms. Pipeline functions may expand the passed
  1569. * term, which means we may end up performing multiple index lookups
  1570. * for a single query term.
  1571. */
  1572. var clause = query.clauses[i],
  1573. terms = null
  1574. if (clause.usePipeline) {
  1575. terms = this.pipeline.runString(clause.term)
  1576. } else {
  1577. terms = [clause.term]
  1578. }
  1579. for (var m = 0; m < terms.length; m++) {
  1580. var term = terms[m]
  1581. /*
  1582. * Each term returned from the pipeline needs to use the same query
583. * clause object, e.g. the same boost and/or edit distance. The
  1584. * simplest way to do this is to re-use the clause object but mutate
  1585. * its term property.
  1586. */
  1587. clause.term = term
  1588. /*
  1589. * From the term in the clause we create a token set which will then
  1590. * be used to intersect the indexes token set to get a list of terms
  1591. * to lookup in the inverted index
  1592. */
  1593. var termTokenSet = lunr.TokenSet.fromClause(clause),
  1594. expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()
  1595. for (var j = 0; j < expandedTerms.length; j++) {
  1596. /*
  1597. * For each term get the posting and termIndex, this is required for
  1598. * building the query vector.
  1599. */
  1600. var expandedTerm = expandedTerms[j],
  1601. posting = this.invertedIndex[expandedTerm],
  1602. termIndex = posting._index
  1603. for (var k = 0; k < clause.fields.length; k++) {
  1604. /*
  1605. * For each field that this query term is scoped by (by default
  1606. * all fields are in scope) we need to get all the document refs
  1607. * that have this term in that field.
  1608. *
  1609. * The posting is the entry in the invertedIndex for the matching
  1610. * term from above.
  1611. */
  1612. var field = clause.fields[k],
  1613. fieldPosting = posting[field],
  1614. matchingDocumentRefs = Object.keys(fieldPosting)
  1615. /*
  1616. * To support field level boosts a query vector is created per
  1617. * field. This vector is populated using the termIndex found for
  1618. * the term and a unit value with the appropriate boost applied.
  1619. *
  1620. * If the query vector for this field does not exist yet it needs
  1621. * to be created.
  1622. */
  1623. if (!(field in queryVectors)) {
  1624. queryVectors[field] = new lunr.Vector
  1625. }
  1626. /*
  1627. * Using upsert because there could already be an entry in the vector
  1628. * for the term we are working with. In that case we just add the scores
  1629. * together.
  1630. */
  1631. queryVectors[field].upsert(termIndex, 1 * clause.boost, function (a, b) { return a + b })
  1632. for (var l = 0; l < matchingDocumentRefs.length; l++) {
  1633. /*
  1634. * All metadata for this term/field/document triple
  1635. * are then extracted and collected into an instance
  1636. * of lunr.MatchData ready to be returned in the query
  1637. * results
  1638. */
  1639. var matchingDocumentRef = matchingDocumentRefs[l],
  1640. matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field),
  1641. documentMetadata, matchData
  1642. documentMetadata = fieldPosting[matchingDocumentRef]
  1643. matchData = new lunr.MatchData (expandedTerm, field, documentMetadata)
  1644. if (matchingFieldRef in matchingFields) {
  1645. matchingFields[matchingFieldRef].combine(matchData)
  1646. } else {
  1647. matchingFields[matchingFieldRef] = matchData
  1648. }
  1649. }
  1650. }
  1651. }
  1652. }
  1653. }
  1654. var matchingFieldRefs = Object.keys(matchingFields),
  1655. results = {}
  1656. for (var i = 0; i < matchingFieldRefs.length; i++) {
  1657. /*
  1658. * Currently we have document fields that match the query, but we
  1659. * need to return documents. The matchData and scores are combined
  1660. * from multiple fields belonging to the same document.
  1661. *
  1662. * Scores are calculated by field, using the query vectors created
  1663. * above, and combined into a final document score using addition.
  1664. */
  1665. var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),
  1666. docRef = fieldRef.docRef,
  1667. fieldVector = this.fieldVectors[fieldRef],
  1668. score = queryVectors[fieldRef.fieldName].similarity(fieldVector)
  1669. if (docRef in results) {
  1670. results[docRef].score += score
  1671. results[docRef].matchData.combine(matchingFields[fieldRef])
  1672. } else {
  1673. results[docRef] = {
  1674. ref: docRef,
  1675. score: score,
  1676. matchData: matchingFields[fieldRef]
  1677. }
  1678. }
  1679. }
  1680. /*
  1681. * The results object needs to be converted into a list
  1682. * of results, sorted by score before being returned.
  1683. */
  1684. return Object.keys(results)
  1685. .map(function (key) {
  1686. return results[key]
  1687. })
  1688. .sort(function (a, b) {
  1689. return b.score - a.score
  1690. })
  1691. }
  1692. /**
  1693. * Prepares the index for JSON serialization.
  1694. *
  1695. * The schema for this JSON blob will be described in a
  1696. * separate JSON schema file.
  1697. *
  1698. * @returns {Object}
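        * @example <caption>an illustrative sketch of serialising an index; assumes `idx` is a built lunr.Index</caption>
        * var serialisedIndex = JSON.stringify(idx)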
  1699. */
  1700. lunr.Index.prototype.toJSON = function () {
  1701. var invertedIndex = Object.keys(this.invertedIndex)
  1702. .sort()
  1703. .map(function (term) {
  1704. return [term, this.invertedIndex[term]]
  1705. }, this)
  1706. var fieldVectors = Object.keys(this.fieldVectors)
  1707. .map(function (ref) {
  1708. return [ref, this.fieldVectors[ref].toJSON()]
  1709. }, this)
  1710. return {
  1711. version: lunr.version,
  1712. fields: this.fields,
  1713. fieldVectors: fieldVectors,
  1714. invertedIndex: invertedIndex,
  1715. pipeline: this.pipeline.toJSON()
  1716. }
  1717. }
  1718. /**
  1719. * Loads a previously serialized lunr.Index
  1720. *
  1721. * @param {Object} serializedIndex - A previously serialized lunr.Index
  1722. * @returns {lunr.Index}
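        * @example <caption>an illustrative sketch of restoring an index from a JSON string; the variable name is an assumption</caption>
        * var idx = lunr.Index.load(JSON.parse(serialisedIndex))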
  1723. */
  1724. lunr.Index.load = function (serializedIndex) {
  1725. var attrs = {},
  1726. fieldVectors = {},
  1727. serializedVectors = serializedIndex.fieldVectors,
  1728. invertedIndex = {},
  1729. serializedInvertedIndex = serializedIndex.invertedIndex,
  1730. tokenSetBuilder = new lunr.TokenSet.Builder,
  1731. pipeline = lunr.Pipeline.load(serializedIndex.pipeline)
  1732. if (serializedIndex.version != lunr.version) {
  1733. lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'")
  1734. }
  1735. for (var i = 0; i < serializedVectors.length; i++) {
  1736. var tuple = serializedVectors[i],
  1737. ref = tuple[0],
  1738. elements = tuple[1]
  1739. fieldVectors[ref] = new lunr.Vector(elements)
  1740. }
  1741. for (var i = 0; i < serializedInvertedIndex.length; i++) {
  1742. var tuple = serializedInvertedIndex[i],
  1743. term = tuple[0],
  1744. posting = tuple[1]
  1745. tokenSetBuilder.insert(term)
  1746. invertedIndex[term] = posting
  1747. }
  1748. tokenSetBuilder.finish()
  1749. attrs.fields = serializedIndex.fields
  1750. attrs.fieldVectors = fieldVectors
  1751. attrs.invertedIndex = invertedIndex
  1752. attrs.tokenSet = tokenSetBuilder.root
  1753. attrs.pipeline = pipeline
  1754. return new lunr.Index(attrs)
  1755. }
  1756. /*!
  1757. * lunr.Builder
  1758. * Copyright (C) 2017 Oliver Nightingale
  1759. */
  1760. /**
  1761. * lunr.Builder performs indexing on a set of documents and
  1762. * returns instances of lunr.Index ready for querying.
  1763. *
  1764. * All configuration of the index is done via the builder: the
  1765. * fields to index, the document reference, the text processing
  1766. * pipeline and document scoring parameters are all set on the
  1767. * builder before indexing.
  1768. *
  1769. * @constructor
  1770. * @property {string} _ref - Internal reference to the document reference field.
  1771. * @property {string[]} _fields - Internal reference to the document fields to index.
  1772. * @property {object} invertedIndex - The inverted index maps terms to document fields.
  1773. * @property {object} fieldTermFrequencies - Keeps track of term frequencies per document field.
  1774. * @property {object} fieldLengths - Keeps track of the length of each document field added to the index.
  1775. * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.
  1776. * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.
  1777. * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.
  1778. * @property {number} documentCount - Keeps track of the total number of documents indexed.
  1779. * @property {number} _b - A parameter to control field length normalization; setting this to 0 disables normalization, 1 fully normalizes field lengths, and the default value is 0.75.
  1780. * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2.
  1781. * @property {number} termIndex - A counter incremented for each unique term, used to identify a term's position in the vector space.
  1782. * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.
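        * @example <caption>an illustrative sketch of driving the builder directly; the field names and document shape are assumptions</caption>
        * var builder = new lunr.Builder
        *
        * builder.ref('id')
        * builder.field('title')
        *
        * // the pipelines start empty, so register any text processing required
        * builder.pipeline.add(lunr.stemmer)
        * builder.searchPipeline.add(lunr.stemmer)
        *
        * builder.add({ id: '1', title: 'An Example Document' })
        * var idx = builder.build()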
  1783. */
  1784. lunr.Builder = function () {
  1785. this._ref = "id"
  1786. this._fields = []
  1787. this.invertedIndex = Object.create(null)
  1788. this.fieldTermFrequencies = {}
  1789. this.fieldLengths = {}
  1790. this.tokenizer = lunr.tokenizer
  1791. this.pipeline = new lunr.Pipeline
  1792. this.searchPipeline = new lunr.Pipeline
  1793. this.documentCount = 0
  1794. this._b = 0.75
  1795. this._k1 = 1.2
  1796. this.termIndex = 0
  1797. this.metadataWhitelist = []
  1798. }
  1799. /**
  1800. * Sets the document field used as the document reference. Every document must have this field.
  1801. * The type of this field in the document should be a string; if it is not a string it will be
  1802. * coerced into a string by calling toString.
  1803. *
  1804. * The default ref is 'id'.
  1805. *
  1806. * The ref should _not_ be changed during indexing; it should be set before any documents are
  1807. * added to the index. Changing it during indexing can lead to inconsistent results.
  1808. *
  1809. * @param {string} ref - The name of the reference field in the document.
  1810. */
  1811. lunr.Builder.prototype.ref = function (ref) {
  1812. this._ref = ref
  1813. }
  1814. /**
  1815. * Adds a field to the list of document fields that will be indexed. Every document being
  1816. * indexed should have this field. Null values for this field in indexed documents will
  1817. * not cause errors but will limit the chance of that document being retrieved by searches.
  1818. *
  1819. * All fields should be added before adding documents to the index. Adding fields after
  1820. * a document has been indexed will have no effect on already indexed documents.
  1821. *
  1822. * @param {string} field - The name of a field to index in all documents.
  1823. */
  1824. lunr.Builder.prototype.field = function (field) {
  1825. this._fields.push(field)
  1826. }
  1827. /**
  1828. * A parameter to tune the amount of field length normalisation that is applied when
  1829. * calculating relevance scores. A value of 0 will completely disable any normalisation
  1830. * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b
  1831. * will be clamped to the range 0 - 1.
  1832. *
  1833. * @param {number} number - The value to set for this tuning parameter.
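        * @example <caption>an illustrative sketch of turning field length normalisation off entirely</caption>
        * builder.b(0)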
  1834. */
  1835. lunr.Builder.prototype.b = function (number) {
  1836. if (number < 0) {
  1837. this._b = 0
  1838. } else if (number > 1) {
  1839. this._b = 1
  1840. } else {
  1841. this._b = number
  1842. }
  1843. }
  1844. /**
  1845. * A parameter that controls the speed at which a rise in term frequency results in term
  1846. * frequency saturation. The default value is 1.2. Setting this to a higher value will give
  1847. * slower saturation levels; a lower value will result in quicker saturation.
  1848. *
  1849. * @param {number} number - The value to set for this tuning parameter.
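        * @example <caption>an illustrative sketch of slowing down term frequency saturation</caption>
        * builder.k1(1.6)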
  1850. */
  1851. lunr.Builder.prototype.k1 = function (number) {
  1852. this._k1 = number
  1853. }
  1854. /**
  1855. * Adds a document to the index.
  1856. *
  1857. * Before adding documents, the index should have been fully set up, with the document
  1858. * ref and all fields to index already having been specified.
  1859. *
  1860. * The document must have a field whose name matches the ref (by default this is 'id') and
  1861. * it should have all fields defined for indexing, though null or undefined values will not
  1862. * cause errors.
  1863. *
  1864. * @param {object} doc - The document to add to the index.
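        * @example <caption>an illustrative sketch of adding a document; assumes ref 'id' and fields 'title' and 'body' have been configured</caption>
        * builder.add({
        *   id: '123',
        *   title: 'A Short Title',
        *   body: null // a missing or null field will not cause an error
        * })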
  1865. */
  1866. lunr.Builder.prototype.add = function (doc) {
  1867. var docRef = doc[this._ref]
  1868. this.documentCount += 1
  1869. for (var i = 0; i < this._fields.length; i++) {
  1870. var fieldName = this._fields[i],
  1871. field = doc[fieldName],
  1872. tokens = this.tokenizer(field),
  1873. terms = this.pipeline.run(tokens),
  1874. fieldRef = new lunr.FieldRef (docRef, fieldName),
  1875. fieldTerms = Object.create(null)
  1876. this.fieldTermFrequencies[fieldRef] = fieldTerms
  1877. this.fieldLengths[fieldRef] = 0
  1878. // store the length of this field for this document
  1879. this.fieldLengths[fieldRef] += terms.length
  1880. // calculate term frequencies for this field
  1881. for (var j = 0; j < terms.length; j++) {
  1882. var term = terms[j]
  1883. if (fieldTerms[term] == undefined) {
  1884. fieldTerms[term] = 0
  1885. }
  1886. fieldTerms[term] += 1
  1887. // add to inverted index
  1888. // create an initial posting if one doesn't exist
  1889. if (this.invertedIndex[term] == undefined) {
  1890. var posting = Object.create(null)
  1891. posting["_index"] = this.termIndex
  1892. this.termIndex += 1
  1893. for (var k = 0; k < this._fields.length; k++) {
  1894. posting[this._fields[k]] = Object.create(null)
  1895. }
  1896. this.invertedIndex[term] = posting
  1897. }
  1898. // add an entry for this term/fieldName/docRef to the invertedIndex
  1899. if (this.invertedIndex[term][fieldName][docRef] == undefined) {
  1900. this.invertedIndex[term][fieldName][docRef] = Object.create(null)
  1901. }
  1902. // store all whitelisted metadata about this token in the
  1903. // inverted index
  1904. for (var l = 0; l < this.metadataWhitelist.length; l++) {
  1905. var metadataKey = this.metadataWhitelist[l],
  1906. metadata = term.metadata[metadataKey]
  1907. if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) {
  1908. this.invertedIndex[term][fieldName][docRef][metadataKey] = []
  1909. }
  1910. this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata)
  1911. }
  1912. }
  1913. }
  1914. }
  1915. /**
  1916. * Calculates the average document length for this index
  1917. *
  1918. * @private
  1919. */
  1920. lunr.Builder.prototype.calculateAverageFieldLengths = function () {
  1921. var fieldRefs = Object.keys(this.fieldLengths),
  1922. numberOfFields = fieldRefs.length,
  1923. accumulator = {},
  1924. documentsWithField = {}
  1925. for (var i = 0; i < numberOfFields; i++) {
  1926. var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
  1927. field = fieldRef.fieldName
  1928. documentsWithField[field] || (documentsWithField[field] = 0)
  1929. documentsWithField[field] += 1
  1930. accumulator[field] || (accumulator[field] = 0)
  1931. accumulator[field] += this.fieldLengths[fieldRef]
  1932. }
  1933. for (var i = 0; i < this._fields.length; i++) {
  1934. var field = this._fields[i]
  1935. accumulator[field] = accumulator[field] / documentsWithField[field]
  1936. }
  1937. this.averageFieldLength = accumulator
  1938. }
  1939. /**
  1940. * Builds a vector space model of every document using lunr.Vector
  1941. *
  1942. * @private
  1943. */
  1944. lunr.Builder.prototype.createFieldVectors = function () {
  1945. var fieldVectors = {},
  1946. fieldRefs = Object.keys(this.fieldTermFrequencies),
  1947. fieldRefsLength = fieldRefs.length
  1948. for (var i = 0; i < fieldRefsLength; i++) {
  1949. var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
  1950. field = fieldRef.fieldName,
  1951. fieldLength = this.fieldLengths[fieldRef],
  1952. fieldVector = new lunr.Vector,
  1953. termFrequencies = this.fieldTermFrequencies[fieldRef],
  1954. terms = Object.keys(termFrequencies),
  1955. termsLength = terms.length
  1956. for (var j = 0; j < termsLength; j++) {
  1957. var term = terms[j],
  1958. tf = termFrequencies[term],
  1959. termIndex = this.invertedIndex[term]._index,
  1960. idf = lunr.idf(this.invertedIndex[term], this.documentCount),
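        // The score below is the BM25 weight for this term in this field:
        // idf scaled by term frequency, saturated by _k1 and normalised by
        // the field's length relative to the average field length via _b.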
  1961. score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[field])) + tf),
  1962. scoreWithPrecision = Math.round(score * 1000) / 1000
  1963. // Converts 1.23456789 to 1.234.
  1964. // Reducing the precision so that the vectors take up less
  1965. // space when serialised. Doing it now so that they behave
  1966. // the same before and after serialisation. Also, this is
  1967. // the fastest approach to reducing a number's precision in
  1968. // JavaScript.
  1969. fieldVector.insert(termIndex, scoreWithPrecision)
  1970. }
  1971. fieldVectors[fieldRef] = fieldVector
  1972. }
  1973. this.fieldVectors = fieldVectors
  1974. }
  1975. /**
  1976. * Creates a token set of all tokens in the index using lunr.TokenSet
  1977. *
  1978. * @private
  1979. */
  1980. lunr.Builder.prototype.createTokenSet = function () {
  1981. this.tokenSet = lunr.TokenSet.fromArray(
  1982. Object.keys(this.invertedIndex).sort()
  1983. )
  1984. }
  1985. /**
  1986. * Builds the index, creating an instance of lunr.Index.
  1987. *
  1988. * This completes the indexing process and should only be called
  1989. * once all documents have been added to the index.
  1990. *
  1991. * @private
  1992. * @returns {lunr.Index}
  1993. */
  1994. lunr.Builder.prototype.build = function () {
  1995. this.calculateAverageFieldLengths()
  1996. this.createFieldVectors()
  1997. this.createTokenSet()
  1998. return new lunr.Index({
  1999. invertedIndex: this.invertedIndex,
  2000. fieldVectors: this.fieldVectors,
  2001. tokenSet: this.tokenSet,
  2002. fields: this._fields,
  2003. pipeline: this.searchPipeline
  2004. })
  2005. }
  2006. /**
  2007. * Applies a plugin to the index builder.
  2008. *
  2009. * A plugin is a function that is called with the index builder as its context.
  2010. * Plugins can be used to customise or extend the behaviour of the index
  2011. * in some way. A plugin is just a function that encapsulates the custom
  2012. * behaviour that should be applied when building the index.
  2013. *
  2014. * The plugin function will be called with the index builder as its argument; additional
  2015. * arguments can also be passed when calling use. The function will be called
  2016. * with the index builder as its context.
  2017. *
  2018. * @param {Function} fn - The plugin to apply.
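        * @example <caption>an illustrative sketch of a plugin; `tuningPlugin` is a hypothetical name</caption>
        * var tuningPlugin = function (builder, options) {
        *   builder.b(options.b)
        *   builder.k1(options.k1)
        * }
        *
        * builder.use(tuningPlugin, { b: 0.5, k1: 1.6 })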
  2019. */
  2020. lunr.Builder.prototype.use = function (fn) {
  2021. var args = Array.prototype.slice.call(arguments, 1)
  2022. args.unshift(this)
  2023. fn.apply(this, args)
  2024. }
  2025. /**
  2026. * Contains and collects metadata about a matching document.
  2027. * A single instance of lunr.MatchData is returned as part of every
  2028. * lunr.Index~Result.
  2029. *
  2030. * @constructor
  2031. * @param {string} term - The term this match data is associated with
  2032. * @param {string} field - The field in which the term was found
  2033. * @param {object} metadata - The metadata recorded about this term in this field
  2034. * @property {object} metadata - A cloned collection of metadata associated with this document.
  2035. * @see {@link lunr.Index~Result}
  2036. */
  2037. lunr.MatchData = function (term, field, metadata) {
  2038. var clonedMetadata = Object.create(null),
  2039. metadataKeys = Object.keys(metadata)
  2040. // Cloning the metadata to prevent the original
  2041. // being mutated during match data combination.
  2042. // Metadata is kept in an array within the inverted
  2043. // index so cloning the data can be done with
  2044. // Array#slice
  2045. for (var i = 0; i < metadataKeys.length; i++) {
  2046. var key = metadataKeys[i]
  2047. clonedMetadata[key] = metadata[key].slice()
  2048. }
  2049. this.metadata = Object.create(null)
  2050. this.metadata[term] = Object.create(null)
  2051. this.metadata[term][field] = clonedMetadata
  2052. }
  2053. /**
  2054. * An instance of lunr.MatchData will be created for every term that matches a
  2055. * document. However only one instance is required in a lunr.Index~Result. This
  2056. * method combines metadata from another instance of lunr.MatchData with this
  2057. * object's metadata.
  2058. *
  2059. * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.
  2060. * @see {@link lunr.Index~Result}
  2061. */
  2062. lunr.MatchData.prototype.combine = function (otherMatchData) {
  2063. var terms = Object.keys(otherMatchData.metadata)
  2064. for (var i = 0; i < terms.length; i++) {
  2065. var term = terms[i],
  2066. fields = Object.keys(otherMatchData.metadata[term])
  2067. if (this.metadata[term] == undefined) {
  2068. this.metadata[term] = Object.create(null)
  2069. }
  2070. for (var j = 0; j < fields.length; j++) {
  2071. var field = fields[j],
  2072. keys = Object.keys(otherMatchData.metadata[term][field])
  2073. if (this.metadata[term][field] == undefined) {
  2074. this.metadata[term][field] = Object.create(null)
  2075. }
  2076. for (var k = 0; k < keys.length; k++) {
  2077. var key = keys[k]
  2078. if (this.metadata[term][field][key] == undefined) {
  2079. this.metadata[term][field][key] = otherMatchData.metadata[term][field][key]
  2080. } else {
  2081. this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key])
  2082. }
  2083. }
  2084. }
  2085. }
  2086. }
  2087. /**
  2088. * A lunr.Query provides a programmatic way of defining queries to be performed
  2089. * against a {@link lunr.Index}.
  2090. *
  2091. * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method
  2092. * so the query object is pre-initialized with the right index fields.
  2093. *
  2094. * @constructor
  2095. * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
  2096. * @property {string[]} allFields - An array of all available fields in a lunr.Index.
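        * @example <caption>an illustrative sketch of constructing a query directly; the field names are assumptions</caption>
        * var query = new lunr.Query(['title', 'body'])
        * query.term('green')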
  2097. */
  2098. lunr.Query = function (allFields) {
  2099. this.clauses = []
  2100. this.allFields = allFields
  2101. }
  2102. /**
  2103. * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
  2104. *
  2105. * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
  2106. * concatenation.
  2107. *
  2108. * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.
  2109. *
  2110. * @constant
  2111. * @default
  2112. * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour
  2113. * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
  2114. * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
  2115. * @see lunr.Query~Clause
  2116. * @see lunr.Query#clause
  2117. * @see lunr.Query#term
  2118. * @example <caption>query term with trailing wildcard</caption>
  2119. * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })
  2120. * @example <caption>query term with leading and trailing wildcard</caption>
  2121. * query.term('foo', {
  2122. * wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
  2123. * })
  2124. */
  2125. lunr.Query.wildcard = new String ("*")
  2126. lunr.Query.wildcard.NONE = 0
  2127. lunr.Query.wildcard.LEADING = 1
  2128. lunr.Query.wildcard.TRAILING = 2
  2129. /**
  2130. * A single clause in a {@link lunr.Query} contains a term and details on how to
  2131. * match that term against a {@link lunr.Index}.
  2132. *
  2133. * @typedef {Object} lunr.Query~Clause
  2134. * @property {string[]} fields - The fields in an index this clause should be matched against.
  2135. * @property {number} [boost=1] - Any boost that should be applied when matching this clause.
  2136. * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
  2137. * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
  2138. * @property {number} [wildcard=0] - Whether the term should have wildcards appended or prepended.
  2139. */
  2140. /**
  2141. * Adds a {@link lunr.Query~Clause} to this query.
  2142. *
  2143. * Unless the clause contains the fields to be matched, all fields will be matched. In addition,
  2144. * a default boost of 1 is applied to the clause.
  2145. *
  2146. * @param {lunr.Query~Clause} clause - The clause to add to this query.
  2147. * @see lunr.Query~Clause
  2148. * @returns {lunr.Query}
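        * @example <caption>an illustrative sketch of adding a clause with a field scope and boost; the field name is an assumption</caption>
        * query.clause({
        *   term: 'foo',
        *   fields: ['title'],
        *   boost: 5
        * })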
  2149. */
  2150. lunr.Query.prototype.clause = function (clause) {
  2151. if (!('fields' in clause)) {
  2152. clause.fields = this.allFields
  2153. }
  2154. if (!('boost' in clause)) {
  2155. clause.boost = 1
  2156. }
  2157. if (!('usePipeline' in clause)) {
  2158. clause.usePipeline = true
  2159. }
  2160. if (!('wildcard' in clause)) {
  2161. clause.wildcard = lunr.Query.wildcard.NONE
  2162. }
  2163. if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) {
  2164. clause.term = "*" + clause.term
  2165. }
  2166. if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) {
  2167. clause.term = "" + clause.term + "*"
  2168. }
  2169. this.clauses.push(clause)
  2170. return this
  2171. }
  2172. /**
  2173. * Adds a term to the current query; under the covers this will add a {@link lunr.Query~Clause}
  2174. * to the list of clauses that make up this query.
  2175. *
  2176. * @param {string} term - The term to add to the query.
  2177. * @param {Object} [options] - Any additional properties to add to the query clause.
  2178. * @returns {lunr.Query}
  2179. * @see lunr.Query#clause
  2180. * @see lunr.Query~Clause
  2181. * @example <caption>adding a single term to a query</caption>
  2182. * query.term("foo")
  2183. * @example <caption>adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard</caption>
  2184. * query.term("foo", {
  2185. * fields: ["title"],
  2186. * boost: 10,
  2187. * wildcard: lunr.Query.wildcard.TRAILING
  2188. * })
  2189. */
  2190. lunr.Query.prototype.term = function (term, options) {
  2191. var clause = options || {}
  2192. clause.term = term
  2193. this.clause(clause)
  2194. return this
  2195. }
  2196. lunr.QueryParseError = function (message, start, end) {
  2197. this.name = "QueryParseError"
  2198. this.message = message
  2199. this.start = start
  2200. this.end = end
  2201. }
  2202. lunr.QueryParseError.prototype = new Error
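        // lunr.QueryLexer splits a raw search string into lexemes (FIELD, TERM,
        // EDIT_DISTANCE and BOOST) by running a chain of state functions, each
        // returning the next state until the end of the string is reached.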
  2203. lunr.QueryLexer = function (str) {
  2204. this.lexemes = []
  2205. this.str = str
  2206. this.length = str.length
  2207. this.pos = 0
  2208. this.start = 0
  2209. this.escapeCharPositions = []
  2210. }
  2211. lunr.QueryLexer.prototype.run = function () {
  2212. var state = lunr.QueryLexer.lexText
  2213. while (state) {
  2214. state = state(this)
  2215. }
  2216. }
  2217. lunr.QueryLexer.prototype.sliceString = function () {
  2218. var subSlices = [],
  2219. sliceStart = this.start,
  2220. sliceEnd = this.pos
  2221. for (var i = 0; i < this.escapeCharPositions.length; i++) {
  2222. sliceEnd = this.escapeCharPositions[i]
  2223. subSlices.push(this.str.slice(sliceStart, sliceEnd))
  2224. sliceStart = sliceEnd + 1
  2225. }
  2226. subSlices.push(this.str.slice(sliceStart, this.pos))
  2227. this.escapeCharPositions.length = 0
  2228. return subSlices.join('')
  2229. }
  2230. lunr.QueryLexer.prototype.emit = function (type) {
  2231. this.lexemes.push({
  2232. type: type,
  2233. str: this.sliceString(),
  2234. start: this.start,
  2235. end: this.pos
  2236. })
  2237. this.start = this.pos
  2238. }
  2239. lunr.QueryLexer.prototype.escapeCharacter = function () {
  2240. this.escapeCharPositions.push(this.pos - 1)
  2241. this.pos += 1
  2242. }
  2243. lunr.QueryLexer.prototype.next = function () {
  2244. if (this.pos >= this.length) {
  2245. return lunr.QueryLexer.EOS
  2246. }
  2247. var char = this.str.charAt(this.pos)
  2248. this.pos += 1
  2249. return char
  2250. }
  2251. lunr.QueryLexer.prototype.width = function () {
  2252. return this.pos - this.start
  2253. }
  2254. lunr.QueryLexer.prototype.ignore = function () {
  2255. if (this.start == this.pos) {
  2256. this.pos += 1
  2257. }
  2258. this.start = this.pos
  2259. }
  2260. lunr.QueryLexer.prototype.backup = function () {
  2261. this.pos -= 1
  2262. }
  2263. lunr.QueryLexer.prototype.acceptDigitRun = function () {
  2264. var char, charCode
  2265. do {
  2266. char = this.next()
  2267. charCode = char.charCodeAt(0)
  2268. } while (charCode > 47 && charCode < 58)
  2269. if (char != lunr.QueryLexer.EOS) {
  2270. this.backup()
  2271. }
  2272. }
  2273. lunr.QueryLexer.prototype.more = function () {
  2274. return this.pos < this.length
  2275. }
  2276. lunr.QueryLexer.EOS = 'EOS'
  2277. lunr.QueryLexer.FIELD = 'FIELD'
  2278. lunr.QueryLexer.TERM = 'TERM'
  2279. lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE'
  2280. lunr.QueryLexer.BOOST = 'BOOST'
  2281. lunr.QueryLexer.lexField = function (lexer) {
  2282. lexer.backup()
  2283. lexer.emit(lunr.QueryLexer.FIELD)
  2284. lexer.ignore()
  2285. return lunr.QueryLexer.lexText
  2286. }
  2287. lunr.QueryLexer.lexTerm = function (lexer) {
  2288. if (lexer.width() > 1) {
  2289. lexer.backup()
  2290. lexer.emit(lunr.QueryLexer.TERM)
  2291. }
  2292. lexer.ignore()
  2293. if (lexer.more()) {
  2294. return lunr.QueryLexer.lexText
  2295. }
  2296. }
  2297. lunr.QueryLexer.lexEditDistance = function (lexer) {
  2298. lexer.ignore()
  2299. lexer.acceptDigitRun()
  2300. lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)
  2301. return lunr.QueryLexer.lexText
  2302. }
  2303. lunr.QueryLexer.lexBoost = function (lexer) {
  2304. lexer.ignore()
  2305. lexer.acceptDigitRun()
  2306. lexer.emit(lunr.QueryLexer.BOOST)
  2307. return lunr.QueryLexer.lexText
  2308. }
  2309. lunr.QueryLexer.lexEOS = function (lexer) {
  2310. if (lexer.width() > 0) {
  2311. lexer.emit(lunr.QueryLexer.TERM)
  2312. }
  2313. }
  2314. // This matches the separator used when tokenising fields
  2315. // within a document. These should match, otherwise it is
  2316. // not possible to search for some tokens within a document.
  2317. //
  2318. // It is possible for the user to change the separator on the
  2319. // tokenizer so it _might_ clash with any other of the special
  2320. // characters already used within the search string, e.g. :.
  2321. //
  2322. // This means that it is possible to change the separator in
  2323. // such a way that makes some words unsearchable using a search
  2324. // string.
  2325. lunr.QueryLexer.termSeparator = lunr.tokenizer.separator
  2326. lunr.QueryLexer.lexText = function (lexer) {
  2327. while (true) {
  2328. var char = lexer.next()
  2329. if (char == lunr.QueryLexer.EOS) {
  2330. return lunr.QueryLexer.lexEOS
  2331. }
  2332. // Escape character is '\'
  2333. if (char.charCodeAt(0) == 92) {
  2334. lexer.escapeCharacter()
  2335. continue
  2336. }
  2337. if (char == ":") {
  2338. return lunr.QueryLexer.lexField
  2339. }
  2340. if (char == "~") {
  2341. lexer.backup()
  2342. if (lexer.width() > 0) {
  2343. lexer.emit(lunr.QueryLexer.TERM)
  2344. }
  2345. return lunr.QueryLexer.lexEditDistance
  2346. }
  2347. if (char == "^") {
  2348. lexer.backup()
  2349. if (lexer.width() > 0) {
  2350. lexer.emit(lunr.QueryLexer.TERM)
  2351. }
  2352. return lunr.QueryLexer.lexBoost
  2353. }
  2354. if (char.match(lunr.QueryLexer.termSeparator)) {
  2355. return lunr.QueryLexer.lexTerm
  2356. }
  2357. }
  2358. }
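        // lunr.QueryParser consumes the lexemes produced by lunr.QueryLexer and
        // builds up clauses on the supplied lunr.Query, one state function per
        // lexeme type, raising lunr.QueryParseError on malformed input.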
  2359. lunr.QueryParser = function (str, query) {
  2360. this.lexer = new lunr.QueryLexer (str)
  2361. this.query = query
  2362. this.currentClause = {}
  2363. this.lexemeIdx = 0
  2364. }
  2365. lunr.QueryParser.prototype.parse = function () {
  2366. this.lexer.run()
  2367. this.lexemes = this.lexer.lexemes
  2368. var state = lunr.QueryParser.parseFieldOrTerm
  2369. while (state) {
  2370. state = state(this)
  2371. }
  2372. return this.query
  2373. }
  2374. lunr.QueryParser.prototype.peekLexeme = function () {
  2375. return this.lexemes[this.lexemeIdx]
  2376. }
  2377. lunr.QueryParser.prototype.consumeLexeme = function () {
  2378. var lexeme = this.peekLexeme()
  2379. this.lexemeIdx += 1
  2380. return lexeme
  2381. }
  2382. lunr.QueryParser.prototype.nextClause = function () {
  2383. var completedClause = this.currentClause
  2384. this.query.clause(completedClause)
  2385. this.currentClause = {}
  2386. }
  2387. lunr.QueryParser.parseFieldOrTerm = function (parser) {
  2388. var lexeme = parser.peekLexeme()
  2389. if (lexeme == undefined) {
  2390. return
  2391. }
  2392. switch (lexeme.type) {
  2393. case lunr.QueryLexer.FIELD:
  2394. return lunr.QueryParser.parseField
  2395. case lunr.QueryLexer.TERM:
  2396. return lunr.QueryParser.parseTerm
  2397. default:
  2398. var errorMessage = "expected either a field or a term, found " + lexeme.type
  2399. if (lexeme.str.length >= 1) {
  2400. errorMessage += " with value '" + lexeme.str + "'"
  2401. }
  2402. throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  2403. }
  2404. }
  2405. lunr.QueryParser.parseField = function (parser) {
  2406. var lexeme = parser.consumeLexeme()
  2407. if (lexeme == undefined) {
  2408. return
  2409. }
  2410. if (parser.query.allFields.indexOf(lexeme.str) == -1) {
  2411. var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '),
  2412. errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields
  2413. throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  2414. }
  2415. parser.currentClause.fields = [lexeme.str]
  2416. var nextLexeme = parser.peekLexeme()
  2417. if (nextLexeme == undefined) {
  2418. var errorMessage = "expecting term, found nothing"
  2419. throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  2420. }
  2421. switch (nextLexeme.type) {
  2422. case lunr.QueryLexer.TERM:
  2423. return lunr.QueryParser.parseTerm
  2424. default:
  2425. var errorMessage = "expecting term, found '" + nextLexeme.type + "'"
  2426. throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  2427. }
  2428. }
  2429. lunr.QueryParser.parseTerm = function (parser) {
  2430. var lexeme = parser.consumeLexeme()
  2431. if (lexeme == undefined) {
  2432. return
  2433. }
  2434. parser.currentClause.term = lexeme.str.toLowerCase()
  2435. if (lexeme.str.indexOf("*") != -1) {
  2436. parser.currentClause.usePipeline = false
  2437. }
  2438. var nextLexeme = parser.peekLexeme()
  2439. if (nextLexeme == undefined) {
  2440. parser.nextClause()
  2441. return
  2442. }
  2443. switch (nextLexeme.type) {
  2444. case lunr.QueryLexer.TERM:
  2445. parser.nextClause()
  2446. return lunr.QueryParser.parseTerm
  2447. case lunr.QueryLexer.FIELD:
  2448. parser.nextClause()
  2449. return lunr.QueryParser.parseField
  2450. case lunr.QueryLexer.EDIT_DISTANCE:
  2451. return lunr.QueryParser.parseEditDistance
  2452. case lunr.QueryLexer.BOOST:
  2453. return lunr.QueryParser.parseBoost
  2454. default:
  2455. var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
  2456. throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  2457. }
  2458. }
  2459. lunr.QueryParser.parseEditDistance = function (parser) {
  2460. var lexeme = parser.consumeLexeme()
  2461. if (lexeme == undefined) {
  2462. return
  2463. }
  2464. var editDistance = parseInt(lexeme.str, 10)
  2465. if (isNaN(editDistance)) {
  2466. var errorMessage = "edit distance must be numeric"
  2467. throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  2468. }
  2469. parser.currentClause.editDistance = editDistance
  2470. var nextLexeme = parser.peekLexeme()
  2471. if (nextLexeme == undefined) {
  2472. parser.nextClause()
  2473. return
  2474. }
  2475. switch (nextLexeme.type) {
  2476. case lunr.QueryLexer.TERM:
  2477. parser.nextClause()
  2478. return lunr.QueryParser.parseTerm
  2479. case lunr.QueryLexer.FIELD:
  2480. parser.nextClause()
  2481. return lunr.QueryParser.parseField
  2482. case lunr.QueryLexer.EDIT_DISTANCE:
  2483. return lunr.QueryParser.parseEditDistance
  2484. case lunr.QueryLexer.BOOST:
  2485. return lunr.QueryParser.parseBoost
  2486. default:
  2487. var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
  2488. throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  2489. }
  2490. }
  2491. lunr.QueryParser.parseBoost = function (parser) {
  2492. var lexeme = parser.consumeLexeme()
  2493. if (lexeme == undefined) {
  2494. return
  2495. }
  2496. var boost = parseInt(lexeme.str, 10)
  2497. if (isNaN(boost)) {
  2498. var errorMessage = "boost must be numeric"
  2499. throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  2500. }
  2501. parser.currentClause.boost = boost
  2502. var nextLexeme = parser.peekLexeme()
  2503. if (nextLexeme == undefined) {
  2504. parser.nextClause()
  2505. return
  2506. }
  2507. switch (nextLexeme.type) {
  2508. case lunr.QueryLexer.TERM:
  2509. parser.nextClause()
  2510. return lunr.QueryParser.parseTerm
  2511. case lunr.QueryLexer.FIELD:
  2512. parser.nextClause()
  2513. return lunr.QueryParser.parseField
  2514. case lunr.QueryLexer.EDIT_DISTANCE:
  2515. return lunr.QueryParser.parseEditDistance
  2516. case lunr.QueryLexer.BOOST:
  2517. return lunr.QueryParser.parseBoost
  2518. default:
  2519. var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
  2520. throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  2521. }
  2522. }
  2523. /**
  2524. * export the module via AMD, CommonJS or as a browser global
  2525. * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js
  2526. */
  2527. ;(function (root, factory) {
  2528. if (typeof define === 'function' && define.amd) {
  2529. // AMD. Register as an anonymous module.
  2530. define(factory)
  2531. } else if (typeof exports === 'object') {
  2532. /**
  2533. * Node. Does not work with strict CommonJS, but
  2534. * only CommonJS-like environments that support module.exports,
  2535. * like Node.
  2536. */
  2537. module.exports = factory()
  2538. } else {
  2539. // Browser globals (root is window)
  2540. root.lunr = factory()
  2541. }
  2542. }(this, function () {
  2543. /**
  2544. * Just return a value to define the module export.
  2545. * This example returns an object, but the module
  2546. * can return a function as the exported value.
  2547. */
  2548. return lunr
  2549. }))
  2550. })();